diff --git a/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp b/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp
index 1d5707194c560d..ee2e80e57aca50 100644
--- a/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp
+++ b/src/plugins/intel_gpu/src/graph/impls/onednn/deconvolution_onednn.cpp
@@ -35,6 +35,14 @@ static std::shared_ptr<dnnl::deconvolution_forward::primitive_desc> get_deconvol
     auto output_md = onednn::layout_to_memory_desc(output_layout, tag_in_out);
     auto grouped_weights = format::is_grouped(weights_layout.format) || prim->grouped_weights_shape;
 
+    // Extend deconv parameters in case if spatials rank of output memory doesn't match size of parameters
+    int64_t insert_count = static_cast<int64_t>(output_md.get_dims().size()) - 2 - stride.size();
+    if (insert_count > 0) {
+        stride.insert(stride.end(), insert_count, 1);
+        pad_l.insert(pad_l.end(), insert_count, 0);
+        pad_r.insert(pad_r.end(), insert_count, 0);
+    }
+
     for (size_t i = 0; i < dilation.size(); i++) {
         dilation[i]--;
         int weights_offset = (grouped_weights ? 3 : 2) + static_cast<int>(i);
@@ -45,14 +53,6 @@ static std::shared_ptr<dnnl::deconvolution_forward::primitive_desc> get_deconvol
         pad_r[i] = (is - 1) * stride[i] - os + kernel_range - pad_l[i];
     }
 
-    // Extend deconv parameters in case if spatials rank of output memory doesn't match size of parameters
-    int64_t insert_count = static_cast<int64_t>(output_md.get_dims().size()) - 2 - stride.size();
-    if (insert_count > 0) {
-        stride.insert(stride.end(), insert_count, 1);
-        pad_l.insert(pad_l.end(), insert_count, 0);
-        pad_r.insert(pad_r.end(), insert_count, 0);
-    }
-
     if (!prim->bias.empty()) {
         auto bias_md = onednn::layout_to_memory_desc(impl_params.get_input_layout(2), dnnl::memory::format_tag::any, true);
         return std::make_shared<dnnl::deconvolution_forward::primitive_desc>(