Skip to content

Commit 155d849

Browse files
[GPU] Conv should not extend strides and dilations for 3d output
1 parent 1e9768b commit 155d849

File tree

4 files changed

+140
-3
lines changed

4 files changed

+140
-3
lines changed

src/plugins/intel_gpu/src/graph/layout_optimizer.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -536,7 +536,7 @@ bool should_use_winograd_2x3_s1(const convolution_node& node,
536536

537537
auto prim = node.get_primitive();
538538
if (input_layout.data_type != data_types::f16
539-
|| input_layout.feature() % 64 != 0 // current algorithm is effective for ifm to be a multiple of 64
539+
|| (input_layout.is_static() && input_layout.feature() % 64 != 0) // current algorithm is effective for ifm to be a multiple of 64
540540
|| weights_layout.spatial(0) != 3 // weights have to be 3x3 by definition
541541
|| weights_layout.spatial(1) != 3 // weights have to be 3x3 by definition
542542
|| weights_layout.batch() % 64 != 0 // current algorithm is effective for ofm to be a multiple of 64
@@ -616,7 +616,7 @@ bool layout_optimizer::convolution_byxf_opt(const layout& input_layout,
616616
all_ones(conv->dilation) &&
617617
!node.get_transposed() &&
618618
node.get_groups() == 1 &&
619-
input_layout.feature() % 32 == 0 &&
619+
(input_layout.is_static() && input_layout.feature() % 32 == 0) &&
620620
weights_layout.spatial(1) == 1 && output_layout.feature() % 64 == 0 &&
621621
weights_layout.batch() % 64 == 0 &&
622622
all_ones(conv->stride) &&

src/plugins/intel_gpu/src/plugin/ops/convolution.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ static void CreateConvolutionOp(ProgramBuilder& p, const std::shared_ptr<ov::int
4444
auto pads_end = op->get_pads_end();
4545
auto auto_pad = op->get_auto_pad();
4646

47-
if (!op->is_dynamic()) {
47+
if (!op->is_dynamic() && outDims.size() >= 4) {
4848
// Extend 1d vectors to 2d as 1d can't be handled properly by the graph optimizer for now
4949
strides.resize(std::max<size_t>(2, strides.size()), 1);
5050
dilations.resize(std::max<size_t>(2, strides.size()), 1);

src/plugins/intel_gpu/tests/functional/shared_tests_instances/single_layer_tests/convolution.cpp

+19
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,25 @@ const std::vector<ov::element::Type> netPrecisions = {
1414
ov::element::f16
1515
};
1616

17+
/* ============= 1D Convolution ============= */
18+
const auto conv1DParams = ::testing::Combine(
19+
::testing::Values(std::vector<size_t>({1})),
20+
::testing::Values(std::vector<size_t>({1})),
21+
::testing::Values(std::vector<ptrdiff_t>({0})),
22+
::testing::Values(std::vector<ptrdiff_t>({0})),
23+
::testing::Values(std::vector<size_t>({1})),
24+
::testing::Values(192),
25+
::testing::Values(ov::op::PadType::EXPLICIT)
26+
);
27+
28+
INSTANTIATE_TEST_SUITE_P(smoke_Convolution1D, ConvolutionLayerTest,
29+
::testing::Combine(
30+
conv1DParams,
31+
::testing::Values(ov::element::f32),
32+
::testing::Values(ov::test::static_partial_shapes_to_test_representation(std::vector<ov::PartialShape>({{1, 256, 1}}))),
33+
::testing::Values(ov::test::utils::DEVICE_GPU)),
34+
ConvolutionLayerTest::getTestCaseName);
35+
1736
/* ============= 2D Convolution ============= */
1837
const std::vector<std::vector<size_t >> kernels = {{3, 3},
1938
{3, 5}};
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,118 @@
1+
// Copyright (C) 2024 Intel Corporation
2+
// SPDX-License-Identifier: Apache-2.0
3+
//
4+
#include "shared_test_classes/base/ov_subgraph.hpp"
5+
#include "shared_test_classes/single_op/convolution.hpp"
6+
7+
#include "common_test_utils/ov_tensor_utils.hpp"
8+
#include "common_test_utils/node_builders/convolution.hpp"
9+
#include "common_test_utils/data_utils.hpp"
10+
#include "common_test_utils/node_builders/constant.hpp"
11+
#include "common_test_utils/node_builders/fake_quantize.hpp"
12+
13+
#include "openvino/op/parameter.hpp"
14+
#include "openvino/op/constant.hpp"
15+
#include "openvino/op/result.hpp"
16+
#include "openvino/op/convolution.hpp"
17+
#include "openvino/op/fake_quantize.hpp"
18+
19+
20+
namespace {
21+
using ov::test::InputShape;
22+
23+
typedef std::tuple<
24+
std::vector<InputShape>, // input shape
25+
ov::element::Type, // Network precision
26+
std::string // Device name
27+
> convStaticConcatDynamicGPUTestDynamicParamsSet;
28+
class ConvStaticConcatDynamicGPUTestDynamic : public testing::WithParamInterface<convStaticConcatDynamicGPUTestDynamicParamsSet>,
29+
virtual public ov::test::SubgraphBaseTest {
30+
public:
31+
static std::string getTestCaseName(const testing::TestParamInfo<convStaticConcatDynamicGPUTestDynamicParamsSet>& obj) {
32+
std::vector<InputShape> inputShape;
33+
ov::element::Type model_type;
34+
std::string targetDevice;
35+
36+
convStaticConcatDynamicGPUTestDynamicParamsSet basicParamsSet = obj.param;
37+
std::tie(inputShape, model_type, targetDevice) = basicParamsSet;
38+
39+
std::ostringstream result;
40+
result << "IS_Dynamic=";
41+
result << ov::test::utils::partialShape2str({inputShape[0].first}) << "_";
42+
for (const auto& actual_shape : {inputShape[0].second}) {
43+
result << ov::test::utils::partialShape2str({actual_shape[0]}) << "_";
44+
}
45+
result << "IS_Static=";
46+
result << ov::test::utils::partialShape2str({inputShape[1].first}) << "_";
47+
for (const auto& actual_shape : {inputShape[1].second}) {
48+
result << ov::test::utils::partialShape2str({actual_shape[0]}) << "_";
49+
}
50+
result << "model_type=" << model_type << "_";
51+
result << "targetDevice=" << targetDevice;
52+
return result.str();
53+
}
54+
55+
protected:
56+
void SetUp() override {
57+
std::vector<InputShape> inputShape;
58+
ov::element::Type model_type;
59+
convStaticConcatDynamicGPUTestDynamicParamsSet basicParamsSet = this->GetParam();
60+
std::tie(inputShape, model_type, targetDevice) = basicParamsSet;
61+
62+
init_input_shapes(inputShape);
63+
64+
ov::ParameterVector inputParams;
65+
for (auto&& shape : inputDynamicShapes)
66+
inputParams.push_back(std::make_shared<ov::op::v0::Parameter>(model_type, shape));
67+
68+
// Constant weight
69+
auto sh0 = inputShape[0].first[1].get_length();
70+
auto sh1 = inputShape[1].first[1].get_length();
71+
ov::PartialShape inShape1 = {sh0, sh1, 1};
72+
auto tensor1 = ov::test::utils::create_and_fill_tensor(model_type, inShape1.to_shape());
73+
std::shared_ptr<ov::Node> constantWeightOp = std::make_shared<ov::op::v0::Constant>(tensor1);
74+
constantWeightOp->set_friendly_name("constantWeight");
75+
76+
// Static convolution
77+
auto convolutionOp = ov::test::utils::make_convolution(inputParams[1], constantWeightOp, model_type,
78+
{3}, {1}, {0}, {0}, {1}, ov::op::PadType::EXPLICIT, 1);
79+
convolutionOp->set_friendly_name("convolution");
80+
81+
// Dynamic Concat
82+
const auto concat = std::make_shared<ov::op::v0::Concat>(ov::OutputVector({inputParams[0], convolutionOp}), 2);
83+
84+
// Function
85+
auto makeFunction = [](const ov::element::Type &ngPrc, ov::ParameterVector &params, const std::shared_ptr<ov::Node> &lastNode) {
86+
ov::ResultVector results;
87+
88+
for (size_t i = 0; i < lastNode->get_output_size(); i++)
89+
results.push_back(std::make_shared<ov::op::v0::Result>(lastNode->output(i)));
90+
91+
return std::make_shared<ov::Model>(results, params, "Concat");
92+
};
93+
function = makeFunction(model_type, inputParams, concat);
94+
}
95+
};
96+
97+
TEST_P(ConvStaticConcatDynamicGPUTestDynamic, Inference) {
98+
run();
99+
}
100+
101+
const std::vector<std::vector<ov::test::InputShape>> dynInputShapes1D = {
102+
{
103+
{{1, 192, ov::Dimension::dynamic()}, {{1, 192, 1}}},
104+
{{1, 256, 1}, {{1, 256, 1}}},
105+
},
106+
{
107+
{{1, 32, ov::Dimension::dynamic()}, {{1, 32, 1}}},
108+
{{1, 48, 1}, {{1, 48, 1}}},
109+
},
110+
};
111+
112+
INSTANTIATE_TEST_SUITE_P(smoke_static_conv_n_dynamic_concat, ConvStaticConcatDynamicGPUTestDynamic,
113+
::testing::Combine(::testing::ValuesIn(dynInputShapes1D),
114+
::testing::Values(ov::element::f16),
115+
::testing::Values(ov::test::utils::DEVICE_GPU)),
116+
ConvStaticConcatDynamicGPUTestDynamic::getTestCaseName);
117+
118+
} // namespace

0 commit comments

Comments
 (0)