
Commit 2cc7a82

Merge branch 'attribute_pattern_matching' of https://github.com/jane-intel/openvino into attribute_pattern_matching

Signed-off-by: Evgeniia Nugmanova <evgeniia.nugmanova@intel.com>

2 parents: d36881e + 9c633bc

File tree: 18 files changed (+127 -95 lines)


.github/workflows/workflow_rerunner.yml (+1)

@@ -12,6 +12,7 @@ on:
       - Linux ARM64 (Ubuntu 20.04, Python 3.11)
       - Linux Static CC (Ubuntu 22.04, Python 3.11, Clang)
       - Linux RISC-V with Conan (Ubuntu 22.04, Python 3.10)
+      - Linux (Ubuntu 22.04, Python 3.11, Intel DPC++ Compiler)
       - Fedora 29 (RHEL 8.4), Python 3.9
       - Windows (VS 2022, Python 3.11, Release)
       - Windows (VS 2022, Python 3.11, Debug)

src/core/include/openvino/core/type.hpp (+6)

@@ -101,6 +101,12 @@ is_type(Value value) {
     return value && value->get_type_info().is_castable(Type::get_type_info_static());
 }
 
+/// \brief Tests if value is a pointer/shared_ptr that can be statically cast to any of the specified types
+template <typename Type, typename... Types, typename Value>
+bool is_type_any_of(Value value) {
+    return is_type<Type>(value) || (is_type_any_of<Types>(value) || ...);
+}
+
 /// Casts a Value* to a Type* if it is of type Type, nullptr otherwise
 template <typename Type, typename Value>
 typename std::enable_if<std::is_convertible<decltype(static_cast<Type*>(std::declval<Value>())), Type*>::value,
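The new helper is a recursive fold: for the last type in the list the trailing pack Types is empty, the fold (is_type_any_of<Types>(value) || ...) collapses to false, and the recursion bottoms out. A minimal usage sketch follows; the wrapper function and the particular ops are illustrative only, not part of this commit:

    #include <memory>

    #include "openvino/core/node.hpp"
    #include "openvino/core/type.hpp"
    #include "openvino/op/relu.hpp"
    #include "openvino/op/sigmoid.hpp"
    #include "openvino/op/tanh.hpp"

    // Hypothetical helper: true when the node is any of the listed activations,
    // equivalent to chaining three ov::is_type<T>(node) checks with ||.
    bool is_simple_activation(const std::shared_ptr<ov::Node>& node) {
        return ov::is_type_any_of<ov::op::v0::Relu, ov::op::v0::Sigmoid, ov::op::v0::Tanh>(node);
    }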

src/plugins/intel_cpu/src/emitters/snippets/aarch64/jit_kernel_emitter.cpp (+1 -1)

@@ -172,7 +172,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
     auto expected_out_type = snippets::RegType::undefined;
     const auto& node = expression->get_node();
     // Note: currently only a few operations are allowed to have mixed in/out register types => skip validation here
-    if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+    if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
         !std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
         std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
     }

src/plugins/intel_cpu/src/emitters/snippets/x64/jit_kernel_emitter.cpp (+1 -1)

@@ -125,7 +125,7 @@ void jit_kernel_emitter::emit_impl(const std::vector<size_t>& in, const std::vec
     const auto& node = expression->get_node();
     // Note: A few operations are allowed to have mixed register types on their inputs (or outputs) => skip
     // validation here
-    if (!ov::is_type<snippets::op::LoopEnd>(node) && !ov::is_type<snippets::op::RegSpillBase>(node) &&
+    if (!ov::is_type_any_of<snippets::op::LoopEnd, snippets::op::RegSpillBase>(node) &&
         !std::dynamic_pointer_cast<jit_nop_emitter>(emitter)) {
         std::tie(expected_in_type, expected_out_type) = get_expected_reg_types(emitter);
     }

src/plugins/intel_cpu/src/nodes/conv.cpp (+1 -1)

@@ -226,7 +226,7 @@ class Convolution::FusedSubgraph {
 
 bool Convolution::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<ov::op::v1::Convolution>(op) && !ov::is_type<ov::op::v1::GroupConvolution>(op)) {
+        if (!ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(op)) {
             errorMessage = "Only opset1 Convolution and GroupConvolution operations are supported";
             return false;
         }

src/plugins/intel_cpu/src/nodes/dft.cpp (+1 -4)

@@ -27,10 +27,7 @@ bool DFT::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::s
             errorMessage = "Doesn't support op with dynamic shapes";
             return false;
         }
-        const auto interpDFT = ov::is_type<const op::v7::DFT>(op);
-        const auto interpIDFT = ov::is_type<const op::v7::IDFT>(op);
-
-        if (!interpDFT && !interpIDFT) {
+        if (!ov::is_type_any_of<const op::v7::DFT, const op::v7::IDFT>(op)) {
             errorMessage = "Only opset7 DFT/IDFT operation is supported";
             return false;
         }

src/plugins/intel_cpu/src/nodes/fullyconnected.cpp (+1 -3)

@@ -78,9 +78,7 @@ ov::element::TypeVector FullyConnected::getSupportedCompressedActivationsTypes()
 bool FullyConnected::isSupportedOperation(const std::shared_ptr<const ov::Node>& op,
                                           std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<const ov::op::internal::FullyConnected>(op) &&
-            !ov::is_type<const ov::op::internal::FullyConnectedQuantizedLegacy>(op) &&
-            !ov::is_type<const ov::op::internal::FullyConnectedCompressed>(op)) {
+        if (!ov::is_type<const ov::op::internal::FullyConnected>(op)) {
             return false;
         }
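(The single check above still accepts FullyConnectedQuantizedLegacy and FullyConnectedCompressed: in OpenVINO both ops derive from ov::op::internal::FullyConnected, and is_type matches through the type-info hierarchy via is_castable, so dropping the two explicit checks does not change behavior.)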

src/plugins/intel_cpu/src/nodes/pooling.cpp (+7 -5)

@@ -145,19 +145,21 @@ dnnl::pooling_forward::primitive_desc createDescriptorHelper(const dnnl::engine&
 
 bool Pooling::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (ov::is_type<const ov::op::v8::MaxPool>(op) || ov::is_type<const ov::op::v14::MaxPool>(op)) {
+        if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
             if (!op->get_output_target_inputs(1).empty()) {
                 errorMessage = "MaxPool from opset8 and opset14 is supported only with one output";
                 return false;
             }
-        } else if (!ov::is_type<const ov::op::v1::MaxPool>(op) && !ov::is_type<const ov::op::v8::MaxPool>(op) &&
-                   !ov::is_type<const ov::op::v14::MaxPool>(op) && !ov::is_type<const ov::op::v1::AvgPool>(op) &&
-                   !ov::is_type<const ov::op::v14::AvgPool>(op)) {
+        } else if (!ov::is_type_any_of<const ov::op::v1::MaxPool,
+                                       const ov::op::v8::MaxPool,
+                                       const ov::op::v14::MaxPool,
+                                       const ov::op::v1::AvgPool,
+                                       const ov::op::v14::AvgPool>(op)) {
             errorMessage = "Supported ops are MaxPool-1, MaxPool-8, MaxPool-14, AvgPool-1 and AvgPool-14";
             return false;
         }
 #if defined(OV_CPU_WITH_ACL)
-        if (ov::as_type_ptr<const ov::op::v8::MaxPool>(op) || ov::as_type_ptr<const ov::op::v14::MaxPool>(op)) {
+        if (ov::is_type_any_of<const ov::op::v8::MaxPool, const ov::op::v14::MaxPool>(op)) {
             if (ov::as_type_ptr<const ov::op::util::MaxPoolBase>(op)->get_kernel() != ov::Shape(2, 2)) {
                 errorMessage =
                     "Pooling indices returning source tensor coordinates is only supported for pool size 2x2";

src/plugins/intel_cpu/src/nodes/strided_slice.cpp (+1 -2)

@@ -20,8 +20,7 @@ namespace ov::intel_cpu::node {
 
 bool StridedSlice::isSupportedOperation(const std::shared_ptr<const ov::Node>& op, std::string& errorMessage) noexcept {
     try {
-        if (!ov::is_type<ov::op::v1::StridedSlice>(op) && !ov::is_type<ov::op::v8::Slice>(op) &&
-            !ov::is_type<ov::op::v15::SliceScatter>(op)) {
+        if (!ov::is_type_any_of<ov::op::v1::StridedSlice, ov::op::v8::Slice, ov::op::v15::SliceScatter>(op)) {
             errorMessage = "Only StridedSlice from opset1, Slice from opset8 and SliceScatter from opset15 operations "
                            "are supported.";
             return false;

src/plugins/intel_cpu/src/shape_inference/shape_inference.cpp (+3 -3)

@@ -623,9 +623,9 @@ std::shared_ptr<IStaticShapeInfer> make_shape_inference(std::shared_ptr<ov::Node
         return shape_infer;
     } else if (ov::is_type<op::util::UnaryElementwiseArithmetic>(op)) {
         return std::make_shared<ShapeInferCopy>(std::move(op));
-    } else if (ov::is_type<op::util::BinaryElementwiseArithmetic>(op) ||
-               ov::is_type<op::util::BinaryElementwiseComparison>(op) ||
-               ov::is_type<op::util::BinaryElementwiseLogical>(op)) {
+    } else if (ov::is_type_any_of<op::util::BinaryElementwiseArithmetic,
+                                  op::util::BinaryElementwiseComparison,
+                                  op::util::BinaryElementwiseLogical>(op)) {
         return std::make_shared<ShapeInferEltwise>(std::move(op));
     } else {
         return std::make_shared<ShapeInferFallback>(std::move(op));
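Here the targets are abstract base classes, which is_type handles the same way as concrete ops: the check walks the runtime type-info hierarchy via is_castable. A standalone sketch of that behavior (not from this commit; assumes the public OpenVINO headers):

    #include <memory>

    #include "openvino/core/except.hpp"
    #include "openvino/core/type.hpp"
    #include "openvino/op/add.hpp"
    #include "openvino/op/parameter.hpp"
    #include "openvino/op/util/binary_elementwise_arithmetic.hpp"

    int main() {
        auto p = std::make_shared<ov::op::v0::Parameter>(ov::element::f32, ov::Shape{2});
        auto add = std::make_shared<ov::op::v1::Add>(p, p);
        // v1::Add derives from util::BinaryElementwiseArithmetic, so the
        // base-class check succeeds for the concrete op.
        OPENVINO_ASSERT(ov::is_type<ov::op::util::BinaryElementwiseArithmetic>(add));
        return 0;
    }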

src/plugins/intel_cpu/src/transformations/cpu_opset/common/pass/stateful_sdpa_fusion.cpp (+1 -2)

@@ -177,8 +177,7 @@ StatefulSDPAFusion::StatefulSDPAFusion() {
         // the second one leads to Assign, and this is checked later
         // the third child is allowed to be a ShapeOf op only, thus one of them must be ShapeOf
         if (!std::any_of(children.begin(), children.end(), [](const ov::Input<ov::Node>& child) {
-                return ov::is_type<ov::op::v3::ShapeOf>(child.get_node()) ||
-                       ov::is_type<ov::op::v0::ShapeOf>(child.get_node());
+                return ov::is_type_any_of<ov::op::v3::ShapeOf, ov::op::v0::ShapeOf>(child.get_node());
             })) {
             return false;
         }

src/plugins/intel_cpu/src/transformations/snippets/aarch64/pass/snippets_mark_skipped.cpp (+11 -12)

@@ -72,14 +72,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
 bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node) {
     // Note: some other operations support this fusing (SoftPlus, Sqrt).
     // Skip them here, when they are supported by Snippets ARM. Ticket: 141170.
-    return ov::is_type<ov::op::v0::Abs>(node) || ov::is_type<ov::op::v0::Clamp>(node) ||
-           ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Relu>(node) ||
-           ov::is_type<ov::op::v0::Sigmoid>(node) || ov::is_type<ov::op::v0::Tanh>(node);
+    return ov::is_type_any_of<ov::op::v0::Abs,
+                              ov::op::v0::Clamp,
+                              ov::op::v0::Elu,
+                              ov::op::v0::Relu,
+                              ov::op::v0::Sigmoid,
+                              ov::op::v0::Tanh>(node);
 }
 // Convolution is a special case, since it supports peculiar fusings
 bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -93,9 +95,8 @@ bool isSuitableBinaryConvolutionParent(const std::shared_ptr<const Node>& node)
     return is_suitable_node && has_only_child;
 }
 bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node = ov::is_type<ov::op::v0::NormalizeL2>(node) ||
-                                  ov::is_type<ov::op::v1::ConvolutionBackpropData>(node) ||
-                                  ov::is_type<ov::op::v1::GroupConvolutionBackpropData>(node);
+    const bool is_suitable_node =
+        ov::is_type_any_of<ov::op::v0::NormalizeL2, ov::op::util::ConvolutionBackPropBase>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -126,8 +127,7 @@ bool isSuitableChildForFusingBias(const std::shared_ptr<const Node>& node, int f
     }
 
     auto is_suitable_parent = [](const std::shared_ptr<const Node>& node) {
-        return (ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node) ||
-                ov::is_type<ov::op::v0::MatMul>(node));
+        return (ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution, ov::op::v0::MatMul>(node));
     };
 
     for (const auto& in : node->inputs()) {
@@ -221,8 +221,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
 }
 
 auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-    return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-           ov::is_type<ov::op::v0::Result>(op);
+    return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
 }
 
 bool isSuitableMatMulWithConstantPath(const std::shared_ptr<Node>& node) {
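The isSuitableMiscParent change folds the two concrete deconvolution checks into one against ov::op::util::ConvolutionBackPropBase, their common base class; since is_type matches through the hierarchy (see the note on shape_inference.cpp above), the base-class check covers both ConvolutionBackpropData and GroupConvolutionBackpropData.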

src/plugins/intel_cpu/src/transformations/snippets/x64/pass/snippets_mark_skipped.cpp (+38 -26)

@@ -72,11 +72,16 @@ bool isFullyConnected(const std::shared_ptr<const ov::Node>& node) {
 bool SupportsFusingWithConvolution_SumActivation(const std::shared_ptr<const Node>& node) {
     // todo: Do all PReLUs are fused? Not sure about round and softRelu
     // EltwiseRoundHalfToEven, EltwiseRoundHalfAwayFromZero, EltwiseSoftRelu
-    return ov::is_type<ov::op::v0::Relu>(node) || ov::is_type<ov::op::v0::PRelu>(node) ||
-           ov::is_type<ov::op::v0::Elu>(node) || ov::is_type<ov::op::v0::Sigmoid>(node) ||
-           ov::is_type<ov::op::v5::HSigmoid>(node) || ov::is_type<ov::op::v0::Clamp>(node) ||
-           ov::is_type<ov::op::v4::Swish>(node) || ov::is_type<ov::op::v4::HSwish>(node) ||
-           ov::is_type<ov::op::v4::Mish>(node) || ov::is_type<ov::op::v5::Round>(node);
+    return ov::is_type_any_of<ov::op::v0::Relu,
+                              ov::op::v0::PRelu,
+                              ov::op::v0::Elu,
+                              ov::op::v0::Sigmoid,
+                              ov::op::v5::HSigmoid,
+                              ov::op::v0::Clamp,
+                              ov::op::v4::Swish,
+                              ov::op::v4::HSwish,
+                              ov::op::v4::Mish,
+                              ov::op::v5::Round>(node);
 }
 
 bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const int channelAxis) {
@@ -120,8 +125,7 @@ bool canBePerformedAsScaleShift(const std::shared_ptr<const Node>& node, const i
 
     // Prelu and MulAdd are still ignored
     // isConvertablePowerStatic() is ignored
-    return (ov::is_type<ov::opset1::Add>(node) || ov::is_type<ov::opset1::Multiply>(node) ||
-            ov::is_type<ov::opset1::Subtract>(node) || ov::is_type<ov::opset1::Divide>(node)) &&
+    return ov::is_type_any_of<ov::opset1::Add, ov::opset1::Multiply, ov::opset1::Subtract, ov::opset1::Divide>(node) &&
            isBroadcastableToDataInput();
 }
 
@@ -131,15 +135,18 @@ inline bool canBeMatMulExecutedInInt8(const ov::element::Type& firstType, const
 
 bool SupportsFusingWithConvolution_Simple(const std::shared_ptr<const Node>& node,
                                           const int channelAxis = DEFAULT_AXIS) {
-    return SupportsFusingWithConvolution_SumActivation(node) || ov::is_type<ov::op::v0::Tanh>(node) ||
-           ov::is_type<ov::op::v0::Gelu>(node) || ov::is_type<ov::op::v7::Gelu>(node) ||
-           ov::is_type<ov::op::v0::Abs>(node) || ov::is_type<ov::op::v0::Sqrt>(node) ||
-           ov::is_type<ov::op::v0::FakeQuantize>(node) || canBePerformedAsScaleShift(node, channelAxis);
+    return SupportsFusingWithConvolution_SumActivation(node) ||
+           ov::is_type_any_of<ov::op::v0::Tanh,
+                              ov::op::v0::Gelu,
+                              ov::op::v7::Gelu,
+                              ov::op::v0::Abs,
+                              ov::op::v0::Sqrt,
+                              ov::op::v0::FakeQuantize>(node) ||
+           canBePerformedAsScaleShift(node, channelAxis);
 }
 // Convolution is a special case, since it supports peculiar fusings
 bool isSuitableConvolutionParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v1::Convolution>(node) || ov::is_type<ov::op::v1::GroupConvolution>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v1::Convolution, ov::op::v1::GroupConvolution>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -168,14 +175,18 @@ int getChannelAxis(const ov::AxisSet& axes, bool keep_dims) {
     return channelAxis;
 }
 bool isSuitableMiscParent(const std::shared_ptr<const Node>& node) {
-    const bool is_suitable_node =
-        ov::is_type<ov::op::v0::MVN>(node) || ov::is_type<ov::op::v6::MVN>(node) ||
-        ov::is_type<ov::op::v0::NormalizeL2>(node) || ov::is_type<ov::op::v0::Interpolate>(node) ||
-        ov::is_type<ov::op::v4::Interpolate>(node) || ov::is_type<ov::op::v0::LSTMCell>(node) ||
-        ov::is_type<ov::op::v4::LSTMCell>(node) || ov::is_type<ov::opset1::ConvolutionBackpropData>(node) ||
-        ov::is_type<ov::op::util::ArithmeticReductionKeepDims>(node) ||
-        ov::is_type<ov::opset1::GroupConvolutionBackpropData>(node) || ov::is_type<ov::opset1::AvgPool>(node) ||
-        ov::is_type<ov::op::v14::AvgPool>(node);
+    const bool is_suitable_node = ov::is_type_any_of<ov::op::v0::MVN,
+                                                     ov::op::v6::MVN,
+                                                     ov::op::v0::NormalizeL2,
+                                                     ov::op::v0::Interpolate,
+                                                     ov::op::v4::Interpolate,
+                                                     ov::op::v0::LSTMCell,
+                                                     ov::op::v4::LSTMCell,
+                                                     ov::opset1::ConvolutionBackpropData,
+                                                     ov::op::util::ArithmeticReductionKeepDims,
+                                                     ov::opset1::GroupConvolutionBackpropData,
+                                                     ov::opset1::AvgPool,
+                                                     ov::op::v14::AvgPool>(node);
     // has a single output, connected to a single child
     const auto out = node->outputs();
     const bool has_only_child = (out.size() == 1) && (out[0].get_target_inputs().size() == 1);
@@ -307,9 +318,11 @@ bool isSuitableChildForFusingMatMul(const std::shared_ptr<const Node>& node,
 
     // MatMul specific checks from ::canFuse()
     if (one_of(updatedChainType, NodeFusingType::FusedWithMatMul, NodeFusingType::FusedWithMatMulI8)) {
-        const auto is_binary_eltwise = ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Multiply>(node) ||
-                                       ov::is_type<ov::op::v1::Subtract>(node) ||
-                                       ov::is_type<ov::op::v1::Divide>(node) || ov::is_type<ov::op::v0::PRelu>(node);
+        const auto is_binary_eltwise = ov::is_type_any_of<ov::op::v1::Add,
+                                                          ov::op::v1::Multiply,
+                                                          ov::op::v1::Subtract,
+                                                          ov::op::v1::Divide,
+                                                          ov::op::v0::PRelu>(node);
         const auto rank = node->get_output_partial_shape(0).rank();
         if (dnnl::impl::cpu::x64::mayiuse(dnnl::impl::cpu::x64::avx512_core) && rank.is_static() && is_binary_eltwise) {
             const auto const1 = ov::is_type<ov::op::v0::Constant>(node->get_input_node_shared_ptr(0));
@@ -490,8 +503,7 @@ bool isSuitableConvert(const std::shared_ptr<const Node>& node) {
 }
 
 auto is_skipped_op(const std::shared_ptr<ov::Node>& op) -> bool {
-    return ov::is_type<ov::op::v0::Constant>(op) || ov::is_type<ov::op::v0::Parameter>(op) ||
-           ov::is_type<ov::op::v0::Result>(op);
+    return ov::is_type_any_of<ov::op::v0::Constant, ov::op::v0::Parameter, ov::op::v0::Result>(op);
 }
 } // namespace

src/plugins/intel_cpu/src/transformations/tpp/x64/op/eltwise.cpp (+1 -2)

@@ -26,8 +26,7 @@ namespace ov::intel_cpu::tpp::op {
 #define UNARY_AUX_METHODS(UNARY_OP) GENERAL_AUX_METHODS(UNARY_OP, UnaryEltwiseTPP, new_args.at(0))
 
 bool EltwiseTPP::is_supported(const std::shared_ptr<ov::Node>& node) {
-    return ov::is_type<ov::op::v1::Add>(node) || ov::is_type<ov::op::v1::Subtract>(node) ||
-           ov::is_type<ov::op::v1::Multiply>(node) || ov::is_type<ov::op::v1::Divide>(node);
+    return ov::is_type_any_of<ov::op::v1::Add, ov::op::v1::Subtract, ov::op::v1::Multiply, ov::op::v1::Divide>(node);
 }
 
 bool EltwiseTPP::visit_attributes(AttributeVisitor& visitor) {

src/plugins/intel_cpu/src/transformations/tpp/x64/pass/fuse_tpp_to_equations.cpp (+2 -4)

@@ -27,10 +27,8 @@ bool FuseTPPToEquations::fuse_from_root(const NodePtr& root, const std::shared_p
     auto get_tpp_op = [](const NodePtr& n) {
         auto tpp = std::dynamic_pointer_cast<op::EltwiseTPP>(n);
         bool not_supported_op =
-            // ticket: 152532
-            ov::is_type<ov::snippets::op::ReduceBase>(n) ||
-            // ticket: 152510
-            ov::is_type<ov::op::v0::Relu>(n);
+            // tickets: 152532, 152510
+            ov::is_type_any_of<ov::snippets::op::ReduceBase, ov::op::v0::Relu>(n);
         return not_supported_op ? nullptr : tpp;
     };
 
