@@ -492,6 +492,13 @@ std::vector<std::string> disabledTestPatterns() {
     retVector.emplace_back(R"(.*smoke_RDFT_CPU_2D/RDFTTestCPU.CompareWithRefs/prec=f32_IS0=\[\]_TS0=\(\(16.38\)\)_constAxes=true_axes=\(\(0.1\)\)_isInverse=false.*)");
 #endif
     if (!ov::with_cpu_x86_avx512_core()) {
+        // Issue: MFDNN-12818
+        retVector.emplace_back(
+            R"(.*smoke_LPT/RecurrentCellTransformation.CompareWithRefImpl/f32_\[1,1,3\]_CPU_f32FQ_X_level=256_.*_FQ_W_level=255.*)");
+        retVector.emplace_back(
+            R"(.*smoke_static/ConvertFqRnnToQuantizedRnn.CompareWithRefs/Type=GRUSequence.*2.5.10.*2.1.4.*2.1.4.*)");
+    }
+    if (!ov::intel_cpu::hasHardwareSupport(ov::element::bf16)) {
         // on platforms which do not support bfloat16, we are disabling bf16 tests since there are no bf16 primitives,
         // tests are useless on such platforms
         retVector.emplace_back(R"(.*(BF|bf)16.*)");
@@ -504,7 +511,7 @@ std::vector<std::string> disabledTestPatterns() {
         retVector.emplace_back(R"(.*Snippets.*MHA.*)");
         retVector.emplace_back(R"(.*Snippets.*(MatMul|Matmul).*)");
     }
-    if (!ov::with_cpu_x86_avx512_core_fp16()) {
+    if (!ov::intel_cpu::hasHardwareSupport(ov::element::f16)) {
         // Skip fp16 tests for platforms that don't support fp16 precision
         retVector.emplace_back(R"(.*INFERENCE_PRECISION_HINT=(F|f)16.*)");
         retVector.emplace_back(R"(.*ConcatMultiQuerySDPTest.*f16.*)");
@@ -638,7 +645,7 @@ std::vector<std::string> disabledTestPatterns() {
         retVector.emplace_back(R"(.*smoke_Deconv_(2|3)D_NSPC_INT8_AMX/DeconvolutionLayerCPUTest.*)");
     }

-    if (ov::with_cpu_x86_avx512_core_fp16()) {
+    if (ov::with_cpu_x86_avx512_core_fp16() || ov::with_cpu_x86_avx2_vnni_2()) {
         // Issue: 143852
         retVector.emplace_back(R"(smoke_ConvertRangeSubgraphCPUTest/ConvertRangeSubgraphCPUTest\.CompareWithRefs.*Prc=f16.*)");
         retVector.emplace_back(R"((smoke|nightly)_FC_3D_FP16/.*_Fused=Multiply\(PerChannel\).*)");
@@ -649,5 +656,13 @@ std::vector<std::string> disabledTestPatterns() {
         retVector.emplace_back(R"(smoke_Conv_Sum_Broadcast_FP16/ConvSumInPlaceTest.*Relu\.Multiply\(PerChannel\)\.Add\(PerChannel\).*)");
     }

+    if (ov::with_cpu_x86_avx2_vnni_2()) {
+        // jit_gemm BF16 kernels are not supported for conv, inner_product, or matmul on avx2_vnni_2 platforms
+        retVector.emplace_back(R"(smoke_Conv_.*D_GEMM_BF16.*)");
+        retVector.emplace_back(
+            R"(smoke_GroupConv_.*D_Gemm_BF16/GroupConvolutionLayerCPUTest.CompareWithRefs.*primitive=jit_gemm.*)");
+        retVector.emplace_back(R"(smoke_.*MatMulLayerCPUTest.*INFERENCE_PRECISION_HINT=bf16.*_primitive=jit_gemm.*)");
+    }
+
     return retVector;
 }
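
For context on how these entries take effect: each string returned by disabledTestPatterns() is a regular expression that the test harness matches against the full GTest test name, and any matching test is skipped. Below is a minimal sketch of that matching step, assuming std::regex semantics; the helper name currentTestIsDisabled and its signature are hypothetical stand-ins for the shared test-utils code, which may differ.

#include <regex>
#include <string>
#include <vector>

// Hypothetical illustration of the harness-side check (not the actual
// OpenVINO test-utils implementation): returns true when the current
// test's full name matches any disabled pattern.
bool currentTestIsDisabled(const std::string& fullTestName,
                           const std::vector<std::string>& disabledPatterns) {
    for (const auto& pattern : disabledPatterns) {
        // The patterns above are written to cover the whole test name
        // (note the leading/trailing .*), so regex_match suffices here.
        if (std::regex_match(fullTestName, std::regex{pattern})) {
            return true;  // name hits a disabled pattern -> skip the test
        }
    }
    return false;
}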
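
The guard change from ov::with_cpu_x86_avx512_core_fp16() to ov::intel_cpu::hasHardwareSupport(ov::element::f16) replaces a single-ISA check with a precision-capability check, which matters on avx2_vnni_2 machines that can execute f16 without the avx512_core_fp16 flag. A hedged sketch of the intended semantics, assuming (as the hunk adding "|| ov::with_cpu_x86_avx2_vnni_2()" suggests) that f16 support follows from either ISA family; the real logic lives in the plugin's precision-support utilities and may differ:

// Illustrative only: approximates what hasHardwareSupport(ov::element::f16)
// is expected to convey on x86, per the ISA checks used in this diff.
bool f16IsExecutable() {
    return ov::with_cpu_x86_avx512_core_fp16()   // AVX-512 FP16 path
        || ov::with_cpu_x86_avx2_vnni_2();       // AVX2 VNNI-2 path (newly covered)
}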