@@ -575,34 +575,34 @@ class gemm_gpu_tests: public ::testing::Test {
         ov::Shape in2_shape_aligned = { aligned_batch1_size, aligned_batch2_size, aligned_n_size, aligned_k_size };

         // Use dynamic padding for all BFYX dimensions
-        tensor dyn_pad_dims_input1({0, 0, 0, 0}, 0);
-        tensor dyn_pad_dims_input2({0, 0, 0, 0}, 0);
+        padding::DynamicDimsMask dyn_pad_dims_input1;
+        padding::DynamicDimsMask dyn_pad_dims_input2;

         if (n_dim_only) {
-            dyn_pad_dims_input1 = tensor({0, 0, 0, 0}, 0);
-            dyn_pad_dims_input2 = tensor({0, 0, 1, 0}, 0);
+            dyn_pad_dims_input1 = 0ul;
+            dyn_pad_dims_input2 = padding::DynamicDimsMask("1000");
         } else {
-            dyn_pad_dims_input1 = tensor({1, 1, 1, 1}, 0);
-            dyn_pad_dims_input2 = tensor({1, 1, 1, 1}, 0);
+            dyn_pad_dims_input1 = padding::DynamicDimsMask("1111");
+            dyn_pad_dims_input2 = padding::DynamicDimsMask("1111");
         }

-        auto in1_layout = layout{ {-1, -1, -1, -1}, data_types::f16, format::bfyx, padding({0, 0, 0, 0}, {0, 0, 0, 0}, 0.0f, dyn_pad_dims_input1)};
-        auto in2_layout = layout{ {-1, -1, -1, -1}, data_types::f16, format::bfyx, padding({0, 0, 0, 0}, {0, 0, 0, 0}, 0.0f, dyn_pad_dims_input2)};
+        auto in1_layout = layout{ {-1, -1, -1, -1}, data_types::f16, format::bfyx, padding({0, 0, 0, 0}, {0, 0, 0, 0}, dyn_pad_dims_input1)};
+        auto in2_layout = layout{ {-1, -1, -1, -1}, data_types::f16, format::bfyx, padding({0, 0, 0, 0}, {0, 0, 0, 0}, dyn_pad_dims_input2)};

         auto aligned_input1_mem = engine.allocate_memory({ov::PartialShape(in1_shape_aligned), data_types::f16, format::bfyx});
         auto aligned_input2_mem = engine.allocate_memory({ov::PartialShape(in2_shape_aligned), data_types::f16, format::bfyx});

         auto input1_mem = engine.reinterpret_buffer(*aligned_input1_mem, layout{ov::PartialShape(in1_shape),
                                                                                 data_types::f16,
                                                                                 format::bfyx,
-                                                                                n_dim_only ? padding({0, 0, 0, 0}, {0, 0, 0, 0}, 0.0f, dyn_pad_dims_input1) :
-                                                                                             padding({padding_size_batch1, 0, 0, 0}, {0, padding_size_batch2, padding_size_m, padding_size_k}, 0.0f, dyn_pad_dims_input1)});
+                                                                                n_dim_only ? padding({0, 0, 0, 0}, {0, 0, 0, 0}, dyn_pad_dims_input1) :
+                                                                                             padding({padding_size_batch1, 0, 0, 0}, {0, padding_size_batch2, padding_size_m, padding_size_k}, dyn_pad_dims_input1)});

         auto input2_mem = engine.reinterpret_buffer(*aligned_input2_mem, layout{ov::PartialShape(in2_shape),
                                                                                 data_types::f16,
                                                                                 format::bfyx,
-                                                                                n_dim_only ? padding({0, 0, 0, 0}, {0, 0, padding_size_n, 0}, 0.0f, dyn_pad_dims_input2) :
-                                                                                             padding({0, padding_size_batch2, 0, 0}, {padding_size_batch1, 0, padding_size_n, padding_size_k}, 0.0f, dyn_pad_dims_input2)});
+                                                                                n_dim_only ? padding({0, 0, 0, 0}, {0, 0, 0, padding_size_n}, dyn_pad_dims_input2) :
+                                                                                             padding({0, padding_size_batch2, 0, 0}, {padding_size_batch1, 0, padding_size_n, padding_size_k}, dyn_pad_dims_input2)});

         auto input_1_data = rg.generate_random_1d<ov::float16>(ov::shape_size(in1_shape), -2, 2);
         auto input_2_data = rg.generate_random_1d<ov::float16>(ov::shape_size(in2_shape), -2, 2);
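A note on the new mask type, since the diff itself does not show its definition: the assignments `dyn_pad_dims_input1 = 0ul` and `padding::DynamicDimsMask("1000")` read like `std::bitset` semantics, where the string constructor treats the leftmost character as the highest bit index. Under that assumption (the alias and its width are not confirmed here; the 4-bit stand-in below is illustrative only), a minimal sketch of what the three masks used above select:

```cpp
// Minimal sketch, assuming padding::DynamicDimsMask behaves like std::bitset.
#include <bitset>
#include <cassert>

int main() {
    using DynamicDimsMask = std::bitset<4>;  // hypothetical stand-in for the cldnn alias

    DynamicDimsMask none;               // default-constructed: no dim dynamically padded
    DynamicDimsMask n_only("1000");     // leftmost '1' -> bit 3 set, bits 2..0 clear
    DynamicDimsMask all("1111");        // every dim dynamically padded

    assert(none.none());                // matches dyn_pad_dims_input1 = 0ul
    assert(n_only.test(3) && n_only.count() == 1);
    assert(all.all());
    return 0;
}
```

Read this way, "1000" marking the highest dim index is consistent with the `n_dim_only` branch above, where the runtime pad also moved to the last shape position (`{0, 0, 0, padding_size_n}` instead of the old `{0, 0, padding_size_n, 0}`).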
@@ -1595,13 +1595,16 @@ TEST_F(gemm_gpu_tests, dynamic_padding_n_dim_only) {
     this->test_dynamic_padding(false, true);
 }

+#ifndef ENABLE_ONEDNN_FOR_GPU
+// Disable these tests for onednn because onednn does not support format_tag::cbda and format_tag::badc.
 TEST_F(gemm_gpu_tests, dynamic_padding_w_transpose_order_all_dim) {
     this->test_dynamic_padding_w_transpose_order(false, false);
 }

 TEST_F(gemm_gpu_tests, dynamic_padding_w_transpose_order_n_dim_only) {
     this->test_dynamic_padding_w_transpose_order(false, true);
 }
+#endif

 TEST_F(gemm_gpu_tests, dynamic_multi_inference_same_shape) {
     this->test_dynamic_multi_inference_same_shape(false);
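On the new guard: `ENABLE_ONEDNN_FOR_GPU` is a compile-time definition, so in oneDNN-enabled GPU builds the two transpose-order tests are not merely skipped at runtime, they are never compiled or registered with gtest at all. A toy illustration of that behavior (the `guard_demo` names are hypothetical; only gtest is assumed):

```cpp
// Toy sketch of the compile-time guard pattern used above.
#include <gtest/gtest.h>

#ifndef ENABLE_ONEDNN_FOR_GPU
// Never compiled (and so absent from --gtest_list_tests) when the macro is defined.
TEST(guard_demo, compiled_only_without_onednn) {
    SUCCEED();  // stands in for the transpose-order tests above
}
#endif

TEST(guard_demo, always_compiled) {
    SUCCEED();  // stands in for unguarded tests like dynamic_multi_inference_same_shape
}
```

Building with `-DENABLE_ONEDNN_FOR_GPU` makes the first test vanish from the binary entirely.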