Skip to content

Commit 812ecc3

Browse files
authored
gpu: nvidia: skip unsupported gtests (#2202)
1 parent c6bef5f commit 812ecc3

7 files changed: +18 −10 lines changed

examples/CMakeLists.txt

+4-1
Original file line numberDiff line numberDiff line change
@@ -75,7 +75,10 @@ if(DNNL_SYCL_CUDA)
7575
${CMAKE_CURRENT_SOURCE_DIR}/primitives/lstm.cpp
7676
${CMAKE_CURRENT_SOURCE_DIR}/primitives/layer_normalization.cpp
7777
${CMAKE_CURRENT_SOURCE_DIR}/primitives/reorder.cpp
78-
${CMAKE_CURRENT_SOURCE_DIR}/primitives/shuffle.cpp)
78+
${CMAKE_CURRENT_SOURCE_DIR}/primitives/shuffle.cpp
79+
${CMAKE_CURRENT_SOURCE_DIR}/primitives/group_normalization.cpp
80+
${CMAKE_CURRENT_SOURCE_DIR}/primitives/vanilla_rnn.cpp
81+
${CMAKE_CURRENT_SOURCE_DIR}/primitives/lbr_gru.cpp)
7982
endif()
8083

8184
# Remove examples for Graph API if graph component is not enabled

tests/gtests/sycl/api/CMakeLists.txt

+2-2
Original file line numberDiff line numberDiff line change
@@ -24,12 +24,12 @@ register_exe(${TEST_EXE} "${TEST_SOURCES}" "test" "dnnl_gtest")
2424
# so they need to be compiled with the correct device triple
2525
if(DNNL_WITH_SYCL)
2626
if(DNNL_SYCL_GENERIC)
27-
CHECK_CXX_COMPILER_FLAG("-fsycl -fsycl-targets=nvptx64-nvidia-cuda" NVIDIA_TARGET_SUPPORTED)
27+
CHECK_CXX_COMPILER_FLAG("-fsycl -fsycl-targets=nvptx64-nvidia-cuda,spir64" NVIDIA_TARGET_SUPPORTED)
2828
endif()
2929

3030
# Enable linking SYCL kernels.
3131
if(DNNL_SYCL_CUDA OR (DNNL_SYCL_GENERIC AND NVIDIA_TARGET_SUPPORTED))
32-
append(CMAKE_CXX_FLAGS "-fsycl-targets=nvptx64-nvidia-cuda")
32+
append(CMAKE_CXX_FLAGS "-fsycl-targets=nvptx64-nvidia-cuda,spir64")
3333
append(CMAKE_CXX_FLAGS "-Wno-linker-warnings")
3434
endif()
3535

tests/gtests/test_deconvolution.cpp

+4-4
Original file line numberDiff line numberDiff line change
@@ -312,10 +312,10 @@ class deconvolution_test_t
312312
auto aa = allows_attr_t {false};
313313

314314
#ifndef DNNL_SYCL_GENERIC
315-
aa.po_binary = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
316-
aa.po_eltwise = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
317-
aa.po_prelu = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
318-
aa.po_sum = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
315+
aa.po_binary = !is_amd_gpu(eng);
316+
aa.po_eltwise = !is_amd_gpu(eng);
317+
aa.po_prelu = !is_amd_gpu(eng);
318+
aa.po_sum = !is_amd_gpu(eng);
319319
#else
320320
aa.po_eltwise = true;
321321
aa.po_sum = true;

tests/gtests/test_group_normalization.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -46,6 +46,8 @@ class group_normalization_test_t
4646

4747
protected:
4848
void SetUp() override {
49+
SKIP_IF_CUDA(
50+
true, "Group Normalization operator is not supported in CUDA");
4951
SKIP_IF_HIP(
5052
true, "Group Normalization operator is not supported in HIP");
5153
SKIP_IF_GENERIC(true,

tests/gtests/test_iface_attr_quantization.cpp

+3
Original file line numberDiff line numberDiff line change
@@ -432,6 +432,9 @@ TEST_F(attr_quantization_test_t, TestLRN) {
432432
}
433433

434434
TEST_F(attr_quantization_test_t, TestMatmul) {
435+
// cuDNN doesn't support zero points
436+
SKIP_IF_CUDA(true, "Test not supported on cuda");
437+
435438
for (auto a_dt : {data_type::f32, data_type::u8}) {
436439
const data_type b_dt
437440
= a_dt == data_type::f32 ? data_type::f32 : data_type::s8;

tests/gtests/test_matmul.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -260,9 +260,9 @@ class matmul_iface_test_t
260260
auto matmul_pd = pd_t(eng, src_md, weights_md, bia_md, dst_md, attr);
261261

262262
auto aa = allows_attr_t {false};
263-
aa.po_binary = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
263+
aa.po_binary = !is_amd_gpu(eng);
264264
aa.po_eltwise = true;
265-
aa.po_prelu = !is_nvidia_gpu(eng) && !is_amd_gpu(eng);
265+
aa.po_prelu = !is_amd_gpu(eng);
266266
aa.po_sum = true;
267267
// scales are not supported by HIP
268268
aa.scales = !is_amd_gpu(eng);

tests/gtests/test_softmax.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -168,7 +168,7 @@ class softmax_test_t : public ::testing::TestWithParam<softmax_test_params_t> {
168168
: p.aprop_kind;
169169

170170
allows_attr_t aa {false};
171-
if (!(is_nvidia_gpu(eng) || is_amd_gpu(eng))) {
171+
if (!is_amd_gpu(eng)) {
172172
aa.po_eltwise = true;
173173
aa.po_binary = true;
174174
}

Commit comments (0)