Skip to content

Commit 92a05f1

Browse files
arathi-hlab authored and facebook-github-bot committed
Handling of skip category of tests (#2597)
Summary: Many tests were skipped either due to metadata.yaml or some other reason, but at the end the result showed "passed" instead of "skipped". These changes help distinguish genuinely skipped models from models that really passed. Pull Request resolved: #2597 Reviewed By: xuzhao9 Differential Revision: D73366712 Pulled By: atalman fbshipit-source-id: b7b346cd067d8c63f83f864dd315bc2b278cc824
1 parent 12a7578 commit 92a05f1

File tree

1 file changed

+16
-6
lines changed

1 file changed

+16
-6
lines changed

test_bench.py

+16-6
Original file line numberDiff line numberDiff line change
@@ -72,7 +72,9 @@ def test_train(self, model_path, device, benchmark):
7272
extra_args=[],
7373
metadata=get_metadata_from_yaml(model_path),
7474
):
75-
raise NotImplementedError("Test skipped by its metadata.")
75+
pytest.skip(
76+
f"Test train on {device} is skipped by its metadata skipping..."
77+
)
7678
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
7779
# api, enable after PyTorch 1.13 release
7880
if "quantized" in model_name:
@@ -90,8 +92,11 @@ def test_train(self, model_path, device, benchmark):
9092
)
9193
benchmark.extra_info["test"] = "train"
9294

93-
except NotImplementedError:
94-
print(f"Test train on {device} is not implemented, skipping...")
95+
except Exception as e:
96+
if isinstance(e, NotImplementedError):
97+
print(f"Test train on {device} is not implemented")
98+
else:
99+
pytest.fail(f"Test train failed on {device} due to this error: {e}")
95100

96101
def test_eval(self, model_path, device, benchmark, pytestconfig):
97102
try:
@@ -102,7 +107,9 @@ def test_eval(self, model_path, device, benchmark, pytestconfig):
102107
extra_args=[],
103108
metadata=get_metadata_from_yaml(model_path),
104109
):
105-
raise NotImplementedError("Test skipped by its metadata.")
110+
pytest.skip(
111+
f"Test eval on {device} is skipped by its metadata skipping..."
112+
)
106113
# TODO: skipping quantized tests for now due to BC-breaking changes for prepare
107114
# api, enable after PyTorch 1.13 release
108115
if "quantized" in model_name:
@@ -121,8 +128,11 @@ def test_eval(self, model_path, device, benchmark, pytestconfig):
121128
)
122129
benchmark.extra_info["test"] = "eval"
123130

124-
except NotImplementedError:
125-
print(f"Test eval on {device} is not implemented, skipping...")
131+
except Exception as e:
132+
if isinstance(e, NotImplementedError):
133+
print(f"Test eval on {device} is not implemented")
134+
else:
135+
pytest.fail(f"Test eval failed on {device} due to this error: {e}")
126136

127137

128138
@pytest.mark.benchmark(

0 commit comments

Comments
 (0)