
Commit 6b5d614

Fixed test_testing scripts with master

Committed Sep 11, 2024 · 1 parent 6699708

2 files changed: +137 -106 lines changed


src/python_testing/test_testing/MockTestRunner.py
+9 -20
@@ -32,13 +32,14 @@ async def __call__(self, *args, **kwargs):
 
 class MockTestRunner():
 
-    def __init__(self, filename: str, classname: str, test: str, endpoint: int = 0, pics: dict[str, bool] = None, paa_trust_store_path=None):
-        self.test = test
-        self.endpoint = endpoint
-        self.pics = pics
+    def __init__(self, filename: str, classname: str, test: str, endpoint: int = None, pics: dict[str, bool] = None, paa_trust_store_path=None):
         self.kvs_storage = 'kvs_admin.json'
-        self.paa_path = paa_trust_store_path
+        self.config = MatterTestConfig(endpoint=endpoint, paa_trust_store_path=paa_trust_store_path,
+                                       pics=pics, storage_path=self.kvs_storage)
         self.set_test(filename, classname, test)
+
+        self.set_test_config(self.config)
+
 
         self.stack = MatterStackState(self.config)
         self.default_controller = self.stack.certificate_authorities[0].adminList[0].NewController(
             nodeId=self.config.controller_node_id,
@@ -48,33 +49,21 @@ def __init__(self, filename: str, classname: str, test: str, endpoint: int = 0,
 
     def set_test(self, filename: str, classname: str, test: str):
         self.test = test
-        self.set_test_config()
-
-        module_name = Path(os.path.basename(filename)).stem
-
-        try:
-            module = importlib.import_module(module_name)
-        except ModuleNotFoundError:
-            sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
-            module = importlib.import_module(module_name)
-
+        self.config.tests = [self.test]
+        module = importlib.import_module(Path(os.path.basename(filename)).stem)
         self.test_class = getattr(module, classname)
 
     def set_test_config(self, test_config: MatterTestConfig = MatterTestConfig()):
         self.config = test_config
         self.config.tests = [self.test]
-        self.config.endpoint = self.endpoint
         self.config.storage_path = self.kvs_storage
-        self.config.paa_trust_store_path = self.paa_path
         if not self.config.dut_node_ids:
             self.config.dut_node_ids = [1]
-        if self.pics:
-            self.config.pics = self.pics
 
     def Shutdown(self):
         self.stack.Shutdown()
 
-    def run_test_with_mock_read(self, read_cache: Attribute.AsyncReadTransaction.ReadResponse, hooks=None):
+    def run_test_with_mock_read(self, read_cache: Attribute.AsyncReadTransaction.ReadResponse, hooks=None):
         self.default_controller.Read = AsyncMock(return_value=read_cache)
         # This doesn't need to do anything since we are overriding the read anyway
         self.default_controller.FindOrEstablishPASESession = AsyncMock(return_value=None)
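For reference, a minimal sketch of how a caller can drive the refactored runner, assuming a hypothetical helper name (run_on_each_endpoint) and a fixed two-endpoint list; the runner methods and the MatterTestConfig(endpoint=...) pattern are the ones shown in the diff above and exercised by TestDecorators.py below, everything else is illustrative:

from chip.clusters import Attribute
from matter_testing_support.matter_testing import MatterTestConfig
from MockTestRunner import MockTestRunner


def run_on_each_endpoint(runner: MockTestRunner, test_name: str,
                         read_resp: Attribute.AsyncReadTransaction.ReadResponse,
                         endpoints=(0, 1)) -> bool:
    # Point the runner at one test case inside TestDecorators.py.
    runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
    all_ok = True
    for endpoint in endpoints:
        # The endpoint now lives in MatterTestConfig instead of on the runner itself,
        # so it is selected per run through set_test_config().
        runner.set_test_config(MatterTestConfig(endpoint=endpoint))
        all_ok = runner.run_test_with_mock_read(read_resp) and all_ok
    return all_ok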

src/python_testing/test_testing/TestDecorators.py
+128 -86
@@ -30,8 +30,9 @@
 
 import chip.clusters as Clusters
 from chip.clusters import Attribute
-from matter_testing_support.matter_testing import (MatterBaseTest, async_test_body, get_accepted_endpoints_for_test, has_attribute,
-                                                    has_cluster, has_feature, per_endpoint_test, per_node_test)
+from matter_testing_support.matter_testing import (MatterBaseTest, MatterTestConfig, async_test_body, has_attribute, has_cluster, has_feature,
+                                                    run_if_endpoint_matches, run_on_singleton_matching_endpoint, should_run_test_on_endpoint)
+from typing import Optional
 from mobly import asserts
 from MockTestRunner import MockTestRunner

@@ -127,107 +128,101 @@ async def test_endpoints(self):
         all_endpoints = await self.default_controller.Read(self.dut_node_id, [()])
         all_endpoints = list(all_endpoints.attributes.keys())
 
-        msg = "Unexpected endpoint list returned"
-
-        endpoints = await get_accepted_endpoints_for_test(self, has_onoff)
-        asserts.assert_equal(endpoints, all_endpoints, msg)
+        msg = "Unexpected evaluation of should_run_test_on_endpoint"
+        for e in all_endpoints:
+            self.matter_test_config.endpoint = e
+            should_run = await should_run_test_on_endpoint(self, has_onoff)
+            asserts.assert_true(should_run, msg)
 
-        endpoints = await get_accepted_endpoints_for_test(self, has_onoff_onoff)
-        asserts.assert_equal(endpoints, all_endpoints, msg)
+            should_run = await should_run_test_on_endpoint(self, has_onoff_onoff)
+            asserts.assert_true(should_run, msg)
 
-        endpoints = await get_accepted_endpoints_for_test(self, has_onoff_ontime)
-        asserts.assert_equal(endpoints, [], msg)
+            should_run = await should_run_test_on_endpoint(self, has_onoff_ontime)
+            asserts.assert_false(should_run, msg)
 
-        endpoints = await get_accepted_endpoints_for_test(self, has_timesync)
-        asserts.assert_equal(endpoints, [], msg)
+            should_run = await should_run_test_on_endpoint(self, has_timesync)
+            asserts.assert_false(should_run, msg)
 
-        endpoints = await get_accepted_endpoints_for_test(self, has_timesync_utc)
-        asserts.assert_equal(endpoints, [], msg)
-
-    # This test should cause an assertion because it has pics_ method
-    @per_node_test
-    async def test_whole_node_with_pics(self):
-        pass
-
-    # This method returns the top level pics for test_whole_node_with_pics
-    # It is used to test that test_whole_node_with_pics will fail since you can't have a whole node test gated on a PICS.
-    def pics_whole_node_with_pics(self):
-        return ['EXAMPLE.S']
+            should_run = await should_run_test_on_endpoint(self, has_timesync_utc)
+            asserts.assert_false(should_run, msg)
 
     # This test should cause an assertion because it has a pics_ method
-    @per_endpoint_test(has_cluster(Clusters.OnOff))
-    async def test_per_endpoint_with_pics(self):
+    @run_if_endpoint_matches(has_cluster(Clusters.OnOff))
+    async def test_endpoint_with_pics(self):
         pass
 
-    # This method returns the top level pics for test_per_endpoint_with_pics
-    # It is used to test that test_per_endpoint_with_pics will fail since you can't have a per endpoint test gated on a PICS.
-    def pics_per_endpoint_with_pics(self):
+    # This method returns the top level pics for test_endpoint_with_pics
+    # It is used to test that test_endpoint_with_pics will fail since you can't have a per endpoint test gated on a PICS.
+    def pics_endpoint_with_pics(self):
         return ['EXAMPLE.S']
 
-    # This test should be run once
-    @per_node_test
-    async def test_whole_node_ok(self):
-        pass
-
     # This test should be run once per endpoint
-    @per_endpoint_test(has_cluster(Clusters.OnOff))
+    @run_if_endpoint_matches(has_cluster(Clusters.OnOff))
     async def test_endpoint_cluster_yes(self):
         pass
 
     # This test should be skipped since this cluster isn't on any endpoint
-    @per_endpoint_test(has_cluster(Clusters.TimeSynchronization))
+    @run_if_endpoint_matches(has_cluster(Clusters.TimeSynchronization))
     async def test_endpoint_cluster_no(self):
         pass
 
     # This test should be run once per endpoint
-    @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OnOff))
+    @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OnOff))
     async def test_endpoint_attribute_yes(self):
         pass
 
     # This test should be skipped since this attribute isn't on the supported cluster
-    @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OffWaitTime))
+    @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OffWaitTime))
     async def test_endpoint_attribute_supported_cluster_no(self):
         pass
 
     # This test should be skipped since this attribute is part of an unsupported cluster
-    @per_endpoint_test(has_attribute(Clusters.TimeSynchronization.Attributes.Granularity))
+    @run_if_endpoint_matches(has_attribute(Clusters.TimeSynchronization.Attributes.Granularity))
     async def test_endpoint_attribute_unsupported_cluster_no(self):
         pass
 
     # This test should be run once per endpoint
-    @per_endpoint_test(has_feature(Clusters.OnOff, Clusters.OnOff.Bitmaps.Feature.kLighting))
+    @run_if_endpoint_matches(has_feature(Clusters.OnOff, Clusters.OnOff.Bitmaps.Feature.kLighting))
     async def test_endpoint_feature_yes(self):
         pass
 
     # This test should be skipped since this attribute is part of an unsupported cluster
-    @per_endpoint_test(has_feature(Clusters.TimeSynchronization, Clusters.TimeSynchronization.Bitmaps.Feature.kNTPClient))
+    @run_if_endpoint_matches(has_feature(Clusters.TimeSynchronization, Clusters.TimeSynchronization.Bitmaps.Feature.kNTPClient))
     async def test_endpoint_feature_unsupported_cluster_no(self):
         pass
 
     # This test should be run since both are present
-    @per_endpoint_test(has_attribute(Clusters.OnOff.Attributes.OnOff) and has_cluster(Clusters.OnOff))
+    @run_if_endpoint_matches(has_attribute(Clusters.OnOff.Attributes.OnOff) and has_cluster(Clusters.OnOff))
     async def test_endpoint_boolean_yes(self):
         pass
 
     # This test should be skipped since we have an OnOff cluster, but no Time sync
-    @per_endpoint_test(has_cluster(Clusters.OnOff) and has_cluster(Clusters.TimeSynchronization))
+    @run_if_endpoint_matches(has_cluster(Clusters.OnOff) and has_cluster(Clusters.TimeSynchronization))
     async def test_endpoint_boolean_no(self):
         pass
 
-    @per_endpoint_test(has_cluster(Clusters.OnOff))
+    @run_if_endpoint_matches(has_cluster(Clusters.OnOff))
     async def test_fail_on_ep0(self):
         if self.matter_test_config.endpoint == 0:
             asserts.fail("Expected failure")
 
-    @per_endpoint_test(has_cluster(Clusters.OnOff))
+    @run_if_endpoint_matches(has_cluster(Clusters.OnOff))
     async def test_fail_on_ep1(self):
         if self.matter_test_config.endpoint == 1:
             asserts.fail("Expected failure")
 
-    @per_node_test
-    async def test_fail_on_whole_node(self):
+    @run_on_singleton_matching_endpoint(has_cluster(Clusters.OnOff))
+    async def test_run_on_singleton_matching_endpoint(self):
+        pass
+
+    @run_on_singleton_matching_endpoint(has_cluster(Clusters.OnOff))
+    async def test_run_on_singleton_matching_endpoint_failure(self):
         asserts.fail("Expected failure")
 
+    @run_on_singleton_matching_endpoint(has_attribute(Clusters.OnOff.Attributes.OffWaitTime))
+    async def test_no_run_on_singleton_matching_endpoint(self):
+        pass
+
 
 def main():
     failures = []
@@ -249,82 +244,129 @@ def main():
     if not ok:
         failures.append("Test case failure: test_endpoints")
 
-    test_name = 'test_whole_node_with_pics'
-    test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
-    ok = test_runner.run_test_with_mock_read(read_resp, hooks)
-    if ok:
-        failures.append(f"Did not get expected test assertion on {test_name}")
-
-    test_name = 'test_per_endpoint_with_pics'
+    test_name = 'test_endpoint_with_pics'
     test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
     ok = test_runner.run_test_with_mock_read(read_resp, hooks)
     if ok:
         failures.append(f"Did not get expected test assertion on {test_name}")
 
     # Test should run once for the whole node, regardless of the number of endpoints
-    def run_check(test_name: str, read_response: Attribute.AsyncReadTransaction.ReadResponse, expected_runs: int, expect_skip: bool) -> None:
+    def run_check(test_name: str, read_response: Attribute.AsyncReadTransaction.ReadResponse, expect_skip: bool) -> None:
         nonlocal failures
         test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
         hooks = DecoratorTestRunnerHooks()
-        ok = test_runner.run_test_with_mock_read(read_response, hooks)
-        started_ok = len(hooks.started) == expected_runs
-        skipped_ok = (hooks.skipped != []) == expect_skip
-        stopped_ok = hooks.stopped == expected_runs
+        num_endpoints = 2
+        for e in [0, 1]:
+            test_runner.set_test_config(MatterTestConfig(endpoint=e))
+            ok = test_runner.run_test_with_mock_read(read_response, hooks)
+        started_ok = len(hooks.started) == num_endpoints
+        expected_num_skips = 2 if expect_skip else 0
+        skipped_ok = len(hooks.skipped) == expected_num_skips
+        stopped_ok = hooks.stopped == num_endpoints
         if not ok or not started_ok or not skipped_ok or not stopped_ok:
             failures.append(
-                f'Expected {expected_runs} run of {test_name}, skips expected: {expect_skip}. Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}')
-
-    def check_once_per_node(test_name: str):
-        run_check(test_name, get_clusters([0]), 1, False)
-        run_check(test_name, get_clusters([0, 1]), 1, False)
+                f'Expected {num_endpoints} run of {test_name}, skips expected: {expect_skip}. Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}')
 
     def check_once_per_endpoint(test_name: str):
-        run_check(test_name, get_clusters([0]), 1, False)
-        run_check(test_name, get_clusters([0, 1]), 2, False)
+        run_check(test_name, get_clusters([0, 1]), False)
 
-    def check_skipped(test_name: str):
-        run_check(test_name, get_clusters([0]), 1, True)
-        run_check(test_name, get_clusters([0, 1]), 1, True)
+    def check_all_skipped(test_name: str):
+        run_check(test_name, get_clusters([0, 1]), True)
 
-    check_once_per_node('test_whole_node_ok')
     check_once_per_endpoint('test_endpoint_cluster_yes')
-    check_skipped('test_endpoint_cluster_no')
+    check_all_skipped('test_endpoint_cluster_no')
     check_once_per_endpoint('test_endpoint_attribute_yes')
-    check_skipped('test_endpoint_attribute_supported_cluster_no')
-    check_skipped('test_endpoint_attribute_unsupported_cluster_no')
+    check_all_skipped('test_endpoint_attribute_supported_cluster_no')
+    check_all_skipped('test_endpoint_attribute_unsupported_cluster_no')
     check_once_per_endpoint('test_endpoint_feature_yes')
-    check_skipped('test_endpoint_feature_unsupported_cluster_no')
+    check_all_skipped('test_endpoint_feature_unsupported_cluster_no')
     check_once_per_endpoint('test_endpoint_boolean_yes')
-    check_skipped('test_endpoint_boolean_no')
+    check_all_skipped('test_endpoint_boolean_no')
 
     test_name = 'test_fail_on_ep0'
     test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
     read_resp = get_clusters([0, 1])
+    # fail on EP0, pass on EP1
+    test_runner.set_test_config(MatterTestConfig(endpoint=0))
     ok = test_runner.run_test_with_mock_read(read_resp, hooks)
     if ok:
         failures.append(f"Did not get expected test assertion on {test_name}")
-
-    test_name = 'test_fail_on_ep1'
-    test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
-    read_resp = get_clusters([0, 1])
+    test_runner.set_test_config(MatterTestConfig(endpoint=1))
     ok = test_runner.run_test_with_mock_read(read_resp, hooks)
-    if ok:
-        failures.append(f"Did not get expected test assertion on {test_name}")
+    if not ok:
+        failures.append(f"Unexpected failure on {test_name}")
 
     test_name = 'test_fail_on_ep1'
     test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
-    read_resp = get_clusters([0])
+    read_resp = get_clusters([0, 1])
+    # pass on EP0, fail on EP1
+    test_runner.set_test_config(MatterTestConfig(endpoint=0))
     ok = test_runner.run_test_with_mock_read(read_resp, hooks)
     if not ok:
         failures.append(f"Unexpected failure on {test_name}")
-
-    test_name = 'test_fail_on_whole_node'
-    test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
-    read_resp = get_clusters([0, 1])
+    test_runner.set_test_config(MatterTestConfig(endpoint=1))
    ok = test_runner.run_test_with_mock_read(read_resp, hooks)
    if ok:
        failures.append(f"Did not get expected test assertion on {test_name}")
 
+    def run_singleton_dynamic(test_name: str, cluster_list: list[int]) -> tuple[bool, DecoratorTestRunnerHooks]:
+        nonlocal failures
+        read_resp = get_clusters(cluster_list)
+        test_runner.set_test('TestDecorators.py', 'TestDecorators', test_name)
+        test_runner.set_test_config(MatterTestConfig(endpoint=2))
+        hooks = DecoratorTestRunnerHooks()
+        ok = test_runner.run_test_with_mock_read(read_resp, hooks)
+        # for all tests, we need to ensure the endpoint was set back to the prior values
+        if test_runner.config.endpoint != 2:
+            failures.append(f"Dynamic tests {test_name} with clusters {cluster_list} did not set endpoint back to prior")
+        # All tests should have a start and a stop
+        started_ok = len(hooks.started) == 1
+        stopped_ok = hooks.stopped == 1
+        if not started_ok or not stopped_ok:
+            failures.append(
+                f'Hooks failure on {test_name}, Runs: {hooks.started}, skips: {hooks.skipped} stops: {hooks.stopped}')
+        return ok, hooks
+
+    def expect_success_dynamic(test_name: str, cluster_list: list[int]):
+        ok, hooks = run_singleton_dynamic(test_name, cluster_list)
+        if not ok:
+            failures.append(f"Unexpected failure on {test_name} with cluster list {cluster_list}")
+        if hooks.skipped:
+            failures.append(f'Unexpected skip call on {test_name} with cluster list {cluster_list}')
+
+    def expect_failure_dynamic(test_name: str, cluster_list: list[int]):
+        ok, hooks = run_singleton_dynamic(test_name, cluster_list)
+        if ok:
+            failures.append(f"Unexpected success on {test_name} with cluster list {cluster_list}")
+        if hooks.skipped:
+            # We don't expect a skip call because the test actually failed.
+            failures.append(f'Skip called for {test_name} with cluster list {cluster_list}')
+
+    def expect_skip_dynamic(test_name: str, cluster_list: list[int]):
+        ok, hooks = run_singleton_dynamic(test_name, cluster_list)
+        if not ok:
+            failures.append(f"Unexpected failure on {test_name} with cluster list {cluster_list}")
+        if not hooks.skipped:
+            failures.append(f'Skip not called for {test_name} with cluster list {cluster_list}')
+
+    test_name = 'test_run_on_singleton_matching_endpoint'
+    expect_success_dynamic(test_name, [0])
+    expect_success_dynamic(test_name, [1])
+    # expect failure because there is more than 1 endpoint
+    expect_failure_dynamic(test_name, [0, 1])
+
+    test_name = 'test_run_on_singleton_matching_endpoint_failure'
+    expect_failure_dynamic(test_name, [0])
+    expect_failure_dynamic(test_name, [1])
+    expect_failure_dynamic(test_name, [0, 1])
+
+    test_name = 'test_no_run_on_singleton_matching_endpoint'
+    # no failure, no matches, expect skips on all endpoints
+    expect_skip_dynamic(test_name, [0])
+    expect_skip_dynamic(test_name, [1])
+    expect_skip_dynamic(test_name, [0, 1])
+
     test_runner.Shutdown()
     print(
         f"Test of Decorators: test response incorrect: {len(failures)}")
