From 1b7f78954fa60ca2aa643466b3bca5785ed55abc Mon Sep 17 00:00:00 2001 From: C Freeman Date: Wed, 22 May 2024 12:35:56 -0400 Subject: [PATCH 1/3] TC-SC-3.6: Add precondition to remove extra fabrics (#33503) * TC-SC-3.6: Add precondition to remove extra fabrics Before commissioning the other fabrics, remove pre-existing fabrics from the device because the TH does not have the ability to check for subscriptions from them. Note that this change means that any pre-existing fabrics on the device WILL NOT BE THERE after this test. This is the same behaviour as in RR-1.1. Test: Tested against all-clusters app. chip-tool pairing onnetwork-long 0x12344321 20202021 3840 chip-tool pairing open-commissioning-window 0x12344321 0 900 10000 3840 python src/python_testing/TC_SC_3_6.py --commissioning-method on-network \ --discriminator 3840 --passcode 20202021 Results (only relevant logs): [MatterTest] 05-17 07:54:32.981 INFO Pre-condition: Remove all pre-existing fabrics on the device that do not belong to the TH ... [MatterTest] 05-17 07:54:32.994 INFO Removing extra fabric at 1 from device. ... INFO:root:Final result: PASS ! 
* add missing import --- src/python_testing/TC_SC_3_6.py | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/src/python_testing/TC_SC_3_6.py b/src/python_testing/TC_SC_3_6.py index a6994cbf288539..ec09d4bb8e815e 100644 --- a/src/python_testing/TC_SC_3_6.py +++ b/src/python_testing/TC_SC_3_6.py @@ -20,6 +20,7 @@ import queue import time from threading import Event +from typing import List import chip.clusters as Clusters from chip.clusters import ClusterObjects as ClustersObjects @@ -123,6 +124,24 @@ async def test_TC_SC_3_6(self): ) asserts.assert_greater_equal(capability_minima.caseSessionsPerFabric, 3) + logging.info("Pre-condition: Remove all pre-existing fabrics on the device that do not belong to the TH") + commissioned_fabric_count: int = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, + endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.CommissionedFabrics) + + if commissioned_fabric_count > 1: + fabrics: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute( + dev_ctrl, node_id=self.dut_node_id, endpoint=0, + attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) + current_fabric_index = await self.read_single_attribute_check_success(cluster=Clusters.OperationalCredentials, attribute=Clusters.OperationalCredentials.Attributes.CurrentFabricIndex) + for fabric in fabrics: + if fabric.fabricIndex == current_fabric_index: + continue + # This is not the test client's fabric, so remove it. 
+ logging.info(f"Removing extra fabric at {fabric.fabricIndex} from device.") + await dev_ctrl.SendCommand( + self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.RemoveFabric(fabricIndex=fabric.fabricIndex)) + logging.info("Pre-conditions: use existing fabric to configure new fabrics so that total is %d fabrics" % num_fabrics_to_commission) From 4acd304e395681d1b0c5bd8913d3550fc0ff394c Mon Sep 17 00:00:00 2001 From: C Freeman Date: Mon, 15 Apr 2024 21:38:59 -0400 Subject: [PATCH 2/3] TC-RR-1.1: Fix for pre-existing fabric removal (#32921) * TC-RR-1.1: Fix for pre-existing fabric removal This test assumed that the client ordering matched to the ordering of the fabric table, but this is not the case if there is a fabric on the device before the test starts. In this case, the initial fabric is in table slot 1 with index 1, the test starts in table slot 2, with fabric index 2. Then the initial fabric is removed from table slot 1, and the test adds a new fabric. The new fabric is allocated fabric index 3, but appears in slot 1 in the table, and the order between the controllers and the fabric table as read off the device is now out of sync. Instead, name the controllers based on the fabric index rather than the index in the fabric table. TEST: commissioned all-clusters using chip-tool then ran RR-1.1. Test now passes, whereas before there was a failure. 
* Update src/python_testing/TC_RR_1_1.py Co-authored-by: Andrei Litvin * Update src/python_testing/TC_RR_1_1.py * Address review comments, fix --------- Co-authored-by: Andrei Litvin --- src/python_testing/TC_RR_1_1.py | 50 +++++++++++++++------------------ 1 file changed, 23 insertions(+), 27 deletions(-) diff --git a/src/python_testing/TC_RR_1_1.py b/src/python_testing/TC_RR_1_1.py index 98db3c3aae5fbd..72fee33bb46abb 100644 --- a/src/python_testing/TC_RR_1_1.py +++ b/src/python_testing/TC_RR_1_1.py @@ -20,6 +20,7 @@ import math import queue import random +import string import time from typing import Any, Dict, List, Set @@ -36,6 +37,10 @@ # +def generate_controller_name(fabric_index: int, controller_index: int): + return f"RD{fabric_index}{string.ascii_uppercase[controller_index]}" + + class TC_RR_1_1(MatterBaseTest): def setup_class(self): self._pseudo_random_generator = random.Random(1234) @@ -96,11 +101,6 @@ async def test_TC_RR_1_1(self): logging.info("--> User label cluster not present on any endpoitns") # Generate list of all clients names - all_names = [] - for fabric_idx in range(num_fabrics_to_commission): - for controller_idx in range(num_controllers_per_fabric): - all_names.append("RD%d%s" % (fabric_idx, chr(ord('A') + controller_idx))) - logging.info(f"Client names that will be used: {all_names}") client_list = [] # TODO: Shall we also verify SupportedFabrics attribute, and the CapabilityMinima attribute? 
@@ -119,7 +119,8 @@ async def test_TC_RR_1_1(self): node_ids = [200 + (i * 100) for i in range(num_controllers_per_fabric - 1)] # Prepare clients for first fabric, that includes the default controller - dev_ctrl.name = all_names.pop(0) + fabric_index = await self.read_single_attribute_check_success(cluster=Clusters.OperationalCredentials, attribute=Clusters.OperationalCredentials.Attributes.CurrentFabricIndex, dev_ctrl=dev_ctrl) + dev_ctrl.name = generate_controller_name(fabric_index, 0) client_list.append(dev_ctrl) if num_controllers_per_fabric > 1: @@ -130,8 +131,8 @@ async def test_TC_RR_1_1(self): privilege=Clusters.AccessControl.Enums.AccessControlEntryPrivilegeEnum.kAdminister, targetNodeId=self.dut_node_id, catTags=[0x0001_0001] ) - for controller in new_controllers: - controller.name = all_names.pop(0) + for idx, controller in enumerate(new_controllers): + controller.name = generate_controller_name(fabric_index, idx+1) client_list.extend(new_controllers) # Step 1c - Ensure there are no leftover fabrics from another process. @@ -163,11 +164,11 @@ async def test_TC_RR_1_1(self): fabrics: List[Clusters.OperationalCredentials.Structs.FabricDescriptorStruct] = await self.read_single_attribute( dev_ctrl, node_id=self.dut_node_id, endpoint=0, attribute=Clusters.OperationalCredentials.Attributes.Fabrics, fabricFiltered=False) + current_fabric_index = await self.read_single_attribute_check_success(cluster=Clusters.OperationalCredentials, attribute=Clusters.OperationalCredentials.Attributes.CurrentFabricIndex) for fabric in fabrics: - if fabric.fabricID == dev_ctrl.fabricId: + if fabric.fabricIndex == current_fabric_index: continue - - # This is not the initial client's fabric, so remove it. + # This is not the test client's fabric, so remove it. 
await dev_ctrl.SendCommand( self.dut_node_id, 0, Clusters.OperationalCredentials.Commands.RemoveFabric(fabricIndex=fabric.fabricIndex)) @@ -184,13 +185,13 @@ async def test_TC_RR_1_1(self): new_fabric_admin = new_certificate_authority.NewFabricAdmin(vendorId=0xFFF1, fabricId=admin_index) new_admin_ctrl = new_fabric_admin.NewController(nodeId=dev_ctrl.nodeId, catTags=[0x0001_0001]) - new_admin_ctrl.name = all_names.pop(0) - client_list.append(new_admin_ctrl) await CommissioningBuildingBlocks.AddNOCForNewFabricFromExisting(commissionerDevCtrl=dev_ctrl, newFabricDevCtrl=new_admin_ctrl, existingNodeId=self.dut_node_id, newNodeId=self.dut_node_id) - + fabric_index = await self.read_single_attribute_check_success(cluster=Clusters.OperationalCredentials, attribute=Clusters.OperationalCredentials.Attributes.CurrentFabricIndex, dev_ctrl=new_admin_ctrl) + new_admin_ctrl.name = generate_controller_name(fabric_index, 0) + client_list.append(new_admin_ctrl) if num_controllers_per_fabric > 1: new_controllers = await CommissioningBuildingBlocks.CreateControllersOnFabric( fabricAdmin=new_fabric_admin, @@ -200,8 +201,8 @@ async def test_TC_RR_1_1(self): targetNodeId=self.dut_node_id, catTags=[0x0001_0001] ) - for controller in new_controllers: - controller.name = all_names.pop(0) + for idx, controller in enumerate(new_controllers): + controller.name = generate_controller_name(fabric_index, idx+1) client_list.extend(new_controllers) @@ -224,10 +225,8 @@ async def test_TC_RR_1_1(self): # Step 2: Set the Label field for each fabric and BasicInformation.NodeLabel to 32 characters logging.info("Step 2: Setting the Label field for each fabric and BasicInformation.NodeLabel to 32 characters") - for table_idx in range(len(fabric_table)): - # Client is client A for each fabric to set the Label field - fabric = fabric_table[table_idx] - client_name = "RD%dA" % table_idx + for fabric in fabric_table: + client_name = generate_controller_name(fabric.fabricIndex, 0) client = 
client_by_name[client_name] # Send the UpdateLabel command @@ -451,10 +450,8 @@ async def test_TC_RR_1_1(self): # Create a list of per-fabric clients to use for filling group resources accross all fabrics. fabric_unique_clients: List[Any] = [] - for table_idx in range(len(fabric_table)): - # Client is client A for each fabric - fabric = fabric_table[table_idx] - client_name = "RD%dA" % table_idx + for fabric in fabric_table: + client_name = generate_controller_name(fabric.fabricIndex, 0) fabric_unique_clients.append(client_by_name[client_name]) # Step 13: Write and verify indicated_max_group_keys_per_fabric group keys to all fabrics. @@ -696,9 +693,8 @@ async def send_acl(self, enable_access_to_group_cluster: bool, fabric_table: List[ Clusters.OperationalCredentials.Structs.FabricDescriptorStruct]): - for table_idx, fabric in enumerate(fabric_table): - # Client is client A for each fabric - client_name = "RD%dA" % table_idx + for fabric in fabric_table: + client_name = generate_controller_name(fabric.fabricIndex, 0) client = client_by_name[client_name] acl = self.build_acl(enable_access_to_group_cluster) From 8133fe28e75c7a7b2c75e5c64dbe36b0cbc1d60a Mon Sep 17 00:00:00 2001 From: C Freeman Date: Wed, 22 May 2024 12:33:14 -0400 Subject: [PATCH 3/3] TC-OPCREDS-3.7: support DUTs with existing fabrics (#33511) * TC-OPCREDS-3.7: support DUTs with existing fabrics * Restyled by prettier-yaml --------- Co-authored-by: Restyled.io --- .../certification/Test_TC_OPCREDS_3_7.yaml | 33 ++++++++++++------- 1 file changed, 21 insertions(+), 12 deletions(-) diff --git a/src/app/tests/suites/certification/Test_TC_OPCREDS_3_7.yaml b/src/app/tests/suites/certification/Test_TC_OPCREDS_3_7.yaml index d74827b753538d..45827bd94ed279 100644 --- a/src/app/tests/suites/certification/Test_TC_OPCREDS_3_7.yaml +++ b/src/app/tests/suites/certification/Test_TC_OPCREDS_3_7.yaml @@ -65,15 +65,14 @@ tests: saveAs: TH1_Fabric_Index - label: - "Step 3.2: TH1 does a non-fabric-filtered read of the 
Fabrics - attribute from the Node Operational Credentials cluster. Save the - FabricIndex for TH1 as TH1_Fabric_Index for future use." - PICS: OPCREDS.S.A0001 + "Step 3.2: TH1 does a fabric-filtered read of the Fabrics attribute + from the Node Operational Credentials cluster. Save the FabricIndex + for TH1 as TH1_Fabric_Index for future use." identity: "alpha" command: "readAttribute" cluster: "Operational Credentials" attribute: "Fabrics" - fabricFiltered: false + fabricFiltered: true response: value: [{ "FabricIndex": TH1_Fabric_Index, "Label": "" }] constraints: @@ -251,7 +250,21 @@ tests: # verification: "" - label: - "Step 13: TH2 does a non-fabric-filtered read of the Fabrics attribute + "Step 13a: TH1 does a fabric-filtered read of the Fabrics attribute + from the Node Operational Credentials cluster" + nodeId: 0x43211234 + command: "readAttribute" + cluster: "Operational Credentials" + attribute: "Fabrics" + fabricFiltered: true + response: + value: [{ "FabricIndex": TH1_Fabric_Index, "Label": "" }] + constraints: + type: list + + # verification: "" + - label: + "Step 13b: TH2 does a fabric-filtered read of the Fabrics attribute from the Node Operational Credentials cluster" PICS: OPCREDS.S.A0001 identity: "beta" @@ -259,13 +272,9 @@ tests: command: "readAttribute" cluster: "Operational Credentials" attribute: "Fabrics" - fabricFiltered: false + fabricFiltered: true response: - value: - [ - { "FabricIndex": TH1_Fabric_Index, "Label": "" }, - { "FabricIndex": TH2_Fabric_Index, "Label": "" }, - ] + value: [{ "FabricIndex": TH2_Fabric_Index, "Label": "" }] constraints: type: list