Skip to content

Commit cc5ea19

Browse files
authored
[Darwin] Fix flaky subscription pool test (#35606)
1 parent c06a0be commit cc5ea19

File tree

2 files changed

+21
-17
lines changed

2 files changed

+21
-17
lines changed

src/darwin/Framework/CHIP/MTRDevice_Concrete.mm

+10-3
Original file line numberDiff line numberDiff line change
@@ -1167,8 +1167,7 @@ - (void)_scheduleSubscriptionPoolWork:(dispatch_block_t)workBlock inNanoseconds:
11671167
return;
11681168
}
11691169

1170-
// Wait the required amount of time, then put it in the subscription pool to wait additionally for a spot, if needed
1171-
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, inNanoseconds), self.queue, ^{
1170+
dispatch_block_t workBlockToQueue = ^{
11721171
// In the case where a resubscription triggering event happened and already established, running the work block should result in a no-op
11731172
MTRAsyncWorkItem * workItem = [[MTRAsyncWorkItem alloc] initWithQueue:self.queue];
11741173
[workItem setReadyHandler:^(id _Nonnull context, NSInteger retryCount, MTRAsyncWorkCompletionBlock _Nonnull completion) {
@@ -1199,7 +1198,15 @@ - (void)_scheduleSubscriptionPoolWork:(dispatch_block_t)workBlock inNanoseconds:
11991198
}];
12001199
[self->_deviceController.concurrentSubscriptionPool enqueueWorkItem:workItem description:description];
12011200
MTR_LOG("%@ - enqueued in the subscription pool", self);
1202-
});
1201+
};
1202+
1203+
if (inNanoseconds > 0) {
1204+
// Wait the required amount of time, then put it in the subscription pool to wait additionally for a spot
1205+
dispatch_after(dispatch_time(DISPATCH_TIME_NOW, inNanoseconds), self.queue, workBlockToQueue);
1206+
} else {
1207+
// Put in subscription pool directly if there is no wait time
1208+
workBlockToQueue();
1209+
}
12031210
}
12041211

12051212
- (void)_handleResubscriptionNeededWithDelay:(NSNumber *)resubscriptionDelayMs

src/darwin/Framework/CHIPTests/MTRPerControllerStorageTests.m

+11-14
Original file line numberDiff line numberDiff line change
@@ -2597,20 +2597,17 @@ - (void)doTestSubscriptionPoolWithSize:(NSInteger)subscriptionPoolSize deviceOnb
25972597

25982598
// Create the base device to attempt to read from the 5th device
25992599
__auto_type * baseDeviceReadExpectation = [self expectationWithDescription:@"BaseDevice read"];
2600-
// Dispatch async to get around XCTest, so that this runs after the above devices queue their subscriptions
2601-
dispatch_async(queue, ^{
2602-
__auto_type * baseDevice = [MTRBaseDevice deviceWithNodeID:@(105) controller:controller];
2603-
__auto_type * onOffCluster = [[MTRBaseClusterOnOff alloc] initWithDevice:baseDevice endpointID:@(1) queue:queue];
2604-
[onOffCluster readAttributeOnOffWithCompletion:^(NSNumber * value, NSError * _Nullable error) {
2605-
XCTAssertNil(error);
2606-
// We expect the device to be off.
2607-
XCTAssertEqualObjects(value, @(0));
2608-
[baseDeviceReadExpectation fulfill];
2609-
os_unfair_lock_lock(&counterLock);
2610-
baseDeviceReadCompleted = YES;
2611-
os_unfair_lock_unlock(&counterLock);
2612-
}];
2613-
});
2600+
__auto_type * baseDevice = [MTRBaseDevice deviceWithNodeID:@(105) controller:controller];
2601+
__auto_type * onOffCluster = [[MTRBaseClusterOnOff alloc] initWithDevice:baseDevice endpointID:@(1) queue:queue];
2602+
[onOffCluster readAttributeOnOffWithCompletion:^(NSNumber * value, NSError * _Nullable error) {
2603+
XCTAssertNil(error);
2604+
// We expect the device to be off.
2605+
XCTAssertEqualObjects(value, @(0));
2606+
[baseDeviceReadExpectation fulfill];
2607+
os_unfair_lock_lock(&counterLock);
2608+
baseDeviceReadCompleted = YES;
2609+
os_unfair_lock_unlock(&counterLock);
2610+
}];
26142611

26152612
// Make the wait time depend on pool size and device count (can expand number of devices in the future)
26162613
NSArray * expectationsToWait = [subscriptionExpectations.allValues arrayByAddingObject:baseDeviceReadExpectation];

0 commit comments

Comments (0)