#define K_EVENT_WAIT_ALL  0x01	/* Wait for all events */
#define K_EVENT_WAIT_MASK 0x01

- #define K_EVENT_WAIT_RESET 0x02	/* Reset events prior to waiting */
+ #define K_EVENT_OPTION_RESET 0x02	/* Reset events prior to waiting */
+ #define K_EVENT_OPTION_CLEAR 0x04	/* Clear events that are received */

struct event_walk_data {
	struct k_thread *head;
	uint32_t events;
+ 	uint32_t clear_events;
};

#ifdef CONFIG_OBJ_CORE_EVENT
@@ -77,54 +79,64 @@ void z_vrfy_k_event_init(struct k_event *event)
#endif /* CONFIG_USERSPACE */

/**
- * @brief determine if desired set of events been satisfied
+ * @brief determine the set of events that have been satisfied
 *
 * This routine determines if the current set of events satisfies the desired
 * set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
 * all the desired events must be present to satisfy the request. If @a
 * wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
 * In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
 * current set of events are present in the desired set of events.
+ *
+ * @return event bits that satisfy the wait condition or zero
 */
- static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
- 				    unsigned int wait_condition)
+ static uint32_t are_wait_conditions_met(uint32_t desired, uint32_t current,
+ 					unsigned int wait_condition)
{
- 	uint32_t match = current & desired;
+ 	uint32_t match = current & desired;

- 	if (wait_condition == K_EVENT_WAIT_ALL) {
- 		return match == desired;
+ 	if ((wait_condition == K_EVENT_WAIT_ALL) && (match != desired)) {
+ 		/* special case for K_EVENT_WAIT_ALL */
+ 		return 0;
	}

- 	/* wait_condition assumed to be K_EVENT_WAIT_ANY */
-
- 	return match != 0;
+ 	/* return the matched events for any wait condition */
+ 	return match;
}

static int event_walk_op(struct k_thread *thread, void *data)
{
- 	unsigned int wait_condition;
+ 	uint32_t match;
+ 	unsigned int wait_condition;
	struct event_walk_data *event_data = data;

	wait_condition = thread->event_options & K_EVENT_WAIT_MASK;

- 	if (are_wait_conditions_met(thread->events, event_data->events,
- 				    wait_condition)) {
-
+ 	match = are_wait_conditions_met(thread->events, event_data->events,
+ 					wait_condition);
+ 	if (match != 0) {
		/*
		 * Events create a list of threads to wake up. We do
		 * not want z_thread_timeout to wake these threads; they
		 * will be woken up by k_event_post_internal once they
		 * have been processed.
		 */
		thread->no_wake_on_timeout = true;
+ 		z_abort_timeout(&thread->base.timeout);

		/*
- 		 * The wait conditions have been satisfied. Add this
- 		 * thread to the list of threads to unpend.
+ 		 * The wait conditions have been satisfied. So, set the
+ 		 * received events and then add this thread to the list
+ 		 * of threads to unpend.
+ 		 *
+ 		 * NOTE: thread event options can consume an event
		 */
+ 		thread->events = match;
+ 		if (thread->event_options & K_EVENT_OPTION_CLEAR) {
+ 			event_data->clear_events |= match;
+ 		}
		thread->next_event_link = event_data->head;
		event_data->head = thread;
- 		z_abort_timeout(&thread->base.timeout);
	}

	return 0;
@@ -147,8 +159,7 @@ static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
	previous_events = event->events & events_mask;
	events = (event->events & ~events_mask) |
		 (events & events_mask);
- 	event->events = events;
- 	data.events = events;
+
	/*
	 * Posting an event has the potential to wake multiple pended threads.
	 * It is desirable to unpend all affected threads simultaneously. This
@@ -159,20 +170,24 @@ static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
	 * 3. Ready each of the threads in the linked list
	 */

+ 	data.events = events;
+ 	data.clear_events = 0;
	z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);

	if (data.head != NULL) {
		thread = data.head;
		struct k_thread *next;
		do {
			arch_thread_return_value_set(thread, 0);
- 			thread->events = events;
			next = thread->next_event_link;
			z_sched_wake_thread(thread, false);
			thread = next;
		} while (thread != NULL);
	}

+ 	/* stash any events not consumed */
+ 	event->events = data.events & ~data.clear_events;
+
	z_reschedule(&event->lock, key);

	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
@@ -262,21 +277,22 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,

	k_spinlock_key_t key = k_spin_lock(&event->lock);

- 	if (options & K_EVENT_WAIT_RESET) {
+ 	if (options & K_EVENT_OPTION_RESET) {
		event->events = 0;
	}

	/* Test if the wait conditions have already been met. */
-
- 	if (are_wait_conditions_met(events, event->events, wait_condition)) {
- 		rv = event->events;
+ 	rv = are_wait_conditions_met(events, event->events, wait_condition);
+ 	if (rv != 0) {
+ 		/* clear the events that are matched */
+ 		if (options & K_EVENT_OPTION_CLEAR) {
+ 			event->events &= ~rv;
+ 		}

		k_spin_unlock(&event->lock, key);
		goto out;
	}

- 	/* Match conditions have not been met. */
-
	if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
		k_spin_unlock(&event->lock, key);
		goto out;
@@ -299,10 +315,9 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
	}

out:
- 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
- 				       events, rv & events);
+ 	SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, rv);

- 	return rv & events;
+ 	return rv;
}

/**
@@ -311,7 +326,7 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
			     bool reset, k_timeout_t timeout)
{
- 	uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;
+ 	uint32_t options = reset ? K_EVENT_OPTION_RESET : 0;

	return k_event_wait_internal(event, events, options, timeout);
}
@@ -331,7 +346,7 @@ uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
				 bool reset, k_timeout_t timeout)
{
- 	uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
+ 	uint32_t options = reset ? (K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
				 : K_EVENT_WAIT_ALL;

	return k_event_wait_internal(event, events, options, timeout);
@@ -347,6 +362,45 @@ uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
#endif /* CONFIG_USERSPACE */

+ uint32_t z_impl_k_event_wait_safe(struct k_event *event, uint32_t events,
+ 				  bool reset, k_timeout_t timeout)
+ {
+ 	uint32_t options = reset ? (K_EVENT_OPTION_CLEAR | K_EVENT_OPTION_RESET)
+ 				 : K_EVENT_OPTION_CLEAR;
+
+ 	return k_event_wait_internal(event, events, options, timeout);
+ }
+
+ #ifdef CONFIG_USERSPACE
+ uint32_t z_vrfy_k_event_wait_safe(struct k_event *event, uint32_t events,
+ 				  bool reset, k_timeout_t timeout)
+ {
+ 	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
+ 	return z_impl_k_event_wait_safe(event, events, reset, timeout);
+ }
+ #include <zephyr/syscalls/k_event_wait_safe_mrsh.c>
+ #endif /* CONFIG_USERSPACE */
+
+ uint32_t z_impl_k_event_wait_all_safe(struct k_event *event, uint32_t events,
+ 				      bool reset, k_timeout_t timeout)
+ {
+ 	uint32_t options = reset ? (K_EVENT_OPTION_CLEAR |
+ 				    K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
+ 				 : (K_EVENT_OPTION_CLEAR | K_EVENT_WAIT_ALL);
+
+ 	return k_event_wait_internal(event, events, options, timeout);
+ }
+
+ #ifdef CONFIG_USERSPACE
+ uint32_t z_vrfy_k_event_wait_all_safe(struct k_event *event, uint32_t events,
+ 				      bool reset, k_timeout_t timeout)
+ {
+ 	K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
+ 	return z_impl_k_event_wait_all_safe(event, events, reset, timeout);
+ }
+ #include <zephyr/syscalls/k_event_wait_all_safe_mrsh.c>
+ #endif /* CONFIG_USERSPACE */
+
#ifdef CONFIG_OBJ_CORE_EVENT
static int init_event_obj_core_list(void)
{
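
As an aside (not part of the diff above), the matching rule that `are_wait_conditions_met()` now returns can be illustrated in isolation. The sketch below is a minimal standalone copy of that logic; the `K_EVENT_WAIT_ANY` value of 0x00 is assumed from the surrounding file and is not shown in this hunk. It only demonstrates which bits come back for the ANY and ALL wait conditions.

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed values, mirroring the defines near the top of kernel/events.c */
#define K_EVENT_WAIT_ANY  0x00
#define K_EVENT_WAIT_ALL  0x01

/* Standalone copy of the matching rule: returns the satisfied bits, or 0 */
static uint32_t match_events(uint32_t desired, uint32_t current,
			     unsigned int wait_condition)
{
	uint32_t match = current & desired;

	if ((wait_condition == K_EVENT_WAIT_ALL) && (match != desired)) {
		return 0;	/* ALL requested, but not every desired bit is set */
	}

	return match;		/* ANY: whatever overlapped; ALL: the full desired set */
}

int main(void)
{
	/* current = 0x5 (bits 0 and 2 posted) */
	printf("ANY, want 0x5 -> 0x%x\n", (unsigned)match_events(0x05, 0x05, K_EVENT_WAIT_ANY)); /* 0x5 */
	printf("ANY, want 0x6 -> 0x%x\n", (unsigned)match_events(0x06, 0x05, K_EVENT_WAIT_ANY)); /* 0x4 */
	printf("ALL, want 0x6 -> 0x%x\n", (unsigned)match_events(0x06, 0x05, K_EVENT_WAIT_ALL)); /* 0x0 */
	printf("ALL, want 0x5 -> 0x%x\n", (unsigned)match_events(0x05, 0x05, K_EVENT_WAIT_ALL)); /* 0x5 */
	return 0;
}
```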
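As a usage note, also not part of this change: a consumer of the new consume-on-receive variant might look roughly like the sketch below. It assumes the public `k_event_wait_safe()` declaration that the rest of this PR is expected to add to the kernel headers; the event bit names and the producer/consumer split are illustrative only.

```c
#include <zephyr/kernel.h>

#define EVT_RX_DONE  BIT(0)
#define EVT_TX_DONE  BIT(1)

/* Must be initialized once with k_event_init(&my_event) before use */
static struct k_event my_event;

/* Producer: posts an event from another thread (or an ISR) */
void producer(void)
{
	k_event_post(&my_event, EVT_RX_DONE);
}

/*
 * Consumer: waits for either event and consumes whatever matched.
 * With k_event_wait_safe() the matched bits are cleared from the event
 * object under the event lock, so there is no window for a separate
 * k_event_clear() to race with a post that lands in between.
 */
void consumer(void)
{
	uint32_t got;

	got = k_event_wait_safe(&my_event, EVT_RX_DONE | EVT_TX_DONE,
				false, K_FOREVER);

	if (got & EVT_RX_DONE) {
		/* handle RX completion */
	}
	if (got & EVT_TX_DONE) {
		/* handle TX completion */
	}
}
```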