Skip to content

Commit dcfc6ed

Browse files
committed
kernel: event api extensions to clear events and avoid phantom events
This is a variation of the PR to handle phantom events; hopefully this gets merged into the PR to land. See-also: #89624 Signed-off-by: Charles Hardin <ckhardin@gmail.com>
1 parent 0d794df commit dcfc6ed

File tree

5 files changed

+225
-34
lines changed

5 files changed

+225
-34
lines changed

doc/kernel/services/synchronization/events.rst

Lines changed: 54 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -108,8 +108,8 @@ the event object.
108108
...
109109
}
110110
111-
Waiting for Events
112-
==================
111+
Waiting for Events (without removal)
112+
====================================
113113

114114
Threads wait for events by calling :c:func:`k_event_wait`.
115115

@@ -152,6 +152,58 @@ before continuing.
152152
...
153153
}
154154
155+
Waiting for Events (with removal)
156+
=================================
157+
158+
Threads wait for events (with atomic removal upon receipt) by calling
159+
:c:func:`k_event_wait_safe`.
160+
161+
The following code builds on the example above, and waits up to 50 milliseconds
162+
for any of the specified events to be posted. A warning is issued if none
163+
of the events are posted in time.
164+
165+
If events are received on time, then they will not be present in the event
166+
object until the next time that the events are set or posted.
167+
168+
.. code-block:: c
169+
170+
void consumer_thread(void)
171+
{
172+
uint32_t events;
173+
174+
events = k_event_wait_safe(&my_event, 0xFFF, false, K_MSEC(50));
175+
if (events == 0) {
176+
printk("No input devices are available!");
177+
} else {
178+
/* Access the desired input device(s) */
179+
...
180+
}
181+
...
182+
}
183+
184+
Alternatively, the consumer thread may desire to wait for all the events
185+
(with atomic removal upon receipt) before continuing using
186+
:c:func:`k_event_wait_all_safe`.
187+
188+
If all events are received on time, then they will not be present in the event
189+
object until the next time that the events are set or posted.
190+
191+
.. code-block:: c
192+
193+
void consumer_thread(void)
194+
{
195+
uint32_t events;
196+
197+
events = k_event_wait_all_safe(&my_event, 0x121, false, K_MSEC(50));
198+
if (events == 0) {
199+
printk("At least one input device is not available!");
200+
} else {
201+
/* Access the desired input devices */
202+
...
203+
}
204+
...
205+
}
206+
155207
Suggested Uses
156208
**************
157209

include/zephyr/kernel.h

Lines changed: 46 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2507,6 +2507,52 @@ __syscall uint32_t k_event_wait(struct k_event *event, uint32_t events,
25072507
__syscall uint32_t k_event_wait_all(struct k_event *event, uint32_t events,
25082508
bool reset, k_timeout_t timeout);
25092509

2510+
/**
2511+
* @brief Wait for any of the specified events (safe version)
2512+
*
2513+
* This call is nearly identical to @ref k_event_wait with the main difference
2514+
* being that the safe version atomically clears received events from the
2515+
* event object. This mitigates the need for calling @ref k_event_clear, or
2516+
* passing a "reset" argument, since doing so may result in lost event
2517+
* information.
2518+
*
2519+
* @param event Address of the event object
2520+
* @param events Set of desired events on which to wait
2521+
* @param reset If true, clear the set of events tracked by the event object
2522+
* before waiting. If false, do not clear the events.
2523+
* @param timeout Waiting period for the desired set of events or one of the
2524+
* special values K_NO_WAIT and K_FOREVER.
2525+
*
2526+
* @retval set of matching events upon success
2527+
* @retval 0 if no matching event was received within the specified time
2528+
*/
2529+
__syscall uint32_t k_event_wait_safe(struct k_event *event, uint32_t events,
2530+
bool reset, k_timeout_t timeout);
2531+
2532+
/**
2533+
* @brief Wait for all of the specified events (safe version)
2534+
*
2535+
* This call is nearly identical to @ref k_event_wait_all with the main
2536+
* difference being that the safe version atomically clears received events
2537+
* from the event object. This mitigates the need for calling
2538+
* @ref k_event_clear, or passing a "reset" argument, since doing so may
2539+
* result in lost event information.
2540+
*
2541+
* @param event Address of the event object
2542+
* @param events Set of desired events on which to wait
2543+
* @param reset If true, clear the set of events tracked by the event object
2544+
* before waiting. If false, do not clear the events.
2545+
* @param timeout Waiting period for the desired set of events or one of the
2546+
* special values K_NO_WAIT and K_FOREVER.
2547+
*
2548+
* @retval set of matching events upon success
2549+
* @retval 0 if all matching events were not received within the specified time
2550+
*/
2551+
__syscall uint32_t k_event_wait_all_safe(struct k_event *event, uint32_t events,
2552+
bool reset, k_timeout_t timeout);
2553+
2554+
2555+
25102556
/**
25112557
* @brief Test the events currently tracked in the event object
25122558
*

include/zephyr/kernel/thread.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -279,7 +279,7 @@ struct k_thread {
279279
#if defined(CONFIG_EVENTS)
280280
struct k_thread *next_event_link;
281281

282-
uint32_t events;
282+
uint32_t events; /* dual purpose - wait on and then received */
283283
uint32_t event_options;
284284

285285
/** true if timeout should not wake the thread */

kernel/events.c

Lines changed: 85 additions & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -38,11 +38,13 @@
3838
#define K_EVENT_WAIT_ALL 0x01 /* Wait for all events */
3939
#define K_EVENT_WAIT_MASK 0x01
4040

41-
#define K_EVENT_WAIT_RESET 0x02 /* Reset events prior to waiting */
41+
#define K_EVENT_OPTION_RESET 0x02 /* Reset events prior to waiting */
42+
#define K_EVENT_OPTION_CLEAR 0x04 /* Clear events that are received */
4243

4344
struct event_walk_data {
4445
struct k_thread *head;
4546
uint32_t events;
47+
uint32_t clear_events;
4648
};
4749

4850
#ifdef CONFIG_OBJ_CORE_EVENT
@@ -77,54 +79,64 @@ void z_vrfy_k_event_init(struct k_event *event)
7779
#endif /* CONFIG_USERSPACE */
7880

7981
/**
80-
* @brief determine if desired set of events been satisfied
82+
* @brief determine the set of events that have been satisfied
8183
*
8284
* This routine determines if the current set of events satisfies the desired
8385
* set of events. If @a wait_condition is K_EVENT_WAIT_ALL, then at least
8486
* all the desired events must be present to satisfy the request. If @a
8587
* wait_condition is not K_EVENT_WAIT_ALL, it is assumed to be K_EVENT_WAIT_ANY.
8688
* In the K_EVENT_WAIT_ANY case, the request is satisfied when any of the
8789
* current set of events are present in the desired set of events.
90+
*
91+
* @return event bits that satisfy the wait condition or zero
8892
*/
89-
static bool are_wait_conditions_met(uint32_t desired, uint32_t current,
90-
unsigned int wait_condition)
93+
static uint32_t are_wait_conditions_met(uint32_t desired, uint32_t current,
94+
unsigned int wait_condition)
9195
{
92-
uint32_t match = current & desired;
96+
uint32_t match = current & desired;
9397

94-
if (wait_condition == K_EVENT_WAIT_ALL) {
95-
return match == desired;
98+
if ((wait_condition == K_EVENT_WAIT_ALL) && (match != desired)) {
99+
/* special case for K_EVENT_WAIT_ALL */
100+
return 0;
96101
}
97102

98-
/* wait_condition assumed to be K_EVENT_WAIT_ANY */
99-
100-
return match != 0;
103+
/* return the matched events for any wait condition */
104+
return match;
101105
}
102106

103107
static int event_walk_op(struct k_thread *thread, void *data)
104108
{
105-
unsigned int wait_condition;
109+
uint32_t match;
110+
unsigned int wait_condition;
106111
struct event_walk_data *event_data = data;
107112

108113
wait_condition = thread->event_options & K_EVENT_WAIT_MASK;
109114

110-
if (are_wait_conditions_met(thread->events, event_data->events,
111-
wait_condition)) {
112-
115+
match = are_wait_conditions_met(thread->events, event_data->events,
116+
wait_condition);
117+
if (match != 0) {
113118
/*
114119
* Events create a list of threads to wake up. We do
115120
* not want z_thread_timeout to wake these threads; they
116121
* will be woken up by k_event_post_internal once they
117122
* have been processed.
118123
*/
119124
thread->no_wake_on_timeout = true;
125+
z_abort_timeout(&thread->base.timeout);
120126

121127
/*
122-
* The wait conditions have been satisfied. Add this
123-
* thread to the list of threads to unpend.
128+
* The wait conditions have been satisfied. So, set the
129+
* received events and then add this thread to the list
130+
* of threads to unpend.
131+
*
132+
* NOTE: thread event options can consume an event
124133
*/
134+
thread->events = match;
135+
if (thread->event_options & K_EVENT_OPTION_CLEAR) {
136+
event_data->clear_events |= match;
137+
}
125138
thread->next_event_link = event_data->head;
126139
event_data->head = thread;
127-
z_abort_timeout(&thread->base.timeout);
128140
}
129141

130142
return 0;
@@ -147,8 +159,7 @@ static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
147159
previous_events = event->events & events_mask;
148160
events = (event->events & ~events_mask) |
149161
(events & events_mask);
150-
event->events = events;
151-
data.events = events;
162+
152163
/*
153164
* Posting an event has the potential to wake multiple pended threads.
154165
* It is desirable to unpend all affected threads simultaneously. This
@@ -159,20 +170,24 @@ static uint32_t k_event_post_internal(struct k_event *event, uint32_t events,
159170
* 3. Ready each of the threads in the linked list
160171
*/
161172

173+
data.events = events;
174+
data.clear_events = 0;
162175
z_sched_waitq_walk(&event->wait_q, event_walk_op, &data);
163176

164177
if (data.head != NULL) {
165178
thread = data.head;
166179
struct k_thread *next;
167180
do {
168181
arch_thread_return_value_set(thread, 0);
169-
thread->events = events;
170182
next = thread->next_event_link;
171183
z_sched_wake_thread(thread, false);
172184
thread = next;
173185
} while (thread != NULL);
174186
}
175187

188+
/* stash any events not consumed */
189+
event->events = data.events & ~data.clear_events;
190+
176191
z_reschedule(&event->lock, key);
177192

178193
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, post, event, events,
@@ -262,21 +277,22 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
262277

263278
k_spinlock_key_t key = k_spin_lock(&event->lock);
264279

265-
if (options & K_EVENT_WAIT_RESET) {
280+
if (options & K_EVENT_OPTION_RESET) {
266281
event->events = 0;
267282
}
268283

269284
/* Test if the wait conditions have already been met. */
270-
271-
if (are_wait_conditions_met(events, event->events, wait_condition)) {
272-
rv = event->events;
285+
rv = are_wait_conditions_met(events, event->events, wait_condition);
286+
if (rv != 0) {
287+
/* clear the events that are matched */
288+
if (options & K_EVENT_OPTION_CLEAR) {
289+
event->events &= ~rv;
290+
}
273291

274292
k_spin_unlock(&event->lock, key);
275293
goto out;
276294
}
277295

278-
/* Match conditions have not been met. */
279-
280296
if (K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
281297
k_spin_unlock(&event->lock, key);
282298
goto out;
@@ -299,10 +315,9 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
299315
}
300316

301317
out:
302-
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event,
303-
events, rv & events);
318+
SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_event, wait, event, events, rv);
304319

305-
return rv & events;
320+
return rv;
306321
}
307322

308323
/**
@@ -311,7 +326,7 @@ static uint32_t k_event_wait_internal(struct k_event *event, uint32_t events,
311326
uint32_t z_impl_k_event_wait(struct k_event *event, uint32_t events,
312327
bool reset, k_timeout_t timeout)
313328
{
314-
uint32_t options = reset ? K_EVENT_WAIT_RESET : 0;
329+
uint32_t options = reset ? K_EVENT_OPTION_RESET : 0;
315330

316331
return k_event_wait_internal(event, events, options, timeout);
317332
}
@@ -331,7 +346,7 @@ uint32_t z_vrfy_k_event_wait(struct k_event *event, uint32_t events,
331346
uint32_t z_impl_k_event_wait_all(struct k_event *event, uint32_t events,
332347
bool reset, k_timeout_t timeout)
333348
{
334-
uint32_t options = reset ? (K_EVENT_WAIT_RESET | K_EVENT_WAIT_ALL)
349+
uint32_t options = reset ? (K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
335350
: K_EVENT_WAIT_ALL;
336351

337352
return k_event_wait_internal(event, events, options, timeout);
@@ -347,6 +362,45 @@ uint32_t z_vrfy_k_event_wait_all(struct k_event *event, uint32_t events,
347362
#include <zephyr/syscalls/k_event_wait_all_mrsh.c>
348363
#endif /* CONFIG_USERSPACE */
349364

365+
uint32_t z_impl_k_event_wait_safe(struct k_event *event, uint32_t events,
366+
bool reset, k_timeout_t timeout)
367+
{
368+
uint32_t options = reset ? (K_EVENT_OPTION_CLEAR | K_EVENT_OPTION_RESET)
369+
: K_EVENT_OPTION_CLEAR;
370+
371+
return k_event_wait_internal(event, events, options, timeout);
372+
}
373+
374+
#ifdef CONFIG_USERSPACE
375+
uint32_t z_vrfy_k_event_wait_safe(struct k_event *event, uint32_t events,
376+
bool reset, k_timeout_t timeout)
377+
{
378+
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
379+
return z_impl_k_event_wait_safe(event, events, reset, timeout);
380+
}
381+
#include <zephyr/syscalls/k_event_wait_safe_mrsh.c>
382+
#endif /* CONFIG_USERSPACE */
383+
384+
uint32_t z_impl_k_event_wait_all_safe(struct k_event *event, uint32_t events,
385+
bool reset, k_timeout_t timeout)
386+
{
387+
uint32_t options = reset ? (K_EVENT_OPTION_CLEAR |
388+
K_EVENT_OPTION_RESET | K_EVENT_WAIT_ALL)
389+
: (K_EVENT_OPTION_CLEAR | K_EVENT_WAIT_ALL);
390+
391+
return k_event_wait_internal(event, events, options, timeout);
392+
}
393+
394+
#ifdef CONFIG_USERSPACE
395+
uint32_t z_vrfy_k_event_wait_all_safe(struct k_event *event, uint32_t events,
396+
bool reset, k_timeout_t timeout)
397+
{
398+
K_OOPS(K_SYSCALL_OBJ(event, K_OBJ_EVENT));
399+
return z_impl_k_event_wait_all_safe(event, events, reset, timeout);
400+
}
401+
#include <zephyr/syscalls/k_event_wait_all_safe_mrsh.c>
402+
#endif /* CONFIG_USERSPACE */
403+
350404
#ifdef CONFIG_OBJ_CORE_EVENT
351405
static int init_event_obj_core_list(void)
352406
{

0 commit comments

Comments
 (0)