|
| 1 | +/* |
| 2 | + * Copyright (c) 2025 Nordic Semiconductor ASA |
| 3 | + * |
| 4 | + * SPDX-License-Identifier: LicenseRef-Nordic-5-Clause |
| 5 | + */ |
| 6 | + |
| 7 | +#include <zephyr/kernel.h> |
| 8 | +#include <zephyr/ztest.h> |
| 9 | + |
| 10 | +/* |
| 11 | + * Taken from zephyr/tests/kernel/context/kernel.context |
| 12 | + */ |
| 13 | + |
/**
 * @brief Timeout tests
 *
 * Test the k_sleep() API, as well as the k_thread_create() ones.
 */
/* Per-thread timeout bookkeeping record, passed through timeout_order_fifo. */
struct timeout_order {
	void *link_in_fifo;	/* reserved first word, used internally by k_fifo linking */
	int32_t timeout;	/* delay in ms before the delayed thread starts */
	int timeout_order;	/* expected completion rank (ascending by timeout) */
	int q_order;		/* index of this entry within timeouts[] */
};
| 25 | + |
| 26 | +struct timeout_order timeouts[] = { |
| 27 | + {0, 1000, 2, 0}, {0, 1500, 4, 1}, {0, 500, 0, 2}, {0, 750, 1, 3}, |
| 28 | + {0, 1750, 5, 4}, {0, 2000, 6, 5}, {0, 1250, 3, 6}, |
| 29 | +}; |
| 30 | + |
#define THREAD_PRIORITY 4
/* Helper-thread stack size plus per-platform test overhead */
#define THREAD_STACKSIZE2 (384 + CONFIG_TEST_EXTRA_STACK_SIZE)
#define NUM_TIMEOUT_THREADS ARRAY_SIZE(timeouts)
/* One stack and one thread object per entry in timeouts[] */
static K_THREAD_STACK_ARRAY_DEFINE(timeout_stacks, NUM_TIMEOUT_THREADS, THREAD_STACKSIZE2);
static struct k_thread timeout_threads[NUM_TIMEOUT_THREADS];
static struct k_sem reply_timeout;	/* given by helper threads on completion */
static struct k_timer timer;		/* initialized in kernel_init_objects() */
/* NOTE(review): not static — presumably referenced from another file; confirm */
struct k_fifo timeout_order_fifo;
| 39 | + |
| 40 | +/* a thread busy waits */ |
| 41 | +static void busy_wait_thread(void *mseconds, void *arg2, void *arg3) |
| 42 | +{ |
| 43 | + uint32_t usecs; |
| 44 | + |
| 45 | + ARG_UNUSED(arg2); |
| 46 | + ARG_UNUSED(arg3); |
| 47 | + |
| 48 | + usecs = POINTER_TO_INT(mseconds) * 1000; |
| 49 | + |
| 50 | + k_busy_wait(usecs); |
| 51 | + |
| 52 | + /* FIXME: Broken on Nios II, see #22956 */ |
| 53 | +#ifndef CONFIG_NIOS2 |
| 54 | + int key = arch_irq_lock(); |
| 55 | + |
| 56 | + k_busy_wait(usecs); |
| 57 | + arch_irq_unlock(key); |
| 58 | +#endif |
| 59 | + |
| 60 | + /* |
| 61 | + * Ideally the test should verify that the correct number of ticks |
| 62 | + * have elapsed. However, when running under QEMU, the tick interrupt |
| 63 | + * may be processed on a very irregular basis, meaning that far |
| 64 | + * fewer than the expected number of ticks may occur for a given |
| 65 | + * number of clock cycles vs. what would ordinarily be expected. |
| 66 | + * |
| 67 | + * Consequently, the best we can do for now to test busy waiting is |
| 68 | + * to invoke the API and verify that it returns. (If it takes way |
| 69 | + * too long, or never returns, the main test thread may be able to |
| 70 | + * time out and report an error.) |
| 71 | + */ |
| 72 | + |
| 73 | + k_sem_give(&reply_timeout); |
| 74 | +} |
| 75 | + |
| 76 | +/* a thread sleeps and times out, then reports through a fifo */ |
| 77 | +static void thread_sleep(void *delta, void *arg2, void *arg3) |
| 78 | +{ |
| 79 | + int64_t timestamp; |
| 80 | + int timeout = POINTER_TO_INT(delta); |
| 81 | + |
| 82 | + ARG_UNUSED(arg2); |
| 83 | + ARG_UNUSED(arg3); |
| 84 | + |
| 85 | + timestamp = k_uptime_get(); |
| 86 | + k_msleep(timeout); |
| 87 | + timestamp = k_uptime_get() - timestamp; |
| 88 | + |
| 89 | + int slop = MAX(k_ticks_to_ms_floor64(2), 1); |
| 90 | + |
| 91 | + if (timestamp < timeout || timestamp > timeout + slop) { |
| 92 | + TC_ERROR("timestamp out of range, got %d\n", (int)timestamp); |
| 93 | + return; |
| 94 | + } |
| 95 | + |
| 96 | + k_sem_give(&reply_timeout); |
| 97 | +} |
| 98 | + |
| 99 | +/* a thread is started with a delay, then it reports that it ran via a fifo */ |
| 100 | +static void delayed_thread(void *num, void *arg2, void *arg3) |
| 101 | +{ |
| 102 | + struct timeout_order *timeout = &timeouts[POINTER_TO_INT(num)]; |
| 103 | + |
| 104 | + ARG_UNUSED(arg2); |
| 105 | + ARG_UNUSED(arg3); |
| 106 | + |
| 107 | + TC_PRINT(" thread (q order: %d, t/o: %d) is running\n", timeout->q_order, timeout->timeout); |
| 108 | + |
| 109 | + k_fifo_put(&timeout_order_fifo, timeout); |
| 110 | +} |
| 111 | + |
| 112 | +/** |
| 113 | + * |
| 114 | + * @brief Initialize kernel objects |
| 115 | + * |
| 116 | + * This routine initializes the kernel objects used in this module's tests. |
| 117 | + * |
| 118 | + */ |
| 119 | +void kernel_init_objects(void) |
| 120 | +{ |
| 121 | + k_sem_init(&reply_timeout, 0, UINT_MAX); |
| 122 | + k_timer_init(&timer, NULL, NULL); |
| 123 | + k_fifo_init(&timeout_order_fifo); |
| 124 | +} |
| 125 | + |
| 126 | +/** |
| 127 | + * @brief Test timeouts |
| 128 | + * |
| 129 | + * @ingroup kernel_context_tests |
| 130 | + * |
| 131 | + * @see k_busy_wait(), k_sleep() |
| 132 | + */ |
| 133 | +void test_busy_wait(void) |
| 134 | +{ |
| 135 | + int32_t timeout; |
| 136 | + int rv; |
| 137 | + |
| 138 | + timeout = 20; /* in ms */ |
| 139 | + |
| 140 | + k_thread_create(&timeout_threads[0], timeout_stacks[0], THREAD_STACKSIZE2, busy_wait_thread, |
| 141 | + INT_TO_POINTER(timeout), NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, |
| 142 | + K_NO_WAIT); |
| 143 | + |
| 144 | + rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2 * 2)); |
| 145 | + |
| 146 | + zassert_false(rv, " *** thread timed out waiting for " |
| 147 | + "k_busy_wait()"); |
| 148 | +} |
| 149 | + |
| 150 | +/** |
| 151 | + * @brief Test timeouts |
| 152 | + * |
| 153 | + * @ingroup kernel_context_tests |
| 154 | + * |
| 155 | + * @see k_sleep() |
| 156 | + */ |
| 157 | +void test_k_sleep(void) |
| 158 | +{ |
| 159 | + struct timeout_order *data; |
| 160 | + int32_t timeout; |
| 161 | + int rv; |
| 162 | + int i; |
| 163 | + |
| 164 | + timeout = 50; |
| 165 | + |
| 166 | + k_thread_create(&timeout_threads[0], timeout_stacks[0], THREAD_STACKSIZE2, thread_sleep, |
| 167 | + INT_TO_POINTER(timeout), NULL, NULL, K_PRIO_COOP(THREAD_PRIORITY), 0, |
| 168 | + K_NO_WAIT); |
| 169 | + |
| 170 | + rv = k_sem_take(&reply_timeout, K_MSEC(timeout * 2)); |
| 171 | + zassert_equal(rv, 0, |
| 172 | + " *** thread timed out waiting for thread on " |
| 173 | + "k_sleep()."); |
| 174 | + |
| 175 | + /* test k_thread_create() without cancellation */ |
| 176 | + TC_PRINT("Testing k_thread_create() without cancellation\n"); |
| 177 | + |
| 178 | + for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { |
| 179 | + k_thread_create(&timeout_threads[i], timeout_stacks[i], THREAD_STACKSIZE2, |
| 180 | + delayed_thread, INT_TO_POINTER(i), NULL, NULL, K_PRIO_COOP(5), 0, |
| 181 | + K_MSEC(timeouts[i].timeout)); |
| 182 | + } |
| 183 | + for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { |
| 184 | + data = k_fifo_get(&timeout_order_fifo, K_MSEC(750)); |
| 185 | + zassert_not_null(data, " *** timeout while waiting for" |
| 186 | + " delayed thread"); |
| 187 | + |
| 188 | + zassert_equal(data->timeout_order, i, |
| 189 | + " *** wrong delayed thread ran (got %d, " |
| 190 | + "expected %d)\n", |
| 191 | + data->timeout_order, i); |
| 192 | + |
| 193 | + TC_PRINT(" got thread (q order: %d, t/o: %d) as expected\n", data->q_order, |
| 194 | + data->timeout); |
| 195 | + } |
| 196 | + |
| 197 | + /* ensure no more thread fire */ |
| 198 | + data = k_fifo_get(&timeout_order_fifo, K_MSEC(750)); |
| 199 | + |
| 200 | + zassert_false(data, " *** got something unexpected in the fifo"); |
| 201 | + |
| 202 | + /* test k_thread_create() with cancellation */ |
| 203 | + TC_PRINT("Testing k_thread_create() with cancellations\n"); |
| 204 | + |
| 205 | + int cancellations[] = {0, 3, 4, 6}; |
| 206 | + int num_cancellations = ARRAY_SIZE(cancellations); |
| 207 | + int next_cancellation = 0; |
| 208 | + |
| 209 | + k_tid_t delayed_threads[NUM_TIMEOUT_THREADS]; |
| 210 | + |
| 211 | + for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { |
| 212 | + k_tid_t id; |
| 213 | + |
| 214 | + id = k_thread_create(&timeout_threads[i], timeout_stacks[i], THREAD_STACKSIZE2, |
| 215 | + delayed_thread, INT_TO_POINTER(i), NULL, NULL, K_PRIO_COOP(5), |
| 216 | + 0, K_MSEC(timeouts[i].timeout)); |
| 217 | + |
| 218 | + delayed_threads[i] = id; |
| 219 | + } |
| 220 | + |
| 221 | + for (i = 0; i < NUM_TIMEOUT_THREADS; i++) { |
| 222 | + int j; |
| 223 | + |
| 224 | + if (i == cancellations[next_cancellation]) { |
| 225 | + TC_PRINT(" cancelling " |
| 226 | + "[q order: %d, t/o: %d, t/o order: %d]\n", |
| 227 | + timeouts[i].q_order, timeouts[i].timeout, i); |
| 228 | + |
| 229 | + for (j = 0; j < NUM_TIMEOUT_THREADS; j++) { |
| 230 | + if (timeouts[j].timeout_order == i) { |
| 231 | + break; |
| 232 | + } |
| 233 | + } |
| 234 | + |
| 235 | + if (j < NUM_TIMEOUT_THREADS) { |
| 236 | + k_thread_abort(delayed_threads[j]); |
| 237 | + ++next_cancellation; |
| 238 | + continue; |
| 239 | + } |
| 240 | + } |
| 241 | + |
| 242 | + data = k_fifo_get(&timeout_order_fifo, K_MSEC(2750)); |
| 243 | + |
| 244 | + zassert_not_null(data, " *** timeout while waiting for" |
| 245 | + " delayed thread"); |
| 246 | + |
| 247 | + zassert_equal(data->timeout_order, i, |
| 248 | + " *** wrong delayed thread ran (got %d, " |
| 249 | + "expected %d)\n", |
| 250 | + data->timeout_order, i); |
| 251 | + |
| 252 | + TC_PRINT(" got (q order: %d, t/o: %d, t/o order %d) " |
| 253 | + "as expected\n", |
| 254 | + data->q_order, data->timeout, data->timeout_order); |
| 255 | + } |
| 256 | + |
| 257 | + zassert_equal(num_cancellations, next_cancellation, |
| 258 | + " *** wrong number of cancellations (expected %d, " |
| 259 | + "got %d\n", |
| 260 | + num_cancellations, next_cancellation); |
| 261 | + |
| 262 | + /* ensure no more thread fire */ |
| 263 | + data = k_fifo_get(&timeout_order_fifo, K_MSEC(750)); |
| 264 | + zassert_false(data, " *** got something unexpected in the fifo"); |
| 265 | +} |
0 commit comments