Skip to content

Commit d461596

Browse files
Nicolas Pitre authored and anangl committed
[nrf fromtree] kernel: move current thread pointer management to core code
Define the generic _current directly and get rid of the generic arch_current_get(). The SMP default implementation is now known as z_smp_current_get(). It is no longer inlined which saves significant binary size (about 10% for some random test case I checked). Introduce z_current_thread_set() and use it in place of arch_current_thread_set() for updating the current thread pointer given this is not necessarily an architecture specific operation. The architecture specific optimization, when enabled, should only care about its own things and not have to also update the generic _current_cpu->current copy. Signed-off-by: Nicolas Pitre <npitre@baylibre.com> (cherry picked from commit 7a3124d)
1 parent cf375a2 commit d461596

File tree

9 files changed

+38
-60
lines changed

9 files changed

+38
-60
lines changed

arch/arm/core/cortex_m/thread.c

+1-1
Original file line numberDiff line numberDiff line change
@@ -522,7 +522,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
522522
{
523523
z_arm_prepare_switch_to_main();
524524

525-
arch_current_thread_set(main_thread);
525+
z_current_thread_set(main_thread);
526526

527527
#if defined(CONFIG_THREAD_LOCAL_STORAGE)
528528
/* On Cortex-M, TLS uses a global variable as pointer to

arch/posix/core/swap.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -50,7 +50,7 @@ int arch_swap(unsigned int key)
5050
_current->callee_saved.thread_status;
5151

5252

53-
arch_current_thread_set(_kernel.ready_q.cache);
53+
z_current_thread_set(_kernel.ready_q.cache);
5454
#if CONFIG_INSTRUMENT_THREAD_SWITCHING
5555
z_thread_mark_switched_in();
5656
#endif
@@ -94,7 +94,7 @@ void arch_switch_to_main_thread(struct k_thread *main_thread, char *stack_ptr,
9494
z_thread_mark_switched_out();
9595
#endif
9696

97-
arch_current_thread_set(_kernel.ready_q.cache);
97+
z_current_thread_set(_kernel.ready_q.cache);
9898

9999
#ifdef CONFIG_INSTRUMENT_THREAD_SWITCHING
100100
z_thread_mark_switched_in();

include/zephyr/arch/arch_inlines.h

-2
Original file line numberDiff line numberDiff line change
@@ -34,6 +34,4 @@
3434
#include <zephyr/arch/sparc/arch_inlines.h>
3535
#endif
3636

37-
#include <zephyr/arch/common/arch_inlines.h>
38-
3937
#endif /* ZEPHYR_INCLUDE_ARCH_INLINES_H_ */

include/zephyr/arch/common/arch_inlines.h

-45
This file was deleted.

include/zephyr/arch/riscv/arch_inlines.h

+3-4
Original file line numberDiff line numberDiff line change
@@ -28,13 +28,12 @@ static ALWAYS_INLINE _cpu_t *arch_curr_cpu(void)
2828
}
2929

3030
#ifdef CONFIG_RISCV_CURRENT_VIA_GP
31+
3132
register struct k_thread *__arch_current_thread __asm__("gp");
3233

3334
#define arch_current_thread() __arch_current_thread
34-
#define arch_current_thread_set(thread) \
35-
do { \
36-
__arch_current_thread = _current_cpu->current = (thread); \
37-
} while (0)
35+
#define arch_current_thread_set(thread) ({ __arch_current_thread = (thread); })
36+
3837
#endif /* CONFIG_RISCV_CURRENT_VIA_GP */
3938

4039
static ALWAYS_INLINE unsigned int arch_num_cpus(void)

include/zephyr/kernel_structs.h

+14-2
Original file line numberDiff line numberDiff line change
@@ -260,16 +260,28 @@ extern atomic_t _cpus_active;
260260
* another SMP CPU.
261261
*/
262262
bool z_smp_cpu_mobile(void);
263-
264263
#define _current_cpu ({ __ASSERT_NO_MSG(!z_smp_cpu_mobile()); \
265264
arch_curr_cpu(); })
266-
#define _current arch_current_thread()
265+
266+
struct k_thread *z_smp_current_get(void);
267+
#define _current z_smp_current_get()
267268

268269
#else
269270
#define _current_cpu (&_kernel.cpus[0])
270271
#define _current _kernel.cpus[0].current
271272
#endif
272273

274+
/* This is always invoked from a context where preemption is disabled */
275+
#define z_current_thread_set(thread) ({ _current_cpu->current = (thread); })
276+
277+
#ifdef CONFIG_ARCH_HAS_CUSTOM_CURRENT_IMPL
278+
#undef _current
279+
#define _current arch_current_thread()
280+
#undef z_current_thread_set
281+
#define z_current_thread_set(thread) \
282+
arch_current_thread_set(({ _current_cpu->current = (thread); }))
283+
#endif
284+
273285
/* kernel wait queue record */
274286
#ifdef CONFIG_WAITQ_SCALABLE
275287

kernel/include/kswap.h

+2-2
Original file line numberDiff line numberDiff line change
@@ -133,7 +133,7 @@ static ALWAYS_INLINE unsigned int do_swap(unsigned int key,
133133
#endif /* CONFIG_SMP */
134134
z_thread_mark_switched_out();
135135
z_sched_switch_spin(new_thread);
136-
arch_current_thread_set(new_thread);
136+
z_current_thread_set(new_thread);
137137

138138
#ifdef CONFIG_TIMESLICING
139139
z_reset_time_slice(new_thread);
@@ -259,6 +259,6 @@ static inline void z_dummy_thread_init(struct k_thread *dummy_thread)
259259
dummy_thread->base.slice_ticks = 0;
260260
#endif /* CONFIG_TIMESLICE_PER_THREAD */
261261

262-
arch_current_thread_set(dummy_thread);
262+
z_current_thread_set(dummy_thread);
263263
}
264264
#endif /* ZEPHYR_KERNEL_INCLUDE_KSWAP_H_ */

kernel/sched.c

+2-2
Original file line numberDiff line numberDiff line change
@@ -797,11 +797,11 @@ struct k_thread *z_swap_next_thread(void)
797797
}
798798

799799
#ifdef CONFIG_USE_SWITCH
800-
/* Just a wrapper around arch_current_thread_set(xxx) with tracing */
800+
/* Just a wrapper around z_current_thread_set(xxx) with tracing */
801801
static inline void set_current(struct k_thread *new_thread)
802802
{
803803
z_thread_mark_switched_out();
804-
arch_current_thread_set(new_thread);
804+
z_current_thread_set(new_thread);
805805
}
806806

807807
/**

kernel/smp.c

+14
Original file line numberDiff line numberDiff line change
@@ -248,3 +248,17 @@ bool z_smp_cpu_mobile(void)
248248
arch_irq_unlock(k);
249249
return !pinned;
250250
}
251+
252+
struct k_thread *z_smp_current_get(void)
253+
{
254+
/*
255+
* _current is a field read from _current_cpu, which can race
256+
* with preemption before it is read. We must lock local
257+
* interrupts when reading it.
258+
*/
259+
unsigned int key = arch_irq_lock();
260+
struct k_thread *t = _current_cpu->current;
261+
262+
arch_irq_unlock(key);
263+
return t;
264+
}

0 commit comments

Comments (0)