Decouple memory allocations from aligned memory allocations #87003

Merged · 6 commits · Apr 1, 2025
31 changes: 28 additions & 3 deletions include/zephyr/sys/sys_heap.h
@@ -138,6 +138,18 @@ void *sys_heap_alloc(struct sys_heap *heap, size_t bytes);
*/
void *sys_heap_aligned_alloc(struct sys_heap *heap, size_t align, size_t bytes);

/** @brief Allocate memory from a sys_heap
*
* This is a wrapper for sys_heap_alloc() whose purpose is to provide the same
* function signature as sys_heap_aligned_alloc().
*
* @param heap Heap from which to allocate
* @param align Ignored placeholder
* @param bytes Number of bytes requested
* @return Pointer to memory the caller can now use
*/
void *sys_heap_noalign_alloc(struct sys_heap *heap, size_t align, size_t bytes);
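
The body of sys_heap_noalign_alloc() lives in lib/heap and is not shown in this diff. A minimal sketch of such a wrapper, assuming it simply discards the alignment argument and forwards to sys_heap_alloc(), could look like this:

/* Sketch only (assumed implementation, not taken from this diff).
 * The align parameter exists purely so the signature matches
 * sys_heap_aligned_alloc() and can be passed through a common
 * allocator function pointer.
 */
void *sys_heap_noalign_alloc(struct sys_heap *heap, size_t align, size_t bytes)
{
	ARG_UNUSED(align);

	return sys_heap_alloc(heap, bytes);
}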

/** @brief Free memory into a sys_heap
*
* De-allocates a pointer to memory previously returned from
@@ -167,16 +179,29 @@ void sys_heap_free(struct sys_heap *heap, void *mem);
*
* @param heap Heap from which to allocate
* @param ptr Original pointer returned from a previous allocation
* @param bytes Number of bytes requested for the new block
* @return Pointer to memory the caller can now use, or NULL
*/
void *sys_heap_realloc(struct sys_heap *heap, void *ptr, size_t bytes);

/** @brief Expand the size of an existing allocation
*
* Behaves in all ways like sys_heap_realloc(), except that the returned
* memory (if available) will have a starting address in memory which
* is a multiple of the specified power-of-two alignment value in
* bytes. In-place expansion will be attempted only if the provided memory
* pointer conforms to the specified alignment value; otherwise the data
* will be moved to a new memory block.
*
* @param heap Heap from which to allocate
* @param ptr Original pointer returned from a previous allocation
* @param align Alignment in bytes, must be a power of two
* @param bytes Number of bytes requested for the new block
* @return Pointer to memory the caller can now use, or NULL
*/
void *sys_heap_aligned_realloc(struct sys_heap *heap, void *ptr,
size_t align, size_t bytes);

#define sys_heap_realloc(heap, ptr, bytes) \
sys_heap_aligned_realloc(heap, ptr, 0, bytes)
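
Existing callers of sys_heap_realloc() compile unchanged; the macro simply forwards to the aligned variant with an alignment of 0, i.e. no specific alignment request. A hypothetical call (variable names assumed for illustration):

/* Expands to sys_heap_aligned_realloc(&my_heap, blk, 0, 256). */
void *blk2 = sys_heap_realloc(&my_heap, blk, 256);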

/** @brief Return allocated memory size
*
* Returns the size, in bytes, of a block returned from a successful
2 changes: 1 addition & 1 deletion include/zephyr/tracing/tracing.h
@@ -1753,7 +1753,7 @@
* @param h Heap object
* @param timeout Timeout period
*/
#define sys_port_trace_k_heap_aligned_alloc_blocking(h, timeout)
#define sys_port_trace_k_heap_alloc_helper_blocking(h, timeout)

/**
* @brief Trace Heap align alloc attempt outcome
5 changes: 1 addition & 4 deletions kernel/include/kernel_internal.h
@@ -105,10 +105,7 @@ void *z_thread_aligned_alloc(size_t align, size_t size);
* @return A pointer to the allocated memory, or NULL if there is insufficient
* RAM in the pool or there is no pool to draw memory from
*/
static inline void *z_thread_malloc(size_t size)
{
return z_thread_aligned_alloc(0, size);
}
void *z_thread_malloc(size_t size);


#ifdef CONFIG_USE_SWITCH
42 changes: 32 additions & 10 deletions kernel/kheap.c
@@ -63,22 +63,23 @@ SYS_INIT_NAMED(statics_init_pre, statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_
SYS_INIT_NAMED(statics_init_post, statics_init, POST_KERNEL, 0);
#endif /* CONFIG_DEMAND_PAGING && !CONFIG_LINKER_GENERIC_SECTIONS_PRESENT_AT_BOOT */

void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout)
typedef void * (sys_heap_allocator_t)(struct sys_heap *heap, size_t align, size_t bytes);

static void *z_heap_alloc_helper(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout,
sys_heap_allocator_t *sys_heap_allocator)
{
k_timepoint_t end = sys_timepoint_calc(timeout);
void *ret = NULL;

k_spinlock_key_t key = k_spin_lock(&heap->lock);

SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);

__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

bool blocked_alloc = false;

while (ret == NULL) {
ret = sys_heap_aligned_alloc(&heap->heap, align, bytes);
ret = sys_heap_allocator(&heap->heap, align, bytes);

if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
@@ -88,7 +89,7 @@ void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
if (!blocked_alloc) {
blocked_alloc = true;

SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, aligned_alloc, heap, timeout);
SYS_PORT_TRACING_OBJ_FUNC_BLOCKING(k_heap, alloc_helper, heap, timeout);
} else {
/**
* @todo Trace attempt to avoid empty trace segments
@@ -100,8 +101,6 @@ void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
key = k_spin_lock(&heap->lock);
}

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);

k_spin_unlock(&heap->lock, key);
return ret;
}
@@ -110,13 +109,36 @@ void *k_heap_alloc(struct k_heap *heap, size_t bytes, k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, alloc, heap, timeout);

void *ret = k_heap_aligned_alloc(heap, sizeof(void *), bytes, timeout);
void *ret = z_heap_alloc_helper(heap, 0, bytes, timeout,
sys_heap_noalign_alloc);

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, alloc, heap, timeout, ret);

return ret;
}

void *k_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t bytes,
k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, aligned_alloc, heap, timeout);

void *ret = z_heap_alloc_helper(heap, align, bytes, timeout,
sys_heap_aligned_alloc);

/*
* modules/debug/percepio/TraceRecorder/kernelports/Zephyr/include/tracing_tracerecorder.h
* contains a concealed non-parameterized direct reference to a local
* variable through the SYS_PORT_TRACING_OBJ_FUNC_EXIT macro below
* that is no longer in scope. Provide a dummy stub for compilation
* to still succeed until that module's layering violation is fixed.
*/
bool blocked_alloc = false; ARG_UNUSED(blocked_alloc);

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap, aligned_alloc, heap, timeout, ret);

return ret;
}

void *k_heap_calloc(struct k_heap *heap, size_t num, size_t size, k_timeout_t timeout)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap, calloc, heap, timeout);
@@ -148,7 +170,7 @@ void *k_heap_realloc(struct k_heap *heap, void *ptr, size_t bytes, k_timeout_t t
__ASSERT(!arch_is_in_isr() || K_TIMEOUT_EQ(timeout, K_NO_WAIT), "");

while (ret == NULL) {
ret = sys_heap_aligned_realloc(&heap->heap, ptr, sizeof(void *), bytes);
ret = sys_heap_realloc(&heap->heap, ptr, bytes);

if (!IS_ENABLED(CONFIG_MULTITHREADING) ||
(ret != NULL) || K_TIMEOUT_EQ(timeout, K_NO_WAIT)) {
54 changes: 40 additions & 14 deletions kernel/mempool.c
@@ -9,11 +9,19 @@
#include <zephyr/sys/math_extras.h>
#include <zephyr/sys/util.h>

static void *z_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t size)
typedef void * (sys_heap_allocator_t)(struct sys_heap *heap, size_t align, size_t bytes);

static void *z_alloc_helper(struct k_heap *heap, size_t align, size_t size,
sys_heap_allocator_t sys_heap_allocator)
{
void *mem;
struct k_heap **heap_ref;
size_t __align;
k_spinlock_key_t key;

/* A power of 2 as well as 0 is OK */
__ASSERT((align & (align - 1)) == 0,
"align must be a power of 2");
Comment on lines +23 to +24

Collaborator:

Do we need a minimum alignment? I note that if align is 0, this assert still passes. I think that lib/heap/heap.c uses a minimum alignment of 1 and k_aligned_alloc() had a minimum alignment of sizeof(void *).

Collaborator (Author):

Minimum alignment from heap.c is already 4 bytes on 32-bit architectures and 8 bytes on 64-bit architectures.
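
To illustrate the point raised in the exchange above (not part of the diff): the expression in the assertion treats 0 the same way as a power of two, so a caller passing 0 to mean "no specific alignment" is accepted.

/* Worked values for the check, chosen for illustration only
 * (align is a size_t, so 0 - 1 wraps to SIZE_MAX):
 *   align = 0  ->  0 & SIZE_MAX = 0    (passes)
 *   align = 8  ->  8 & 7        = 0    (passes)
 *   align = 24 -> 24 & 23       = 16   (fails)
 */
__ASSERT((align & (align - 1)) == 0, "align must be a power of 2");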


/*
* Adjust the size to make room for our heap reference.
@@ -26,7 +34,14 @@ static void *z_heap_aligned_alloc(struct k_heap *heap, size_t align, size_t size
}
__align = align | sizeof(heap_ref);

mem = k_heap_aligned_alloc(heap, __align, size, K_NO_WAIT);
/*
* No point calling k_heap_alloc()/k_heap_aligned_alloc() with K_NO_WAIT.
* Better bypass them and go directly to sys_heap_*() instead.
*/
key = k_spin_lock(&heap->lock);
mem = sys_heap_allocator(&heap->heap, __align, size);
k_spin_unlock(&heap->lock, key);

if (mem == NULL) {
return NULL;
}
@@ -64,16 +79,9 @@ K_HEAP_DEFINE(_system_heap, K_HEAP_MEM_POOL_SIZE);

void *k_aligned_alloc(size_t align, size_t size)
{
__ASSERT(align / sizeof(void *) >= 1
&& (align % sizeof(void *)) == 0,
"align must be a multiple of sizeof(void *)");

__ASSERT((align & (align - 1)) == 0,
"align must be a power of 2");

SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP);

void *ret = z_heap_aligned_alloc(_SYSTEM_HEAP, align, size);
void *ret = z_alloc_helper(_SYSTEM_HEAP, align, size, sys_heap_aligned_alloc);

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_aligned_alloc, _SYSTEM_HEAP, ret);

@@ -84,7 +92,7 @@ void *k_malloc(size_t size)
{
SYS_PORT_TRACING_OBJ_FUNC_ENTER(k_heap_sys, k_malloc, _SYSTEM_HEAP);

void *ret = k_aligned_alloc(sizeof(void *), size);
void *ret = z_alloc_helper(_SYSTEM_HEAP, 0, size, sys_heap_noalign_alloc);

SYS_PORT_TRACING_OBJ_FUNC_EXIT(k_heap_sys, k_malloc, _SYSTEM_HEAP, ret);

@@ -117,6 +125,7 @@ void *k_calloc(size_t nmemb, size_t size)
void *k_realloc(void *ptr, size_t size)
{
struct k_heap *heap, **heap_ref;
k_spinlock_key_t key;
void *ret;

if (size == 0) {
@@ -137,7 +146,13 @@ void *k_realloc(void *ptr, size_t size)
return NULL;
}

ret = k_heap_realloc(heap, ptr, size, K_NO_WAIT);
/*
* No point calling k_heap_realloc() with K_NO_WAIT here.
* Better bypass it and go directly to sys_heap_realloc() instead.
*/
key = k_spin_lock(&heap->lock);
ret = sys_heap_realloc(&heap->heap, ptr, size);
k_spin_unlock(&heap->lock, key);

if (ret != NULL) {
heap_ref = ret;
@@ -157,7 +172,8 @@ void k_thread_system_pool_assign(struct k_thread *thread)
#define _SYSTEM_HEAP NULL
#endif /* K_HEAP_MEM_POOL_SIZE */

void *z_thread_aligned_alloc(size_t align, size_t size)
static void *z_thread_alloc_helper(size_t align, size_t size,
sys_heap_allocator_t sys_heap_allocator)
{
void *ret;
struct k_heap *heap;
@@ -169,10 +185,20 @@ void *z_thread_aligned_alloc(size_t align, size_t size)
}

if (heap != NULL) {
ret = z_heap_aligned_alloc(heap, align, size);
ret = z_alloc_helper(heap, align, size, sys_heap_allocator);
} else {
ret = NULL;
}

return ret;
}

void *z_thread_aligned_alloc(size_t align, size_t size)
{
return z_thread_alloc_helper(align, size, sys_heap_aligned_alloc);
}

void *z_thread_malloc(size_t size)
{
return z_thread_alloc_helper(0, size, sys_heap_noalign_alloc);
}
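
Taken together, the caller-visible effect is that plain allocations and aligned allocations now take separate paths into the heap. A hypothetical sketch (not from the diff; variable names are assumptions):

/* Plain allocation: dispatched to sys_heap_noalign_alloc(), which ignores
 * the alignment parameter entirely.
 */
void *buf = k_malloc(64);

/* Aligned allocation: still dispatched to sys_heap_aligned_alloc() with the
 * caller's power-of-two alignment.
 */
void *aligned_buf = k_aligned_alloc(64, 128);

k_free(aligned_buf);
k_free(buf);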