ace: zephyr: alloc: Use virtual memory heap for buffers
Use the virtual memory heap to allocate buffers on ACE platforms. The new
buffer allocation mechanism hands out buffers of predefined sizes. Each core
has a dedicated region of the virtual address space from which its buffers
are allocated.

Signed-off-by: Adrian Warecki <[email protected]>
softwarecki authored and kv2019i committed Dec 3, 2024
1 parent dbf7b4c commit fceac69
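
For orientation, here is a minimal caller-side sketch of how a buffer request flows through this change. It is only a sketch: the <rtos/alloc.h> header name, the SOF_MEM_CAPS_RAM capability flag and the 64-byte alignment are assumptions taken from common SOF conventions, not from this diff. With CONFIG_VIRTUAL_HEAP enabled on ACE, rballoc_align() serves the request from the calling core's virtual buffer heap instead of the system heap, and rfree() routes the pointer back to that heap.

#include <rtos/alloc.h>	/* assumed SOF allocator header, not part of this diff */

static void *get_processing_buffer(size_t bytes)
{
	/* With CONFIG_VIRTUAL_HEAP=y on ACE this request is served by the
	 * per-core virtual memory heap, which hands out one of its
	 * preconfigured fixed-size buffers large enough for 'bytes'.
	 */
	return rballoc_align(SOF_MEM_FLAG_COHERENT, SOF_MEM_CAPS_RAM,
			     bytes, 64 /* assumed cache-line alignment */);
}

static void put_processing_buffer(void *buf)
{
	/* rfree() recognizes virtual-heap pointers by address range and
	 * returns them to the owning core's heap; other pointers are
	 * handled as before.
	 */
	rfree(buf);
}
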
Showing 3 changed files with 121 additions and 1 deletion.
8 changes: 8 additions & 0 deletions zephyr/Kconfig
@@ -82,4 +82,12 @@ config SOF_ZEPHYR_NO_SOF_CLOCK
Do not use SOF clk.h interface to set the DSP clock frequency.
Requires implementation of platform/lib/clk.h.

config VIRTUAL_HEAP
bool "Use virtual memory heap to allocate a buffers"
default y if ACE
default n
depends on ACE
help
Enabling this option makes buffer allocations use the virtual memory heap allocator.
It is backed by a set of buffers of predetermined sizes.
endif
2 changes: 1 addition & 1 deletion zephyr/include/sof/lib/regions_mm.h
@@ -36,7 +36,7 @@
* either be spanned on specifically configured heap or have
* individual configs with bigger block sizes.
*/
#define MAX_MEMORY_ALLOCATORS_COUNT 8
#define MAX_MEMORY_ALLOCATORS_COUNT 10

/* vmh_get_default_heap_config() function will try to split the region
* down the given count. Only applicable when API client did not
112 changes: 112 additions & 0 deletions zephyr/lib/alloc.c
@@ -18,6 +18,18 @@
#include <sof/trace/trace.h>
#include <rtos/symbol.h>
#include <rtos/wait.h>
#if CONFIG_VIRTUAL_HEAP
#include <sof/lib/regions_mm.h>

struct vmh_heap *virtual_buffers_heap[CONFIG_MP_MAX_NUM_CPUS];
struct k_spinlock vmh_lock;

#undef HEAPMEM_SIZE
/* Buffers are allocated from virtual space so we can safely reduce the heap size. */
#define HEAPMEM_SIZE 0x40000
#endif /* CONFIG_VIRTUAL_HEAP */


/* Zephyr includes */
#include <zephyr/init.h>
@@ -193,6 +205,89 @@ static void l3_heap_free(struct k_heap *h, void *mem)

#endif

#if CONFIG_VIRTUAL_HEAP
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
uint32_t align)
{
void *mem = vmh_alloc(heap, bytes);

if (!mem)
return NULL;

assert(IS_ALIGNED(mem, align));

if (flags & SOF_MEM_FLAG_COHERENT)
return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);

return mem;
}

/**
* Checks whether pointer is from virtual memory range.
* @param ptr Pointer to memory being checked.
* @return True if pointer falls into virtual memory region, false otherwise.
*/
static bool is_virtual_heap_pointer(void *ptr)
{
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
HEAPMEM_SIZE;
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;

if (!is_cached(ptr))
ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
(POINTER_TO_UINT(ptr) < virtual_heap_end));
}

static void virtual_heap_free(void *ptr)
{
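/* The heap is looked up via the current core ID, so a buffer is expected
 * to be freed on the same core that allocated it.
 */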
struct vmh_heap *const heap = virtual_buffers_heap[cpu_get_id()];
int ret;

ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);

ret = vmh_free(heap, ptr);
if (ret)
tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
}

static const struct vmh_heap_config static_hp_buffers = {
{
{ 128, 32},
{ 512, 8},
{ 1024, 44},
{ 2048, 8},
{ 4096, 11},
{ 8192, 10},
{ 65536, 3},
{ 131072, 1},
{ 524288, 1} /* buffer for kpb */
},
};
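/* With this bucket layout each core's virtual buffer region provides 1 MiB
 * of allocatable buffer space in total (sum of size * count), not counting
 * allocator metadata or page rounding.
 */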

static int virtual_heap_init(void)
{
int core;

k_spinlock_init(&vmh_lock);

for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) {
struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP,
core, false);
if (!heap)
tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core);

virtual_buffers_heap[core] = heap;
}

return 0;
}

SYS_INIT(virtual_heap_init, POST_KERNEL, 1);

#endif /* CONFIG_VIRTUAL_HEAP */

static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
{
k_spinlock_key_t key;
@@ -384,6 +479,9 @@ EXPORT_SYMBOL(rzalloc);
void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
uint32_t align)
{
#if CONFIG_VIRTUAL_HEAP
struct vmh_heap *virtual_heap;
#endif
struct k_heap *heap;

/* choose a heap */
@@ -399,6 +497,13 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
heap = &sof_heap;
}

#if CONFIG_VIRTUAL_HEAP
/* Use virtual heap if it is available */
virtual_heap = virtual_buffers_heap[cpu_get_id()];
if (virtual_heap)
return virtual_heap_alloc(virtual_heap, flags, caps, bytes, align);
#endif /* CONFIG_VIRTUAL_HEAP */

if (flags & SOF_MEM_FLAG_COHERENT)
return heap_alloc_aligned(heap, align, bytes);

@@ -421,6 +526,13 @@ void rfree(void *ptr)
}
#endif

#if CONFIG_VIRTUAL_HEAP
if (is_virtual_heap_pointer(ptr)) {
virtual_heap_free(ptr);
return;
}
#endif

heap_free(&sof_heap, ptr);
}
EXPORT_SYMBOL(rfree);
