diff --git a/zephyr/Kconfig b/zephyr/Kconfig
index 535cb6bc59ea..af6864444fda 100644
--- a/zephyr/Kconfig
+++ b/zephyr/Kconfig
@@ -82,4 +82,12 @@ config SOF_ZEPHYR_NO_SOF_CLOCK
 	  Do not use SOF clk.h interface to set the DSP clock frequency.
 	  Requires implementation of platform/lib/clk.h.
 
+config VIRTUAL_HEAP
+	bool "Use virtual memory heap to allocate buffers"
+	default y if ACE
+	default n
+	depends on ACE
+	help
+	  Enable this option to allocate buffers with the virtual memory heap
+	  allocator. It is based on a set of buffers whose sizes are predetermined.
 endif
diff --git a/zephyr/include/sof/lib/regions_mm.h b/zephyr/include/sof/lib/regions_mm.h
index 7e9d3a1a40a7..3cc8766f4d78 100644
--- a/zephyr/include/sof/lib/regions_mm.h
+++ b/zephyr/include/sof/lib/regions_mm.h
@@ -36,7 +36,7 @@
  * either be spanned on specifically configured heap or have
  * individual configs with bigger block sizes.
  */
-#define MAX_MEMORY_ALLOCATORS_COUNT 8
+#define MAX_MEMORY_ALLOCATORS_COUNT 10
 
 /* vmh_get_default_heap_config() function will try to split the region
  * down the given count. Only applicable when API client did not
diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 4a6ea68cfaad..91881d94c51f 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -18,6 +18,18 @@
 #include
 #include
 #include
+#if CONFIG_VIRTUAL_HEAP
+#include <sof/lib/regions_mm.h>
+
+struct vmh_heap *virtual_buffers_heap[CONFIG_MP_MAX_NUM_CPUS];
+struct k_spinlock vmh_lock;
+
+#undef HEAPMEM_SIZE
+/* Buffers are allocated from virtual space so we can safely reduce the heap size.
+ */
+#define HEAPMEM_SIZE 0x40000
+#endif /* CONFIG_VIRTUAL_HEAP */
+
 /* Zephyr includes */
 #include
 
@@ -193,6 +205,98 @@ static void l3_heap_free(struct k_heap *h, void *mem)
 
 #endif
 
+#if CONFIG_VIRTUAL_HEAP
+static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
+				uint32_t align)
+{
+	void *mem;
+
+	//K_SPINLOCK(&vmh_lock) {
+	//	heap->core_id = cpu_get_id();
+		mem = vmh_alloc(heap, bytes);
+	//}
+
+	if (!mem)
+		return NULL;
+
+	assert(IS_ALIGNED(mem, align));
+
+	if (flags & SOF_MEM_FLAG_COHERENT)
+		return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);
+
+	return mem;
+}
+
+/**
+ * Checks whether pointer is from virtual memory range.
+ * @param ptr Pointer to memory being checked.
+ * @return True if pointer falls into virtual memory region, false otherwise.
+ */
+static bool is_virtual_heap_pointer(void *ptr)
+{
+	uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
+				       HEAPMEM_SIZE;
+	uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;
+
+	if (!is_cached(ptr))
+		ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
+
+	return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
+		(POINTER_TO_UINT(ptr) < virtual_heap_end));
+}
+
+static void virtual_heap_free(void *ptr)
+{
+	struct vmh_heap *const heap = virtual_buffers_heap[cpu_get_id()];
+	int ret;
+
+	ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
+
+	//K_SPINLOCK(&vmh_lock) {
+	//	virtual_buffers_heap->core_id = cpu_get_id();
+		ret = vmh_free(heap, ptr);
+	//}
+
+	if (ret)
+		tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
+}
+
+static const struct vmh_heap_config static_hp_buffers = {
+	{
+		{ 128, 32},
+		{ 512, 8},
+		{ 1024, 44},
+		{ 2048, 8},
+		{ 4096, 11},
+		{ 8192, 10},
+		{ 65536, 3},
+		{ 131072, 1},
+		{ 524288, 1} /* buffer for kpb */
+	},
+};
+
+static int virtual_heap_init(void)
+{
+	int core;
+
+	k_spinlock_init(&vmh_lock);
+
+	for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) {
+		struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP,
+						      core, false);
+		if (!heap)
+			tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core);
+
+		virtual_buffers_heap[core] = heap;
+	}
+
+	return 0;
+}
+
+SYS_INIT(virtual_heap_init, POST_KERNEL, 1);
+
+#endif /* CONFIG_VIRTUAL_HEAP */
+
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
 {
 	k_spinlock_key_t key;
@@ -384,6 +488,9 @@ EXPORT_SYMBOL(rzalloc);
 void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 		    uint32_t align)
 {
+#if CONFIG_VIRTUAL_HEAP
+	struct vmh_heap *virtual_heap;
+#endif
 	struct k_heap *heap;
 
 	/* choose a heap */
@@ -399,6 +506,14 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 		heap = &sof_heap;
 	}
 
+#if CONFIG_VIRTUAL_HEAP
+	/* Use virtual heap if it is available */
+	virtual_heap = virtual_buffers_heap[cpu_get_id()];
+	if (virtual_heap) {
+		return virtual_heap_alloc(virtual_heap, flags, caps, bytes, align);
+	}
+#endif /* CONFIG_VIRTUAL_HEAP */
+
 	if (flags & SOF_MEM_FLAG_COHERENT)
 		return heap_alloc_aligned(heap, align, bytes);
 
@@ -421,6 +536,13 @@ void rfree(void *ptr)
 	}
 #endif
 
+#if CONFIG_VIRTUAL_HEAP
+	if (is_virtual_heap_pointer(ptr)) {
+		virtual_heap_free(ptr);
+		return;
+	}
+#endif
+
 	heap_free(&sof_heap, ptr);
 }
 EXPORT_SYMBOL(rfree);
@@ -432,7 +554,6 @@ static int heap_init(void)
 
 #if CONFIG_L3_HEAP
 	sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
 #endif
-
 	return 0;
 }