From c4cb1fc7943f0ee75ffe91d1fab9d9543e44696a Mon Sep 17 00:00:00 2001
From: Adrian Warecki
Date: Thu, 13 Jun 2024 15:59:03 +0200
Subject: [PATCH 1/2] kpb: Increase number of memory pools

The current design constraints of the vmh allocator dictate that a buffer
size must be a power of two and that each group of buffers must have a
total size that is a multiple of the page size. The current configuration
of memory regions provides 1 MB of virtual space for buffers. The
combination of these factors makes it impossible to allocate a bigger
buffer for the KPB. It is therefore necessary to increase the number of
memory pools which the KPB uses to allocate the necessary buffer space.

Signed-off-by: Adrian Warecki
---
 src/include/sof/audio/kpb.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/include/sof/audio/kpb.h b/src/include/sof/audio/kpb.h
index dbec1207c467..7cf786796ed8 100644
--- a/src/include/sof/audio/kpb.h
+++ b/src/include/sof/audio/kpb.h
@@ -46,7 +46,7 @@ struct comp_buffer;
 #define KPB_MAX_SINK_CNT (1 + KPB_MAX_NO_OF_CLIENTS)
 #define KPB_NO_OF_HISTORY_BUFFERS 2 /**< no of internal buffers */
 #define KPB_ALLOCATION_STEP 0x100
-#define KPB_NO_OF_MEM_POOLS 3
+#define KPB_NO_OF_MEM_POOLS 5
 #define KPB_BYTES_TO_FRAMES(bytes, sample_width, channels_number) \
 	((bytes) / ((KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) * \
 	(channels_number)))

From 6e98c8af54aafb944d5d515b11115f1d81d16acd Mon Sep 17 00:00:00 2001
From: Adrian Warecki
Date: Wed, 27 Mar 2024 15:08:47 +0100
Subject: [PATCH 2/2] ace: zephyr: alloc: Use virtual memory heap for buffers

Use the virtual memory heap to allocate buffers on ACE platforms. The new
buffer allocation mechanism uses buffers of predefined sizes. Each core
has a dedicated region of the virtual address space from which its
buffers are allocated.

Signed-off-by: Adrian Warecki
---
 zephyr/Kconfig                      |   8 ++
 zephyr/include/sof/lib/regions_mm.h |   2 +-
 zephyr/lib/alloc.c                  | 112 ++++++++++++++++++++++++++++
 3 files changed, 121 insertions(+), 1 deletion(-)

diff --git a/zephyr/Kconfig b/zephyr/Kconfig
index 535cb6bc59ea..af6864444fda 100644
--- a/zephyr/Kconfig
+++ b/zephyr/Kconfig
@@ -82,4 +82,12 @@
 	  Do not use SOF clk.h interface to set the DSP clock frequency.
 	  Requires implementation of platform/lib/clk.h.
 
+config VIRTUAL_HEAP
+	bool "Use virtual memory heap to allocate buffers"
+	default y if ACE
+	default n
+	depends on ACE
+	help
+	  Enabling this option will use the virtual memory heap allocator to allocate buffers.
+	  It is based on a set of buffers whose sizes are predetermined.
 endif

diff --git a/zephyr/include/sof/lib/regions_mm.h b/zephyr/include/sof/lib/regions_mm.h
index 7e9d3a1a40a7..3cc8766f4d78 100644
--- a/zephyr/include/sof/lib/regions_mm.h
+++ b/zephyr/include/sof/lib/regions_mm.h
@@ -36,7 +36,7 @@
  * either be spanned on specifically configured heap or have
  * individual configs with bigger block sizes.
  */
-#define MAX_MEMORY_ALLOCATORS_COUNT 8
+#define MAX_MEMORY_ALLOCATORS_COUNT 10
 
 /* vmh_get_default_heap_config() function will try to split the region
  * down the given count. Only applicable when API client did not
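A minimal illustration, not part of the patch: the per-core bucket table added below in zephyr/lib/alloc.c defines nine block sizes, and vmh needs one allocator per block size, which is why MAX_MEMORY_ALLOCATORS_COUNT above grows past the previous limit of 8 (10 leaves a spare slot). Assuming Zephyr's BUILD_ASSERT() is visible where the table is defined, the relationship could be checked at compile time:

/* Illustrative only: one vmh allocator is needed per block-size bucket in
 * static_hp_buffers (9 entries in the table below), so the limit must be
 * at least 9.
 */
BUILD_ASSERT(MAX_MEMORY_ALLOCATORS_COUNT >= 9,
	     "one memory allocator slot is needed per block-size bucket");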
diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 4a6ea68cfaad..f525618d7c15 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -18,6 +18,18 @@
 #include
 #include
 #include
+#if CONFIG_VIRTUAL_HEAP
+#include
+
+struct vmh_heap *virtual_buffers_heap[CONFIG_MP_MAX_NUM_CPUS];
+struct k_spinlock vmh_lock;
+
+#undef HEAPMEM_SIZE
+/* Buffers are allocated from virtual space so we can safely reduce the heap size.
+ */
+#define HEAPMEM_SIZE 0x40000
+#endif /* CONFIG_VIRTUAL_HEAP */
+
 /* Zephyr includes */
 #include
@@ -193,6 +205,89 @@ static void l3_heap_free(struct k_heap *h, void *mem)
 
 #endif
 
+#if CONFIG_VIRTUAL_HEAP
+static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
+				uint32_t align)
+{
+	void *mem = vmh_alloc(heap, bytes);
+
+	if (!mem)
+		return NULL;
+
+	assert(IS_ALIGNED(mem, align));
+
+	if (flags & SOF_MEM_FLAG_COHERENT)
+		return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);
+
+	return mem;
+}
+
+/**
+ * Checks whether pointer is from virtual memory range.
+ * @param ptr Pointer to memory being checked.
+ * @return True if pointer falls into virtual memory region, false otherwise.
+ */
+static bool is_virtual_heap_pointer(void *ptr)
+{
+	uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
+				       HEAPMEM_SIZE;
+	uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;
+
+	if (!is_cached(ptr))
+		ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
+
+	return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
+		(POINTER_TO_UINT(ptr) < virtual_heap_end));
+}
+
+static void virtual_heap_free(void *ptr)
+{
+	struct vmh_heap *const heap = virtual_buffers_heap[cpu_get_id()];
+	int ret;
+
+	ptr = (__sparse_force void *)sys_cache_cached_ptr_get(ptr);
+
+	ret = vmh_free(heap, ptr);
+	if (ret)
+		tr_err(&zephyr_tr, "Unable to free %p! %d", ptr, ret);
+}
+
+static const struct vmh_heap_config static_hp_buffers = {
+	{
+		{ 128, 32},
+		{ 512, 8},
+		{ 1024, 44},
+		{ 2048, 8},
+		{ 4096, 11},
+		{ 8192, 10},
+		{ 65536, 3},
+		{ 131072, 1},
+		{ 524288, 1} /* buffer for kpb */
+	},
+};
+
+static int virtual_heap_init(void)
+{
+	int core;
+
+	k_spinlock_init(&vmh_lock);
+
+	for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) {
+		struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP,
+						      core, false);
+		if (!heap)
+			tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core);
+
+		virtual_buffers_heap[core] = heap;
+	}
+
+	return 0;
+}
+
+SYS_INIT(virtual_heap_init, POST_KERNEL, 1);
+
+#endif /* CONFIG_VIRTUAL_HEAP */
+
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
 {
 	k_spinlock_key_t key;
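For reference, a quick check of the static_hp_buffers table above, assuming the usual 4 KiB page size: every bucket group is a whole number of pages and the groups together cover exactly the 1 MB of per-core virtual buffer space mentioned in the first patch, with the single 524288-byte bucket carrying the larger KPB buffer:

/* block_size * number_of_blocks, each group total a multiple of 4096:
 *    128 * 32 =   4096      512 *  8 =   4096     1024 * 44 =  45056
 *   2048 *  8 =  16384     4096 * 11 =  45056     8192 * 10 =  81920
 *  65536 *  3 = 196608   131072 *  1 = 131072   524288 *  1 = 524288
 * Total = 1048576 bytes = 1 MiB of per-core virtual buffer space.
 */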
%d", ptr, ret); +} + +static const struct vmh_heap_config static_hp_buffers = { + { + { 128, 32}, + { 512, 8}, + { 1024, 44}, + { 2048, 8}, + { 4096, 11}, + { 8192, 10}, + { 65536, 3}, + { 131072, 1}, + { 524288, 1} /* buffer for kpb */ + }, +}; + +static int virtual_heap_init(void) +{ + int core; + + k_spinlock_init(&vmh_lock); + + for (core = 0; core < CONFIG_MP_MAX_NUM_CPUS; core++) { + struct vmh_heap *heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_CORE_HEAP, + core, false); + if (!heap) + tr_err(&zephyr_tr, "Unable to init virtual heap for core %d!", core); + + virtual_buffers_heap[core] = heap; + } + + return 0; +} + +SYS_INIT(virtual_heap_init, POST_KERNEL, 1); + +#endif /* CONFIG_VIRTUAL_HEAP */ + static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes) { k_spinlock_key_t key; @@ -384,6 +479,9 @@ EXPORT_SYMBOL(rzalloc); void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, uint32_t align) { +#if CONFIG_VIRTUAL_HEAP + struct vmh_heap *virtual_heap; +#endif struct k_heap *heap; /* choose a heap */ @@ -399,6 +497,13 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, heap = &sof_heap; } +#if CONFIG_VIRTUAL_HEAP + /* Use virtual heap if it is available */ + virtual_heap = virtual_buffers_heap[cpu_get_id()]; + if (virtual_heap) + return virtual_heap_alloc(virtual_heap, flags, caps, bytes, align); +#endif /* CONFIG_VIRTUAL_HEAP */ + if (flags & SOF_MEM_FLAG_COHERENT) return heap_alloc_aligned(heap, align, bytes); @@ -421,6 +526,13 @@ void rfree(void *ptr) } #endif +#if CONFIG_VIRTUAL_HEAP + if (is_virtual_heap_pointer(ptr)) { + virtual_heap_free(ptr); + return; + } +#endif + heap_free(&sof_heap, ptr); } EXPORT_SYMBOL(rfree);