Skip to content

Commit 80bb55f

Browse files
committed
ace: zephyr: alloc: Use virtual memory heap for buffers
The buffer allocation method for the ace platform has been changed. Buffers are now allocated using the virtual memory heap, which consists of a set of buffers with predefined sizes. Signed-off-by: Adrian Warecki <adrian.warecki@intel.com>
1 parent 9cc9183 commit 80bb55f

File tree

3 files changed

+108
-2
lines changed

3 files changed

+108
-2
lines changed

zephyr/Kconfig

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -77,4 +77,12 @@ config SOF_BOOT_TEST
7777
initialized. After that SOF will continue running and be usable as
7878
usual.
7979

80+
config VIRTUAL_HEAP
	bool "Use virtual memory heap to allocate buffers"
	depends on ACE
	default y
	help
	  Enabling this option will use the virtual memory heap allocator to allocate buffers.
	  It is based on a set of buffers whose size is predetermined.
8088
endif

zephyr/include/sof/lib/regions_mm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,7 +36,7 @@
3636
* either be spanned on specifically configured heap or have
3737
* individual configs with bigger block sizes.
3838
*/
39-
/* NOTE(review): raised from 8 to 10 together with the virtual-heap support —
 * confirm this matches the number of heap regions actually configured.
 */
#define MAX_MEMORY_ALLOCATORS_COUNT 10
4040

4141
/* vmh_get_default_heap_config() function will try to split the region
4242
* down the given count. Only applicable when API client did not

zephyr/lib/alloc.c

Lines changed: 99 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,13 @@
1818
#include <sof/trace/trace.h>
1919
#include <rtos/symbol.h>
2020
#include <rtos/wait.h>
21+
#if CONFIG_VIRTUAL_HEAP
22+
#include <sof/lib/regions_mm.h>
23+
24+
struct vmh_heap *virtual_buffers_heap;
25+
struct k_spinlock vmh_lock;
26+
#endif /* CONFIG_VIRTUAL_HEAP */
27+
2128

2229
/* Zephyr includes */
2330
#include <zephyr/init.h>
@@ -189,6 +196,85 @@ static void l3_heap_free(struct k_heap *h, void *mem)
189196

190197
#endif
191198

199+
#if CONFIG_VIRTUAL_HEAP
200+
/**
 * Allocates a buffer from the virtual memory heap.
 * @param heap Virtual memory heap to allocate from.
 * @param flags SOF memory flags; SOF_MEM_FLAG_COHERENT selects the uncached alias.
 * @param caps Capability bits (unused here — TODO confirm whether caps should
 *             influence virtual-heap allocations).
 * @param bytes Requested size in bytes.
 * @param align Required alignment; only checked via assert(), not passed to
 *              vmh_alloc() — presumably the fixed block sizes guarantee it.
 * @return Pointer to the allocated buffer (cached or uncached alias) or NULL.
 */
static void *virtual_heap_alloc(struct vmh_heap *heap, uint32_t flags, uint32_t caps, size_t bytes,
				uint32_t align)
{
	void *mem;

	K_SPINLOCK(&vmh_lock) {
		/* Tag the heap with the calling core before allocating. */
		heap->core_id = cpu_get_id();
		mem = vmh_alloc(heap, bytes);
	}

	if (!mem)
		return NULL;

	/* Alignment is asserted only; release builds do not re-check it. */
	assert(IS_ALIGNED(mem, align));

	/* Coherent requests get the uncached alias of the same buffer. */
	if (flags & SOF_MEM_FLAG_COHERENT)
		return sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)mem);

	return mem;
}
220+
221+
/**
222+
* Checks whether pointer is from virtual memory range.
223+
* @param ptr Pointer to memory being checked.
224+
* @return True if pointer falls into virtual memory region, false otherwise.
225+
*/
226+
static bool is_virtual_heap_pointer(void *ptr)
227+
{
228+
uintptr_t virtual_heap_start = POINTER_TO_UINT(sys_cache_cached_ptr_get(&heapmem)) +
229+
HEAPMEM_SIZE;
230+
uintptr_t virtual_heap_end = CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_SIZE;
231+
232+
if (!is_cached(ptr))
233+
ptr = sys_cache_cached_ptr_get((__sparse_force void *)ptr);
234+
235+
return ((POINTER_TO_UINT(ptr) >= virtual_heap_start) &&
236+
(POINTER_TO_UINT(ptr) < virtual_heap_end));
237+
}
238+
239+
static void virtual_heap_free(void *ptr)
240+
{
241+
ptr = sys_cache_cached_ptr_get((__sparse_force void *)ptr);
242+
243+
K_SPINLOCK(&vmh_lock) {
244+
vmh_free(virtual_buffers_heap, ptr);
245+
}
246+
}
247+
248+
/* Block table for the virtual buffers heap. Each entry is presumably
 * { block size in bytes, block count } — confirm against
 * struct vmh_heap_config in regions_mm.h.
 */
static const struct vmh_heap_config static_hp_buffers = {
	{
		{ 128, 32},
		{ 512, 8},
		{ 1024, 16},
		{ 2048, 8},
		{ 4096, 11},
		{ 8192, 13},
		{ 65536, 3},
		{ 131072, 1},
		{ 524288, 1} /* buffer for kpb */
	},
};
261+
262+
/* One-time setup of the virtual buffers heap, run as a Zephyr SYS_INIT hook. */
static int virtual_heap_init(void)
{
	k_spinlock_init(&vmh_lock);

	virtual_buffers_heap = vmh_init_heap(&static_hp_buffers, MEM_REG_ATTR_SHARED_HEAP, 0,
					     false);
	if (!virtual_buffers_heap)
		tr_err(&zephyr_tr, "Unable to init virtual buffers heap!");

	/* Deliberately return 0 even on failure: virtual_buffers_heap stays
	 * NULL and rballoc_align() falls back to the regular sof_heap.
	 */
	return 0;
}

SYS_INIT(virtual_heap_init, POST_KERNEL, 1);
275+
276+
#endif /* CONFIG_VIRTUAL_HEAP */
277+
192278
static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
193279
{
194280
k_spinlock_key_t key;
@@ -395,6 +481,12 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
395481
heap = &sof_heap;
396482
}
397483

484+
#if CONFIG_VIRTUAL_HEAP
485+
/* Use virtual heap if it is available */
486+
if (virtual_buffers_heap)
487+
return virtual_heap_alloc(virtual_buffers_heap, flags, caps, bytes, align);
488+
#endif /* CONFIG_VIRTUAL_HEAP */
489+
398490
if (flags & SOF_MEM_FLAG_COHERENT)
399491
return heap_alloc_aligned(heap, align, bytes);
400492

@@ -417,6 +509,13 @@ void rfree(void *ptr)
417509
}
418510
#endif
419511

512+
#if CONFIG_VIRTUAL_HEAP
513+
if (is_virtual_heap_pointer(ptr)) {
514+
virtual_heap_free(ptr);
515+
return;
516+
}
517+
#endif
518+
420519
heap_free(&sof_heap, ptr);
421520
}
422521
EXPORT_SYMBOL(rfree);
@@ -428,7 +527,6 @@ static int heap_init(void)
428527
#if CONFIG_L3_HEAP
429528
sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()), get_l3_heap_size());
430529
#endif
431-
432530
return 0;
433531
}
434532

0 commit comments

Comments
 (0)