zephyr: reimplement cached heap zone on single Zephyr sys_heap #4857
Changes from all commits: a8963fc, a51406c, cc32696, e01d8c9
Kconfig:

```diff
@@ -1,3 +1,12 @@
 if SOF
 rsource "../Kconfig.sof"
+
+config SOF_ZEPHYR_HEAP_CACHED
+	bool "Cached Zephyr heap for SOF memory non-shared zones"
+	default y if CAVS
+	default n
+	help
+	  Enable cached heap by mapping cached SOF memory zones to different
+	  Zephyr sys_heap objects and enable caching for non-shared zones.
+
 endif
```
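The option defaults to y only on CAVS; any build can override it from its configuration rather than by editing the source. A hypothetical overlay fragment, shown only to illustrate the new symbol (not part of this PR):

```
# Force the cached heap off even where the platform default is y:
CONFIG_SOF_ZEPHYR_HEAP_CACHED=n
```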
Zephyr wrapper (C source):

```diff
@@ -56,14 +56,9 @@ DECLARE_TR_CTX(zephyr_tr, SOF_UUID(zephyr_uuid), LOG_LEVEL_INFO);
 
 /* The Zephyr heap */
 
-/* use cached heap for non-shared allocations */
-/*#define ENABLE_CACHED_HEAP 1*/
-
 #ifdef CONFIG_IMX
 #define HEAPMEM_SIZE (HEAP_SYSTEM_SIZE + HEAP_RUNTIME_SIZE + HEAP_BUFFER_SIZE)
 
-#undef ENABLE_CACHED_HEAP
-
 /*
  * Include heapmem variable in .heap_mem section, otherwise the HEAPMEM_SIZE is
  * duplicated in two sections and the sdram0 region overflows.
```
```diff
@@ -81,34 +76,22 @@ __section(".heap_mem") static uint8_t __aligned(64) heapmem[HEAPMEM_SIZE];
 #if (CONFIG_HP_MEMORY_BANKS < 16)
 /* e.g. APL */
 #if defined __XCC__
-#define HEAP_SIZE 0x28000
+#define HEAPMEM_SIZE 0x28000
 #else
-#define HEAP_SIZE 0x30000
+#define HEAPMEM_SIZE 0x30000
 #endif
 #elif (CONFIG_HP_MEMORY_BANKS < 30)
 /* e.g. JSL */
-#define HEAP_SIZE 0x80000
+#define HEAPMEM_SIZE 0x80000
 #elif (CONFIG_HP_MEMORY_BANKS < 45)
 /* e.g. TGL-H */
-#define HEAP_SIZE 0x100000
+#define HEAPMEM_SIZE 0x100000
 #else
 /* e.g. CNL/ICL/TGL */
-#define HEAP_SIZE 0x200000
-#endif
-
-#ifdef ENABLE_CACHED_HEAP
-/* hard code the cached portion at the moment */
-#define HEAP_SYSTEM_CACHED_SIZE (HEAP_SIZE / 2)
-#else
-#define HEAP_SYSTEM_CACHED_SIZE 0
+#define HEAPMEM_SIZE 0x200000
 #endif
-#define HEAPMEM_SIZE (HEAP_SIZE - HEAP_SYSTEM_CACHED_SIZE)
 
 static uint8_t __aligned(PLATFORM_DCACHE_ALIGN)heapmem[HEAPMEM_SIZE];
-#ifdef ENABLE_CACHED_HEAP
-static uint8_t __aligned(PLATFORM_DCACHE_ALIGN)heapmem_cached[HEAP_SYSTEM_CACHED_SIZE];
-static struct k_heap sof_heap_cached;
-#endif
 
 #endif
```
```diff
@@ -119,61 +102,65 @@ static int statics_init(const struct device *unused)
 	ARG_UNUSED(unused);
 
 	sys_heap_init(&sof_heap.heap, heapmem, HEAPMEM_SIZE);
-#ifdef ENABLE_CACHED_HEAP
-	sys_heap_init(&sof_heap_cached.heap, heapmem_cached, HEAP_SYSTEM_CACHED_SIZE);
-#endif
 
 	return 0;
 }
 
 SYS_INIT(statics_init, PRE_KERNEL_1, CONFIG_KERNEL_INIT_PRIORITY_OBJECTS);
 
-static void *heap_alloc_aligned(struct k_heap *h, size_t align, size_t bytes)
+static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
 {
-	void *ret = NULL;
-
-	k_spinlock_key_t key = k_spin_lock(&h->lock);
-
-	ret = sys_heap_aligned_alloc(&h->heap, align, bytes);
+	k_spinlock_key_t key;
+	void *ret;
+
+	key = k_spin_lock(&h->lock);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
 	k_spin_unlock(&h->lock, key);
 
 	return ret;
 }
 
 static void *heap_alloc_aligned_cached(struct k_heap *h, size_t min_align, size_t bytes)
 {
-#ifdef ENABLE_CACHED_HEAP
-	unsigned int align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
-	unsigned int aligned_size = ALIGN_UP(bytes, align);
 	void *ptr;
 
 	/*
 	 * Zephyr sys_heap stores metadata at start of each
 	 * heap allocation. To ensure no allocated cached buffer
 	 * overlaps the same cacheline with the metadata chunk,
 	 * align both allocation start and size of allocation
-	 * to cacheline.
+	 * to cacheline. As cached and non-cached allocations are
+	 * mixed, same rules need to be followed for both type of
+	 * allocations.
 	 */
-	ptr = heap_alloc_aligned(h, align, aligned_size);
-	if (ptr) {
-		ptr = uncache_to_cache(ptr);
-
-		/*
-		 * Heap can be used by different cores, so cache
-		 * needs to be invalidated before next user
-		 */
-		z_xtensa_cache_inv(ptr, aligned_size);
-	}
+#ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED
+	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
+	bytes = ALIGN_UP(bytes, min_align);
+#endif
 
-	return ptr;
-#else
-	return heap_alloc_aligned(&sof_heap, min_align, bytes);
+	ptr = heap_alloc_aligned(h, min_align, bytes);
 
+#ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED
+	if (ptr)
+		ptr = z_soc_cached_ptr(ptr);
 #endif
+
+	return ptr;
 }
 
 static void heap_free(struct k_heap *h, void *mem)
 {
 	k_spinlock_key_t key = k_spin_lock(&h->lock);
+#ifdef CONFIG_SOF_ZEPHYR_HEAP_CACHED
+	void *mem_uncached;
+
+	if (is_cached(mem)) {
+		mem_uncached = z_soc_uncached_ptr(mem);
+		z_xtensa_cache_flush_inv(mem, sys_heap_usable_size(&h->heap, mem_uncached));
+
+		mem = mem_uncached;
+	}
+#endif
 
 	sys_heap_free(&h->heap, mem);
```
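The cacheline rounding in heap_alloc_aligned_cached() is easiest to see with concrete numbers. A minimal standalone sketch, assuming a 64-byte cacheline (a typical PLATFORM_DCACHE_ALIGN value) and local stand-ins for the MAX()/ALIGN_UP() macros used in the diff:

```c
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed value and macro stand-ins; not the SOF/Zephyr headers. */
#define PLATFORM_DCACHE_ALIGN	64
#define MAX(a, b)		((a) > (b) ? (a) : (b))
#define ALIGN_UP(x, a)		(((x) + (a) - 1) / (a) * (a))

int main(void)
{
	size_t min_align = 0;	/* caller asked for no specific alignment */
	size_t bytes = 100;	/* caller asked for 100 bytes */

	/* the same rounding the patch applies before allocating */
	min_align = MAX(PLATFORM_DCACHE_ALIGN, min_align);
	bytes = ALIGN_UP(bytes, min_align);

	/*
	 * A 100-byte request becomes a 64-byte-aligned, 128-byte
	 * allocation: neither end of the buffer shares a cacheline with
	 * sys_heap metadata or a neighbouring allocation, so flushing or
	 * invalidating the buffer cannot corrupt them.
	 */
	assert(min_align == 64 && bytes == 128);
	printf("align=%zu size=%zu\n", min_align, bytes);
	return 0;
}
```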
```diff
@@ -182,7 +169,7 @@ static void heap_free(struct k_heap *h, void *mem)
 
 static inline bool zone_is_cached(enum mem_zone zone)
 {
-#ifndef ENABLE_CACHED_HEAP
+#ifndef CONFIG_SOF_ZEPHYR_HEAP_CACHED
 	return false;
 #endif
```
```diff
@@ -197,11 +184,11 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (zone_is_cached(zone))
 		return heap_alloc_aligned_cached(&sof_heap, 0, bytes);
 
-#ifdef ENABLE_CACHED_HEAP
-	return heap_alloc_aligned(&sof_heap_shared, 8, bytes);
-#else
-	return heap_alloc_aligned(&sof_heap, 8, bytes);
-#endif
+	/*
+	 * XTOS alloc implementation has used dcache alignment,
+	 * so SOF application code is expecting this behaviour.
+	 */
+	return heap_alloc_aligned(&sof_heap, PLATFORM_DCACHE_ALIGN, bytes);
 }
 
 /* Use SOF_MEM_ZONE_BUFFER at the moment */
```
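Raising the default alignment from 8 bytes to PLATFORM_DCACHE_ALIGN preserves an XTOS-era guarantee that existing SOF code may silently rely on, for example when a caller cache-flushes a buffer it allocated. A hypothetical caller-side illustration (the struct and the assert are not from the PR):

```c
/* XTOS-era code may flush/invalidate an rmalloc()ed buffer; that is
 * only safe if the buffer starts on its own cacheline. */
struct my_msg *msg = rmalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM,
			     sizeof(*msg));
assert(((uintptr_t)msg % PLATFORM_DCACHE_ALIGN) == 0);
```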
```diff
@@ -275,16 +262,6 @@ void rfree(void *ptr)
 	if (!ptr)
 		return;
 
-#ifdef ENABLE_CACHED_HEAP
-	/* select heap based on address range */
-	if (is_uncached(ptr)) {
-		heap_free(&sof_heap_shared, ptr);
-		return;
-	}
-
-	ptr = cache_to_uncache(ptr);
-#endif
-
 	heap_free(&sof_heap, ptr);
 }
```
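With a single backing sys_heap, rfree() no longer dispatches on address range: heap_free() itself detects a cached alias, flush-invalidates it, and converts it back to the uncached pointer before calling sys_heap_free(). A sketch of the intended round trip with CONFIG_SOF_ZEPHYR_HEAP_CACHED=y, assuming the buffer zone is one of the cached zones (usage illustrative):

```c
/* rballoc() allocates from SOF_MEM_ZONE_BUFFER; with the cached heap
 * enabled it returns the cached alias from z_soc_cached_ptr(). */
uint8_t *buf = rballoc(0, SOF_MEM_CAPS_RAM, 256);

/* ... write through the cached alias; dirty cachelines are fine ... */

/*
 * Free via the common entry point: heap_free() sees is_cached(buf),
 * writes back and invalidates the usable size, then frees the
 * uncached alias, so the next user starts with a clean cache.
 */
rfree(buf);
```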