From 15d0fdf80497c16424423e7c8b4b7ba5923a63ac Mon Sep 17 00:00:00 2001
From: Adrian Bonislawski
Date: Mon, 18 Aug 2025 09:35:54 +0200
Subject: [PATCH 1/2] west.yml: update zephyr to af974c307477

af974c307477 soc: intel_adsp: ace: add IMR info registers
35a1e62035fd soc: intel_adsp: ace40: extend hwreg1 MMU mapping

Signed-off-by: Adrian Bonislawski
---
 west.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/west.yml b/west.yml
index e3978203db80..be312634d0bc 100644
--- a/west.yml
+++ b/west.yml
@@ -43,7 +43,7 @@ manifest:
 
     - name: zephyr
       repo-path: zephyr
-      revision: f3b9d1871104b0d69abf6182ef7d262652b13729
+      revision: af974c307477f4e0e093abbfca768419f14a865f
       remote: zephyrproject
 
       # Import some projects listed in zephyr/west.yml@revision

From 76252e7d407ae60a2e5caf8a28e929edc8c8865d Mon Sep 17 00:00:00 2001
From: Adrian Bonislawski
Date: Mon, 18 Aug 2025 09:23:09 +0200
Subject: [PATCH 2/2] zephyr: alloc: ace: calculate L3 heap size based on
 actual IMR size

Update the L3 heap management to calculate the heap size dynamically
from the actual IMR size reported by hardware registers instead of
using hardcoded values.

Initialize the L3 heap only when the IMR is actually available and in
use, as reported by ace_imr_used(). This improves robustness by
preventing initialization of unavailable memory regions.

Add the required memory mapping when the MMU is enabled: the physical
L3 heap memory is mapped to a virtual address with read/write
permissions and write-back caching. The explicit mapping is needed
because the L3 heap is no longer a fixed region with a fixed mapping
in Zephyr.

This makes the L3 heap allocation more flexible and adaptable to
different hardware configurations.

Signed-off-by: Adrian Bonislawski
---
 zephyr/lib/alloc.c | 24 +++++++++++++++++++-----
 1 file changed, 19 insertions(+), 5 deletions(-)

diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c
index 946a07b9124a..c93ffcd80dab 100644
--- a/zephyr/lib/alloc.c
+++ b/zephyr/lib/alloc.c
@@ -21,6 +21,10 @@
 
 #define SHARED_BUFFER_HEAP_MEM_SIZE	0
 
+#if CONFIG_L3_HEAP && CONFIG_MMU
+#include <zephyr/kernel/mm.h>
+#endif
+
 #if CONFIG_VIRTUAL_HEAP
 #include
 #include
@@ -217,7 +221,9 @@ static inline size_t get_l3_heap_size(void)
 	 * - IMR base address
 	 * - actual IMR heap start
	 */
-	return ROUND_DOWN(IMR_L3_HEAP_SIZE, L3_MEM_PAGE_SIZE);
+	size_t offset = IMR_L3_HEAP_BASE - L3_MEM_BASE_ADDR;
+
+	return ROUND_DOWN(ace_imr_get_mem_size() - offset, L3_MEM_PAGE_SIZE);
 }
 
 void l3_heap_save(void)
@@ -651,11 +657,19 @@ static int heap_init(void)
 #endif
 
 #if CONFIG_L3_HEAP
-	if (l3_heap_copy.heap.heap)
+	if (l3_heap_copy.heap.heap) {
 		l3_heap = l3_heap_copy;
-	else
-		sys_heap_init(&l3_heap.heap, UINT_TO_POINTER(get_l3_heap_start()),
-			      get_l3_heap_size());
+	} else if (ace_imr_used()) {
+		void *l3_heap_start = UINT_TO_POINTER(get_l3_heap_start());
+		size_t l3_heap_size = get_l3_heap_size();
+#if CONFIG_MMU
+		void *cached_ptr = sys_cache_cached_ptr_get(l3_heap_start);
+		uintptr_t va = POINTER_TO_UINT(cached_ptr);
+
+		arch_mem_map(l3_heap_start, va, l3_heap_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
+#endif
+		sys_heap_init(&l3_heap.heap, l3_heap_start, l3_heap_size);
+	}
 #endif
 
 	return 0;
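
A quick way to sanity-check the new get_l3_heap_size() arithmetic in
isolation: the sketch below is plain standalone C, not SOF code. The
macro values and fake_ace_imr_get_mem_size() are made-up stand-ins for
the real platform constants and the ace_imr_get_mem_size() register
read; only the arithmetic mirrors the hunk above.

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

/* Made-up stand-ins for the SOF platform constants. */
#define L3_MEM_BASE_ADDR  0xA0000000u  /* hypothetical IMR base address */
#define IMR_L3_HEAP_BASE  0xA0400000u  /* hypothetical L3 heap start in IMR */
#define L3_MEM_PAGE_SIZE  0x1000u      /* hypothetical 4 KiB page size */

/* Same result as Zephyr's ROUND_DOWN() for a power-of-two alignment. */
#define ROUND_DOWN(x, align) ((x) & ~((align) - 1u))

/* Stand-in for ace_imr_get_mem_size(): pretend the IMR info registers
 * report a 16 MiB IMR.
 */
static uint32_t fake_ace_imr_get_mem_size(void)
{
	return 16u * 1024u * 1024u;
}

int main(void)
{
	/* The heap does not start at the IMR base, so the gap between the
	 * IMR base and the heap start is unusable and must be subtracted
	 * from the total IMR size before rounding down to a page boundary.
	 */
	uint32_t offset = IMR_L3_HEAP_BASE - L3_MEM_BASE_ADDR;      /* 4 MiB */
	uint32_t size = ROUND_DOWN(fake_ace_imr_get_mem_size() - offset,
				   L3_MEM_PAGE_SIZE);               /* 12 MiB */

	printf("L3 heap size: %" PRIu32 " bytes\n", size);
	return 0;
}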
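
For the MMU path in heap_init(), here is the same sequence again as a
commented walk-through. It is a paraphrase of the hunk above with
editorial comments, not additional code; for reference, K_MEM_PERM_RW
and K_MEM_CACHE_WB are defined in Zephyr's <zephyr/kernel/mm.h>, and
arch_mem_map() is the Zephyr arch interface taking virtual address,
physical address, size, and flags.

#if CONFIG_MMU
	/* sys_cache_cached_ptr_get() returns the cached alias of the
	 * heap start pointer; that alias is the address passed to
	 * arch_mem_map() below.
	 */
	void *cached_ptr = sys_cache_cached_ptr_get(l3_heap_start);
	uintptr_t va = POINTER_TO_UINT(cached_ptr);

	/* The L3 heap is no longer a fixed region with a fixed mapping
	 * in Zephyr, so the mapping must be created here: read/write
	 * access with write-back caching.
	 */
	arch_mem_map(l3_heap_start, va, l3_heap_size, K_MEM_PERM_RW | K_MEM_CACHE_WB);
#endif
	/* With the mapping in place (or with the MMU disabled), the
	 * region can be handed to the allocator.
	 */
	sys_heap_init(&l3_heap.heap, l3_heap_start, l3_heap_size);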