@@ -115,8 +115,7 @@ static inline uintptr_t get_l3_heap_start(void)
 	 * - main_fw_load_offset
 	 * - main fw size in manifest
 	 */
-	return (uintptr_t)z_soc_uncached_ptr((__sparse_force void __sparse_cache *)
-					     ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
+	return (uintptr_t)(ROUND_UP(IMR_L3_HEAP_BASE, L3_MEM_PAGE_SIZE));
 }
 
 /**
@@ -144,14 +143,50 @@ static bool is_l3_heap_pointer(void *ptr)
 	uintptr_t l3_heap_start = get_l3_heap_start();
 	uintptr_t l3_heap_end = l3_heap_start + get_l3_heap_size();
 
-	if (is_cached(ptr))
-		ptr = z_soc_uncached_ptr((__sparse_force void __sparse_cache *)ptr);
-
 	if ((POINTER_TO_UINT(ptr) >= l3_heap_start) && (POINTER_TO_UINT(ptr) < l3_heap_end))
 		return true;
 
 	return false;
 }
+
+static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
+{
+	k_spinlock_key_t key;
+	void *ret;
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	struct sys_memory_stats stats;
+#endif
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return NULL;
+	}
+
+	key = k_spin_lock(&h->lock);
+	ret = sys_heap_aligned_alloc(&h->heap, min_align, bytes);
+	k_spin_unlock(&h->lock, key);
+
+#if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4
+	sys_heap_runtime_stats_get(&h->heap, &stats);
+	tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u",
+		stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes);
+#endif
+
+	return ret;
+}
+
+static void l3_heap_free(struct k_heap *h, void *mem)
+{
+	if (!cpu_is_primary(arch_proc_id())) {
+		tr_err(&zephyr_tr, "L3_HEAP available only for primary core!");
+		return;
+	}
+
+	k_spinlock_key_t key = k_spin_lock(&h->lock);
+
+	sys_heap_free(&h->heap, mem);
+	k_spin_unlock(&h->lock, key);
+}
+
 #endif
 
 static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes)
@@ -250,6 +285,17 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes)
 	if (caps & SOF_MEM_CAPS_L3) {
 #if CONFIG_L3_HEAP
 		heap = &l3_heap;
+		/* Uncached L3_HEAP should not be used */
+		if (!zone_is_cached(zone)) {
+			tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!");
+			return NULL;
+		}
+		ptr = (__sparse_force void *)l3_heap_alloc_aligned(heap, 0, bytes);
+
+		if (!ptr && zone == SOF_MEM_ZONE_SYS)
+			k_panic();
+
+		return ptr;
 #else
 		k_panic();
 #endif
@@ -334,10 +380,24 @@ EXPORT_SYMBOL(rzalloc);
 void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes,
 		    uint32_t align)
 {
+	struct k_heap *heap;
+
+	/* choose a heap */
+	if (caps & SOF_MEM_CAPS_L3) {
+#if CONFIG_L3_HEAP
+		heap = &l3_heap;
+		return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes);
+#else
+		k_panic();
+#endif
+	} else {
+		heap = &sof_heap;
+	}
+
 	if (flags & SOF_MEM_FLAG_COHERENT)
-		return heap_alloc_aligned(&sof_heap, align, bytes);
+		return heap_alloc_aligned(heap, align, bytes);
 
-	return (__sparse_force void *)heap_alloc_aligned_cached(&sof_heap, align, bytes);
+	return (__sparse_force void *)heap_alloc_aligned_cached(heap, align, bytes);
 }
 EXPORT_SYMBOL(rballoc_align);
 
@@ -351,7 +411,7 @@ void rfree(void *ptr)
 
 #if CONFIG_L3_HEAP
 	if (is_l3_heap_pointer(ptr)) {
-		heap_free(&l3_heap, ptr);
+		l3_heap_free(&l3_heap, ptr);
 		return;
 	}
 #endif
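
For reviewers, a minimal usage sketch of the new allocation path (not part of the patch): rmalloc()/rballoc_align() with SOF_MEM_CAPS_L3 now route to l3_heap_alloc_aligned(), and rfree() detects pointers in the L3 range and hands them to l3_heap_free(). The sketch assumes it runs on the primary core and that SOF_MEM_ZONE_RUNTIME is treated as a cached zone; the header path and the save_context() helper are hypothetical.

```c
#include <errno.h>
#include <rtos/alloc.h>	/* rmalloc()/rfree(); header path may differ by SOF version */

/* Hypothetical helper: stash 'bytes' of context data in the L3 (IMR) heap. */
static int save_context(size_t bytes)
{
	/* Primary core only; uncached zones are rejected with NULL. */
	void *buf = rmalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_L3, bytes);

	if (!buf)
		return -ENOMEM;

	/* ... fill buf ... */

	/* rfree() sees the pointer lies in the L3 heap range and uses l3_heap_free(). */
	rfree(buf);

	return 0;
}
```

Because l3_heap_alloc_aligned() checks cpu_is_primary() and takes the heap spinlock itself, a call from a secondary core just gets NULL plus an error trace rather than touching the heap.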