From 2b31d225c7d7f5d9ec90c40df3d9dcd04226f4e6 Mon Sep 17 00:00:00 2001
From: Andy Ross
Date: Wed, 6 Oct 2021 07:10:18 -0700
Subject: [PATCH] zephyr: Correct heap cache management

The heap code was invalidating blocks on allocation, but that's the
wrong side of the pipe. By definition, new heap memory will/should/must
be written before being used (because the memory is undefined), so any
cached contents are irrelevant as they'll be overwritten.

But when the user is finished with the block and frees it, there may
still be live dirty cache lines in the region on the current CPU. Those
must be written back and invalidated; otherwise they will be evicted
from the cache at some point in the future, on top of the memory region
now being used for different purposes on another CPU.

Remove the invalidate on allocation. Do a flush-and-invalidate in free
instead. Leverage the new Zephyr sys_heap_usable_size() routine to get
the block size so we don't have to store it in an extra header.

Signed-off-by: Andy Ross
---
 zephyr/wrapper.c | 11 +++++------
 1 file changed, 5 insertions(+), 6 deletions(-)

diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c
index 1bbe36fd025a..d0c42c42384d 100644
--- a/zephyr/wrapper.c
+++ b/zephyr/wrapper.c
@@ -153,12 +153,6 @@ static void *heap_alloc_aligned_cached(struct k_heap *h, size_t min_align, size_
 	ptr = heap_alloc_aligned(h, align, aligned_size);
 	if (ptr) {
 		ptr = uncache_to_cache(ptr);
-
-		/*
-		 * Heap can be used by different cores, so cache
-		 * needs to be invalidated before next user
-		 */
-		z_xtensa_cache_inv(ptr, aligned_size);
 	}
 
 	return ptr;
@@ -171,6 +165,11 @@ static void heap_free(struct k_heap *h, void *mem)
 {
 	k_spinlock_key_t key = k_spin_lock(&h->lock);
 
+#ifdef ENABLE_CACHED_HEAP
+	z_xtensa_cache_flush_inv(z_soc_cached_ptr(mem),
+				 sys_heap_usable_size(h, mem));
+#endif
+
 	sys_heap_free(&h->heap, mem);
 
 	k_spin_unlock(&h->lock, key);
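
For illustration, the same pattern can be written against a generic
allocator. This is a minimal sketch, not part of the patch:
shared_heap_alloc(), shared_heap_free() and arch_flush_inv() are
hypothetical names, the cache primitive is a no-op stub standing in for
z_xtensa_cache_flush_inv(), and glibc's malloc_usable_size() plays the
role of sys_heap_usable_size().

#include <stdlib.h>
#include <malloc.h>	/* malloc_usable_size() (glibc) */

/* Stub standing in for z_xtensa_cache_flush_inv(): on real hardware
 * this would write any dirty lines in [addr, addr + bytes) back to
 * memory and then drop them from this CPU's cache.
 */
static void arch_flush_inv(void *addr, size_t bytes)
{
	(void)addr;
	(void)bytes;
}

void *shared_heap_alloc(size_t bytes)
{
	/* No cache maintenance here: the block's contents are
	 * undefined, so the caller must write before reading, and any
	 * stale cached lines are overwritten in the process.
	 */
	return malloc(bytes);
}

void shared_heap_free(void *mem)
{
	if (!mem)
		return;

	/* Free is the last moment the block verifiably belongs to the
	 * CPU that may hold dirty lines for it: write back and
	 * invalidate before the allocator can hand the region to
	 * another CPU. The allocator already tracks the block size, so
	 * no private size header is needed.
	 */
	arch_flush_inv(mem, malloc_usable_size(mem));
	free(mem);
}

The asymmetry is the design point: allocation needs no cache operation
because the memory is undefined by contract, while free is where dirty
lines must be dealt with before ownership of the region changes.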