From 132ef2f3e4602bb5e98147364c15ae635019fd98 Mon Sep 17 00:00:00 2001 From: Adrian Warecki Date: Wed, 2 Oct 2024 15:54:11 +0200 Subject: [PATCH 1/2] regions_mm: Invalidate cache when freeing memory Platforms based on xtensa have a non-coherent cache between cores. Before releasing a memory block, it is necessary to invalidate the cache. This memory block can be allocated by another core, and performing a cache writeback by the previous owner will destroy the current content of the main memory. Invalidate the cache when freeing an allocated memory block. Signed-off-by: Adrian Warecki --- zephyr/lib/regions_mm.c | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c index f60f8875a1c9..94e39a0910e6 100644 --- a/zephyr/lib/regions_mm.c +++ b/zephyr/lib/regions_mm.c @@ -617,6 +617,16 @@ int vmh_free(struct vmh_heap *heap, void *ptr) if (retval) return retval; + /* Platforms based on xtensa have a non-coherent cache between cores. Before releasing + * a memory block, it is necessary to invalidate the cache. This memory block can be + * allocated by another core and performing cache writeback by the previous owner will + * destroy current content of the main memory. The cache is invalidated by the + * sys_mm_drv_unmap_region function, when a memory page is unmapped. There is no need to + * invalidate it when releasing buffers of at least a page in size. + */ + if (size_to_free < CONFIG_MM_DRV_PAGE_SIZE) + sys_cache_data_invd_range(ptr, size_to_free); + return vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr, size_to_free); } From 516176b8487f2db80ca5957c0d3f9ca16aa7e58c Mon Sep 17 00:00:00 2001 From: Adrian Warecki Date: Wed, 2 Oct 2024 15:57:11 +0200 Subject: [PATCH 2/2] regions_mm: Determine size of the mapped memory region before unmapping The memory block allocated by the allocator may be larger than the requested one. 
If the allocated block size exceeds the memory page size, only enough pages are mapped to satisfy the request. Thanks to this optimization, it doesn't map memory pages that will not be used. When freeing memory, the allocator knows only the size of the allocated block. Information about the number of pages actually mapped isn't stored anywhere. Before unmapping memory, determine the number of mapped pages to avoid an error when trying to unmap unmapped memory. Signed-off-by: Adrian Warecki --- zephyr/lib/regions_mm.c | 45 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/zephyr/lib/regions_mm.c b/zephyr/lib/regions_mm.c index 94e39a0910e6..7b391284d824 100644 --- a/zephyr/lib/regions_mm.c +++ b/zephyr/lib/regions_mm.c @@ -279,6 +279,44 @@ static bool vmh_get_map_region_boundaries(struct sys_mem_blocks *blocks, const v return true; } +/** + * @brief Determine the size of the mapped memory region. + * + * This function calculates the size of a mapped memory region starting from the given address. + * It uses a binary search algorithm to find the boundary of the mapped region by checking if + * pages are mapped or not. + * + * @param addr Starting address of the memory region. + * @param size Pointer to the size of the memory region. This value will be updated to reflect + * the size of the mapped region. 
+ * + * @retval None + */ +static void vmh_get_mapped_size(void *addr, size_t *size) +{ + int ret; + uintptr_t check, unused; + uintptr_t bottom, top; + + if (*size <= CONFIG_MM_DRV_PAGE_SIZE) + return; + + bottom = (POINTER_TO_UINT(addr)); + top = bottom + *size; + check = top - CONFIG_MM_DRV_PAGE_SIZE; + while (top - bottom > CONFIG_MM_DRV_PAGE_SIZE) { + ret = sys_mm_drv_page_phys_get(UINT_TO_POINTER(check), &unused); + if (!ret) + bottom = check; /* Page is mapped */ + else + top = check; /* Page is unmapped */ + + check = ALIGN_DOWN(bottom / 2 + top / 2, CONFIG_MM_DRV_PAGE_SIZE); + } + + *size = top - POINTER_TO_UINT(addr); +} + /** * @brief Maps memory pages for a memory region if they have not been previously mapped for other * allocations. @@ -326,8 +364,11 @@ static int vmh_unmap_region(struct sys_mem_blocks *region, void *ptr, size_t siz const size_t block_size = 1 << region->info.blk_sz_shift; uintptr_t begin; - if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) - return sys_mm_drv_unmap_region(ptr, ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE)); + if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) { + size = ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE); + vmh_get_mapped_size(ptr, &size); + return sys_mm_drv_unmap_region(ptr, size); + } if (vmh_get_map_region_boundaries(region, ptr, size, &begin, &size)) return sys_mm_drv_unmap_region((void *)begin, size);