55 changes: 53 additions & 2 deletions zephyr/lib/regions_mm.c
@@ -279,6 +279,44 @@ static bool vmh_get_map_region_boundaries(struct sys_mem_blocks *blocks, const v
	return true;
}

/**
 * @brief Determine the size of the mapped memory region.
 *
 * This function calculates the size of a mapped memory region starting from the given address.
 * It uses a binary search to find the boundary of the mapped region by checking whether
 * successive pages are mapped.
 *
 * @param addr Starting address of the memory region.
 * @param size Pointer to the size of the memory region. This value will be updated to reflect
 *             the size of the mapped region.
 *
 * @retval None
 */
static void vmh_get_mapped_size(void *addr, size_t *size)
{
	int ret;
	uintptr_t check, unused;
	uintptr_t bottom, top;

	if (*size <= CONFIG_MM_DRV_PAGE_SIZE)
		return;

	bottom = (POINTER_TO_UINT(addr));
	top = bottom + *size;
	check = top - CONFIG_MM_DRV_PAGE_SIZE;
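	/* Halve the search interval until it is exactly one page wide: bottom
	 * always tracks a page confirmed to be mapped (or the region start),
	 * top a page confirmed to be unmapped (or the region end).
	 */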
	while (top - bottom > CONFIG_MM_DRV_PAGE_SIZE) {
		ret = sys_mm_drv_page_phys_get(UINT_TO_POINTER(check), &unused);
		if (!ret)
			bottom = check; /* Page is mapped */
		else
			top = check; /* Page is unmapped */

		check = ALIGN_DOWN(bottom / 2 + top / 2, CONFIG_MM_DRV_PAGE_SIZE);
	}

	*size = top - POINTER_TO_UINT(addr);
}
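
For reference, here is a minimal standalone sketch of the same halving search, run against a simulated 16-page region in which only the first 5 pages are mapped. It is illustrative only and not part of the patch: PAGE_SIZE, the page_is_mapped() mock (standing in for sys_mm_drv_page_phys_get()) and the local ALIGN_DOWN are all stand-ins for the real Zephyr definitions.

/* Standalone illustration, not part of the patch: the halving search above,
 * run over a simulated 16-page region where only the first 5 pages are mapped.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE	4096u
#define NUM_PAGES	16u
#define MAPPED_PAGES	5u

/* Local stand-in for the Zephyr ALIGN_DOWN() macro. */
#define ALIGN_DOWN(x, a) ((x) & ~((uintptr_t)(a) - 1))

/* Mock for sys_mm_drv_page_phys_get(): pretend only the first MAPPED_PAGES
 * pages starting at base are mapped.
 */
static bool page_is_mapped(uintptr_t base, uintptr_t addr)
{
	return (addr - base) / PAGE_SIZE < MAPPED_PAGES;
}

int main(void)
{
	uintptr_t base = 0x100000;
	size_t size = NUM_PAGES * PAGE_SIZE;
	uintptr_t bottom = base;
	uintptr_t top = bottom + size;
	uintptr_t check = top - PAGE_SIZE;

	while (top - bottom > PAGE_SIZE) {
		if (page_is_mapped(base, check))
			bottom = check; /* page mapped: boundary lies above */
		else
			top = check; /* page unmapped: boundary lies below */

		check = ALIGN_DOWN(bottom / 2 + top / 2, PAGE_SIZE);
	}

	/* Prints "mapped size: 20480", i.e. 5 pages, matching the mock. */
	printf("mapped size: %zu\n", (size_t)(top - base));
	return 0;
}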

/**
 * @brief Maps memory pages for a memory region if they have not been previously mapped for other
 * allocations.
@@ -326,8 +364,11 @@ static int vmh_unmap_region(struct sys_mem_blocks *region, void *ptr, size_t siz
	const size_t block_size = 1 << region->info.blk_sz_shift;
	uintptr_t begin;

-	if (block_size >= CONFIG_MM_DRV_PAGE_SIZE)
-		return sys_mm_drv_unmap_region(ptr, ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE));
	if (block_size >= CONFIG_MM_DRV_PAGE_SIZE) {
		size = ALIGN_UP(size, CONFIG_MM_DRV_PAGE_SIZE);
		vmh_get_mapped_size(ptr, &size);
		return sys_mm_drv_unmap_region(ptr, size);
	}

	if (vmh_get_map_region_boundaries(region, ptr, size, &begin, &size))
		return sys_mm_drv_unmap_region((void *)begin, size);
@@ -617,6 +658,16 @@ int vmh_free(struct vmh_heap *heap, void *ptr)
	if (retval)
		return retval;

	/* Platforms based on xtensa have a non-coherent cache between cores. Before releasing
	 * a memory block, the cache must be invalidated: the block may be reallocated by
	 * another core, and a later cache writeback by the previous owner would overwrite the
	 * new owner's data in main memory. The cache is invalidated by sys_mm_drv_unmap_region()
	 * when a memory page is unmapped, so there is no need to invalidate it here when
	 * releasing buffers of at least a page in size.
	 */
	if (size_to_free < CONFIG_MM_DRV_PAGE_SIZE)
		sys_cache_data_invd_range(ptr, size_to_free);

	return vmh_unmap_region(heap->physical_blocks_allocators[mem_block_iter], ptr,
				size_to_free);
}
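
As a quick standalone illustration of the rule in the comment above (a sketch only, not part of the patch: PAGE_SIZE stands in for CONFIG_MM_DRV_PAGE_SIZE, and the strings merely name the call responsible in each case), here is which path drops a freed block's cache lines depending on its size:

/* Standalone sketch, not part of the patch: for a freed block of a given
 * size, report which mechanism invalidates its (possibly stale) cache lines,
 * following the rule described in the patch comment.
 */
#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096u /* stand-in for CONFIG_MM_DRV_PAGE_SIZE */

static const char *invalidation_path(size_t size_to_free)
{
	/* Blocks smaller than a page are invalidated explicitly in vmh_free();
	 * blocks of at least a page are invalidated by sys_mm_drv_unmap_region()
	 * when their pages are unmapped.
	 */
	return size_to_free < PAGE_SIZE ?
		"sys_cache_data_invd_range() in vmh_free()" :
		"sys_mm_drv_unmap_region() during unmap";
}

int main(void)
{
	const size_t sizes[] = { 64, 2048, 4096, 16384 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("%6zu bytes -> %s\n", sizes[i], invalidation_path(sizes[i]));

	return 0;
}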