diff --git a/posix/include/sof/lib/vregion.h b/posix/include/sof/lib/vregion.h new file mode 100644 index 000000000000..908ef6ca93da --- /dev/null +++ b/posix/include/sof/lib/vregion.h @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Copyright(c) 2025 Intel Corporation. + +/* Pre Allocated Contiguous Virtual Region */ +#ifndef __SOF_LIB_VREGION_H__ +#define __SOF_LIB_VREGION_H__ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct vregion; + +/** + * @brief Create a new virtual region instance. + * + * Create a new virtual region instance with specified static, dynamic, and shared static sizes + * plus an optional read-only text partition and optional shared static partition. + * Total size is the sum of static, dynamic, shared static, and text sizes. + * + * @param[in] lifetime_size Size of the virtual region lifetime partition. + * @param[in] interim_size Size of the virtual region interim partition. + * @param[in] lifetime_shared_size Size of the virtual region shared lifetime partition. + * @param[in] interim_shared_size Size of the virtual region shared interim partition. + * @param[in] text_size Size of the optional read-only text partition. + * @return struct vregion* Pointer to the new virtual region instance, or NULL on failure. + */ +struct vregion *vregion_create(size_t lifetime_size, size_t interim_size, + size_t lifetime_shared_size, size_t interim_shared_size, + size_t text_size); + +/** + * @brief Destroy a virtual region instance. + * + * Free all associated resources and deallocate the virtual region instance. + * + * @param[in] vr Pointer to the virtual region instance to destroy. + */ +void vregion_destroy(struct vregion *vr); + +/** + * @brief Memory types for virtual region allocations. + * Used to specify the type of memory allocation within a virtual region. 
+ */ +enum vregion_mem_type { + VREGION_MEM_TYPE_INTERIM, /* interim allocation that can be freed */ + VREGION_MEM_TYPE_LIFETIME, /* lifetime allocation */ + VREGION_MEM_TYPE_INTERIM_SHARED, /* shared interim allocation */ + VREGION_MEM_TYPE_LIFETIME_SHARED /* shared lifetime allocation */ +}; +void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size); + +/** + * @brief Allocate aligned memory from the specified virtual region. + * + * Allocate aligned memory from the specified virtual region based on the memory type. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] type Type of memory to allocate (static, dynamic, or shared static). + * @param[in] size Size of memory to allocate in bytes. + * @param[in] alignment Alignment of memory to allocate in bytes. + * @return void* Pointer to the allocated memory, or NULL on failure. + */ +void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment); + +/** + * @brief Free memory allocated from the specified virtual region. + * + * Free memory previously allocated from the specified virtual region. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] ptr Pointer to the memory to free. + */ +void vregion_free(struct vregion *vr, void *ptr); + +/** + * @brief Log virtual region memory usage. + * + * @param[in] vr Pointer to the virtual region instance. 
+ */ +void vregion_info(struct vregion *vr); + +#ifdef __cplusplus +} +#endif + +#endif /* __SOF_LIB_VREGION_H__ */ diff --git a/scripts/sof-testbench-helper.sh b/scripts/sof-testbench-helper.sh index 5de7d8d43a7b..d1726c5c1755 100755 --- a/scripts/sof-testbench-helper.sh +++ b/scripts/sof-testbench-helper.sh @@ -32,8 +32,11 @@ usage() { } if [ -z "${SOF_WORKSPACE}" ]; then - echo "Error: environment variable SOF_WORKSPACE need to be set to top level sof directory" - exit 1 + # fallback to the script directory default path + SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd) + SOF_REPO=$(dirname "$SCRIPT_DIR") + SOF_WORKSPACE="$SOF_REPO/../" + echo "Using default SOF environment at $SOF_WORKSPACE" fi OUTWAV= diff --git a/scripts/vscode-task.sh b/scripts/vscode-task.sh index b5a3ba1cecd5..c507b00bd15c 100755 --- a/scripts/vscode-task.sh +++ b/scripts/vscode-task.sh @@ -5,17 +5,16 @@ # Simple helper script for vscode task support. # Current vscode tasks have difficulty executing multiple commands. -# check if Zephyr environment is set up -if [ ! -z "$ZEPHYR_BASE" ]; then - VENV_DIR="$ZEPHYR_BASE/.venv" - echo "Using Zephyr environment at $ZEPHYR_BASE" -elif [ ! -z "$SOF_WORKSPACE" ]; then - VENV_DIR="$SOF_WORKSPACE/zephyr/.venv" - echo "Using SOF/Zephyr environment at $SOF_WORKSPACE" +# check if SOF workspace environment is set up +if [ ! 
-z "$SOF_WORKSPACE" ]; then + VENV_DIR="$SOF_WORKSPACE/.venv" + echo "Using SOF environment at $SOF_WORKSPACE" else - # fallback to the zephyr default from the getting started guide - VENV_DIR="$HOME/zephyrproject/.venv" - echo "Using default Zephyr environment at $VENV_DIR" + # fallback to the script directory default path + SCRIPT_DIR=$(cd "$(dirname "$0")" && pwd) + SOF_REPO=$(dirname "$SCRIPT_DIR") + VENV_DIR="$SOF_REPO/../.venv" + echo "Using default SOF environment at $VENV_DIR" fi # start the virtual environment diff --git a/scripts/xtensa-build-zephyr.py b/scripts/xtensa-build-zephyr.py index 7610f81c1b81..ac70acc1fe0b 100755 --- a/scripts/xtensa-build-zephyr.py +++ b/scripts/xtensa-build-zephyr.py @@ -1159,6 +1159,16 @@ def install_platform(platform, sof_output_dir, platf_build_environ, platform_wco os.makedirs(alias_key_dir, exist_ok=True) symlink_or_copy(install_key_dir, output_fwname, alias_key_dir, alias_fwname) + + # Also create the "plain" sof-.ri symlink in the + # sof//sof-ipc4/ directory, so that when + # copying the entire sof//sof-ipc4 directory to + # the target, all platforms are there. 
+ alias_vendor_dir = pathlib.Path(sof_output_dir, p_alias).parent + alias_ipc4_dir = pathlib.Path(alias_vendor_dir, p_alias) + alias_install_key_dir = alias_ipc4_dir / "community" + os.makedirs(alias_ipc4_dir, exist_ok=True) + symlink_or_copy(alias_install_key_dir, alias_fwname, alias_ipc4_dir, alias_fwname) else: # non deployable builds and IPC3 deployable builds are using the same symlink scheme # The production key is usually different diff --git a/src/audio/module_adapter/CMakeLists.txt b/src/audio/module_adapter/CMakeLists.txt index e341e90d872c..36e41d5682f8 100644 --- a/src/audio/module_adapter/CMakeLists.txt +++ b/src/audio/module_adapter/CMakeLists.txt @@ -1,9 +1,16 @@ # SPDX-License-Identifier: BSD-3-Clause if(CONFIG_IPC_MAJOR_3) - add_local_sources(sof module_adapter.c module_adapter_ipc3.c module/generic.c) + add_local_sources(sof module_adapter.c module_adapter_ipc3.c module/generic.c + module/memory-common.c module/memory-heap.c) elseif(CONFIG_IPC_MAJOR_4) - add_local_sources(sof module_adapter.c module_adapter_ipc4.c module/generic.c) + add_local_sources(sof module_adapter.c module_adapter_ipc4.c module/generic.c + module/memory-common.c) +if(CONFIG_SOF_VREGIONS) + add_local_sources(sof module/memory-regions.c) +else() + add_local_sources(sof module/memory-heap.c) +endif() endif() is_zephyr(zephyr) diff --git a/src/audio/module_adapter/module/cadence.c b/src/audio/module_adapter/module/cadence.c index 05717eab6932..64a6e9f50927 100644 --- a/src/audio/module_adapter/module/cadence.c +++ b/src/audio/module_adapter/module/cadence.c @@ -293,8 +293,8 @@ static int cadence_codec_init(struct processing_module *mod) } /* allocate memory for runtime set up config */ - codec->cfg.data = rmalloc(SOF_MEM_FLAG_USER, - cfg->param_size); + codec->cfg.data = mod_alloc_ext(mod, SOF_MEM_FLAG_USER, + cfg->param_size, 0); if (!codec->cfg.data) { comp_err(dev, "failed to alloc runtime setup config"); ret = -ENOMEM; @@ -326,7 +326,7 @@ static int cadence_codec_init(struct 
processing_module *mod) return 0; free_cfg2: - rfree(codec->cfg.data); + mod_free(mod, codec->cfg.data); free_cfg: rfree(setup_cfg->data); free: @@ -866,7 +866,7 @@ static int cadence_codec_free(struct processing_module *mod) { struct cadence_codec_data *cd = module_get_private_data(mod); - rfree(cd->setup_cfg.data); + mod_free(mod, cd->setup_cfg.data); mod_free_all(mod); rfree(cd->self); rfree(cd); diff --git a/src/audio/module_adapter/module/generic.c b/src/audio/module_adapter/module/generic.c index 1f0862d2a590..59bacc29e17e 100644 --- a/src/audio/module_adapter/module/generic.c +++ b/src/audio/module_adapter/module/generic.c @@ -47,13 +47,13 @@ int module_load_config(struct comp_dev *dev, const void *cfg, size_t size) if (!dst->data) { /* No space for config available yet, allocate now */ - dst->data = rballoc(SOF_MEM_FLAG_USER, size); + dst->data = mod_alloc_ext(mod, SOF_MEM_FLAG_USER, size, DCACHE_LINE_SIZE); } else if (dst->size != size) { /* The size allocated for previous config doesn't match the new one. * Free old container and allocate new one. 
*/ - rfree(dst->data); - dst->data = rballoc(SOF_MEM_FLAG_USER, size); + mod_free(mod, dst->data); + dst->data = mod_alloc_ext(mod, SOF_MEM_FLAG_USER, size, DCACHE_LINE_SIZE); } if (!dst->data) { comp_err(dev, "failed to allocate space for setup config."); @@ -71,351 +71,6 @@ int module_load_config(struct comp_dev *dev, const void *cfg, size_t size) return ret; } -static void mod_resource_init(struct processing_module *mod) -{ - struct module_data *md = &mod->priv; - /* Init memory list */ - list_init(&md->resources.res_list); - list_init(&md->resources.free_cont_list); - list_init(&md->resources.cont_chunk_list); - md->resources.heap_usage = 0; - md->resources.heap_high_water_mark = 0; -} - -int module_init(struct processing_module *mod) -{ - int ret; - struct comp_dev *dev = mod->dev; - const struct module_interface *const interface = dev->drv->adapter_ops; - - comp_dbg(dev, "entry"); - -#if CONFIG_IPC_MAJOR_3 - if (mod->priv.state == MODULE_INITIALIZED) - return 0; - if (mod->priv.state > MODULE_INITIALIZED) - return -EPERM; -#endif - if (!interface) { - comp_err(dev, "module interface not defined"); - return -EIO; - } - - /* check interface, there must be one and only one of processing procedure */ - if (!interface->init || - (!!interface->process + !!interface->process_audio_stream + - !!interface->process_raw_data < 1)) { - comp_err(dev, "comp is missing mandatory interfaces"); - return -EIO; - } - - mod_resource_init(mod); -#if CONFIG_MODULE_MEMORY_API_DEBUG && defined(__ZEPHYR__) - mod->priv.resources.rsrc_mngr = k_current_get(); -#endif - /* Now we can proceed with module specific initialization */ - ret = interface->init(mod); - if (ret) { - comp_err(dev, "error %d: module specific init failed", ret); - mod_free_all(mod); - return ret; - } - - comp_dbg(dev, "done"); -#if CONFIG_IPC_MAJOR_3 - mod->priv.state = MODULE_INITIALIZED; -#endif - - return 0; -} - -struct container_chunk { - struct list_item chunk_list; - struct module_resource 
containers[CONFIG_MODULE_MEMORY_API_CONTAINER_CHUNK_SIZE]; -}; - -static struct module_resource *container_get(struct processing_module *mod) -{ - struct module_resources *res = &mod->priv.resources; - struct k_heap *mod_heap = res->heap; - struct module_resource *container; - - if (list_is_empty(&res->free_cont_list)) { - struct container_chunk *chunk = sof_heap_alloc(mod_heap, 0, sizeof(*chunk), 0); - int i; - - if (!chunk) { - comp_err(mod->dev, "allocating more containers failed"); - return NULL; - } - - memset(chunk, 0, sizeof(*chunk)); - - list_item_append(&chunk->chunk_list, &res->cont_chunk_list); - for (i = 0; i < ARRAY_SIZE(chunk->containers); i++) - list_item_append(&chunk->containers[i].list, &res->free_cont_list); - } - - container = list_first_item(&res->free_cont_list, struct module_resource, list); - list_item_del(&container->list); - return container; -} - -static void container_put(struct processing_module *mod, struct module_resource *container) -{ - struct module_resources *res = &mod->priv.resources; - - list_item_append(&container->list, &res->free_cont_list); -} - -/** - * Allocates aligned buffer memory block for module. - * @param mod Pointer to the module this memory block is allocated for. - * @param bytes Size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the allocated memory or NULL if failed. - * - * The allocated memory is automatically freed when the module is - * unloaded. The back-end, rballoc(), always aligns the memory to - * PLATFORM_DCACHE_ALIGN at the minimum. 
- */ -void *mod_balloc_align(struct processing_module *mod, size_t size, size_t alignment) -{ - struct module_resources *res = &mod->priv.resources; - struct module_resource *container; - - MEM_API_CHECK_THREAD(res); - - container = container_get(mod); - if (!container) - return NULL; - - if (!size) { - comp_err(mod->dev, "requested allocation of 0 bytes."); - container_put(mod, container); - return NULL; - } - - /* Allocate buffer memory for module */ - void *ptr = sof_heap_alloc(res->heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_LARGE_BUFFER, - size, alignment); - - if (!ptr) { - comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", - size, alignment, dev_comp_id(mod->dev)); - container_put(mod, container); - return NULL; - } - /* Store reference to allocated memory */ - container->ptr = ptr; - container->size = size; - container->type = MOD_RES_HEAP; - list_item_prepend(&container->list, &res->res_list); - - res->heap_usage += size; - if (res->heap_usage > res->heap_high_water_mark) - res->heap_high_water_mark = res->heap_usage; - - return ptr; -} -EXPORT_SYMBOL(mod_balloc_align); - -/** - * Allocates aligned memory block with flags for module. - * @param mod Pointer to the module this memory block is allocated for. - * @param flags Allocator flags. - * @param bytes Size in bytes. - * @param alignment Alignment in bytes. - * @return Pointer to the allocated memory or NULL if failed. - * - * The allocated memory is automatically freed when the module is unloaded. 
- */ -void *mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t size, size_t alignment) -{ - struct module_resources *res = &mod->priv.resources; - struct module_resource *container; - - MEM_API_CHECK_THREAD(res); - - container = container_get(mod); - if (!container) - return NULL; - - if (!size) { - comp_err(mod->dev, "requested allocation of 0 bytes."); - container_put(mod, container); - return NULL; - } - - /* Allocate memory for module */ - void *ptr = sof_heap_alloc(res->heap, flags, size, alignment); - - if (!ptr) { - comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", - size, alignment, dev_comp_id(mod->dev)); - container_put(mod, container); - return NULL; - } - /* Store reference to allocated memory */ - container->ptr = ptr; - container->size = size; - container->type = MOD_RES_HEAP; - list_item_prepend(&container->list, &res->res_list); - - res->heap_usage += size; - if (res->heap_usage > res->heap_high_water_mark) - res->heap_high_water_mark = res->heap_usage; - - return ptr; -} -EXPORT_SYMBOL(mod_alloc_ext); - -/** - * Creates a blob handler and releases it when the module is unloaded - * @param mod Pointer to module this memory block is allocated for. - * @return Pointer to the created data blob handler - * - * Like comp_data_blob_handler_new() but the handler is automatically freed. 
- */ -#if CONFIG_COMP_BLOB -struct comp_data_blob_handler *mod_data_blob_handler_new(struct processing_module *mod) -{ - struct module_resources *res = &mod->priv.resources; - struct comp_data_blob_handler *bhp; - struct module_resource *container; - - MEM_API_CHECK_THREAD(res); - - container = container_get(mod); - if (!container) - return NULL; - - bhp = comp_data_blob_handler_new_ext(mod->dev, false, NULL, NULL); - if (!bhp) { - container_put(mod, container); - return NULL; - } - - container->bhp = bhp; - container->size = 0; - container->type = MOD_RES_BLOB_HANDLER; - list_item_prepend(&container->list, &res->res_list); - - return bhp; -} -EXPORT_SYMBOL(mod_data_blob_handler_new); -#endif - -/** - * Make a module associated shared SRAM copy of DRAM read-only data. - * @param mod Pointer to module this copy is allocated for. - * @return Pointer to the SRAM copy. - * - * Like fast_get() but the handler is automatically freed. - */ -#if CONFIG_FAST_GET -const void *mod_fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size) -{ - struct module_resources *res = &mod->priv.resources; - struct module_resource *container; - const void *ptr; - - MEM_API_CHECK_THREAD(res); - - container = container_get(mod); - if (!container) - return NULL; - - ptr = fast_get(dram_ptr, size); - if (!ptr) { - container_put(mod, container); - return NULL; - } - - container->sram_ptr = ptr; - container->size = 0; - container->type = MOD_RES_FAST_GET; - list_item_prepend(&container->list, &res->res_list); - - return ptr; -} -EXPORT_SYMBOL(mod_fast_get); -#endif - -static int free_contents(struct processing_module *mod, struct module_resource *container) -{ - struct module_resources *res = &mod->priv.resources; - - switch (container->type) { - case MOD_RES_HEAP: - sof_heap_free(res->heap, container->ptr); - res->heap_usage -= container->size; - return 0; -#if CONFIG_COMP_BLOB - case MOD_RES_BLOB_HANDLER: - comp_data_blob_handler_free(container->bhp); - return 0; 
-#endif -#if CONFIG_FAST_GET - case MOD_RES_FAST_GET: - fast_put(container->sram_ptr); - return 0; -#endif - default: - comp_err(mod->dev, "Unknown resource type: %d", container->type); - } - return -EINVAL; -} - -/** - * Frees the memory block removes it from module's book keeping. - * @param mod Pointer to module this memory block was allocated for. - * @param ptr Pointer to the memory block. - */ -int mod_free(struct processing_module *mod, const void *ptr) -{ - struct module_resources *res = &mod->priv.resources; - struct module_resource *container; - struct list_item *res_list; - - MEM_API_CHECK_THREAD(res); - if (!ptr) - return 0; - - /* Find which container keeps this memory */ - list_for_item(res_list, &res->res_list) { - container = container_of(res_list, struct module_resource, list); - if (container->ptr == ptr) { - int ret = free_contents(mod, container); - - list_item_del(&container->list); - container_put(mod, container); - return ret; - } - } - - comp_err(mod->dev, "error: could not find memory pointed by %p", ptr); - - return -EINVAL; -} -EXPORT_SYMBOL(mod_free); - -#if CONFIG_COMP_BLOB -void mod_data_blob_handler_free(struct processing_module *mod, struct comp_data_blob_handler *dbh) -{ - mod_free(mod, (void *)dbh); -} -EXPORT_SYMBOL(mod_data_blob_handler_free); -#endif - -#if CONFIG_FAST_GET -void mod_fast_put(struct processing_module *mod, const void *sram_ptr) -{ - mod_free(mod, sram_ptr); -} -EXPORT_SYMBOL(mod_fast_put); -#endif - int module_prepare(struct processing_module *mod, struct sof_source **sources, int num_of_sources, struct sof_sink **sinks, int num_of_sinks) @@ -445,7 +100,7 @@ int module_prepare(struct processing_module *mod, * as it has been applied during the procedure - it is safe to * free it. 
*/ - rfree(md->cfg.data); + mod_free(mod, md->cfg.data); md->cfg.avail = false; md->cfg.data = NULL; @@ -567,7 +222,7 @@ int module_reset(struct processing_module *mod) md->cfg.avail = false; md->cfg.size = 0; - rfree(md->cfg.data); + mod_free(mod, md->cfg.data); md->cfg.data = NULL; #if CONFIG_IPC_MAJOR_3 @@ -580,74 +235,6 @@ int module_reset(struct processing_module *mod) return 0; } -/** - * Frees all the resources registered for this module - * @param mod Pointer to module that should have its resource freed. - * - * This function is called automatically when the module is unloaded. - */ -void mod_free_all(struct processing_module *mod) -{ - struct module_resources *res = &mod->priv.resources; - struct k_heap *mod_heap = res->heap; - struct list_item *list; - struct list_item *_list; - - MEM_API_CHECK_THREAD(res); - /* Free all contents found in used containers */ - list_for_item(list, &res->res_list) { - struct module_resource *container = - container_of(list, struct module_resource, list); - - free_contents(mod, container); - } - - /* - * We do not need to remove the containers from res_list in - * the loop above or go through free_cont_list as all the - * containers are anyway freed in the loop below, and the list - * heads are reinitialized when mod_resource_init() is called. 
- */ - list_for_item_safe(list, _list, &res->cont_chunk_list) { - struct container_chunk *chunk = - container_of(list, struct container_chunk, chunk_list); - - list_item_del(&chunk->chunk_list); - sof_heap_free(mod_heap, chunk); - } - - /* Make sure resource lists and accounting are reset */ - mod_resource_init(mod); -} -EXPORT_SYMBOL(mod_free_all); - -int module_free(struct processing_module *mod) -{ - const struct module_interface *const ops = mod->dev->drv->adapter_ops; - struct module_data *md = &mod->priv; - int ret = 0; - - if (ops->free) { - ret = ops->free(mod); - if (ret) - comp_warn(mod->dev, "error: %d", ret); - } - - /* Free all memory shared by module_adapter & module */ - md->cfg.avail = false; - md->cfg.size = 0; - rfree(md->cfg.data); - md->cfg.data = NULL; - if (md->runtime_params) { - rfree(md->runtime_params); - md->runtime_params = NULL; - } -#if CONFIG_IPC_MAJOR_3 - md->state = MODULE_DISABLED; -#endif - return ret; -} - /* * \brief Set module configuration - Common method to assemble large configuration message * \param[in] mod - struct processing_module pointer @@ -706,7 +293,8 @@ int module_set_configuration(struct processing_module *mod, } /* Allocate buffer for new params */ - md->runtime_params = rballoc(SOF_MEM_FLAG_USER, md->new_cfg_size); + md->runtime_params = mod_alloc_ext(mod, SOF_MEM_FLAG_USER, md->new_cfg_size, + DCACHE_LINE_SIZE); if (!md->runtime_params) { comp_err(dev, "space allocation for new params failed"); return -ENOMEM; @@ -747,7 +335,7 @@ int module_set_configuration(struct processing_module *mod, md->new_cfg_size = 0; if (md->runtime_params) - rfree(md->runtime_params); + mod_free(mod, md->runtime_params); md->runtime_params = NULL; return ret; diff --git a/src/audio/module_adapter/module/memory-common.c b/src/audio/module_adapter/module/memory-common.c new file mode 100644 index 000000000000..5e4361cb8caf --- /dev/null +++ b/src/audio/module_adapter/module/memory-common.c @@ -0,0 +1,105 @@ +// SPDX-License-Identifier: 
BSD-3-Clause +// +// Copyright(c) 2020 Intel Corporation. All rights reserved. +// +// Author: Marcin Rajwa + +/* + * \file generic.c + * \brief Generic Codec API + * \author Marcin Rajwa + * + */ + +#include +#include +#include +#include + +LOG_MODULE_DECLARE(module_adapter, CONFIG_SOF_LOG_LEVEL); + +int module_init(struct processing_module *mod) +{ + int ret; + struct comp_dev *dev = mod->dev; + const struct module_interface *const interface = dev->drv->adapter_ops; + + comp_dbg(dev, "entry"); + +#if CONFIG_IPC_MAJOR_3 + if (mod->priv.state == MODULE_INITIALIZED) + return 0; + if (mod->priv.state > MODULE_INITIALIZED) + return -EPERM; +#endif + if (!interface) { + comp_err(dev, "module interface not defined"); + return -EIO; + } + + /* check interface, there must be one and only one of processing procedure */ + if (!interface->init || + (!!interface->process + !!interface->process_audio_stream + + !!interface->process_raw_data < 1)) { + comp_err(dev, "comp is missing mandatory interfaces"); + return -EIO; + } + + /* Now we can proceed with module specific initialization */ + ret = interface->init(mod); + if (ret) { + comp_err(dev, "error %d: module specific init failed", ret); + mod_free_all(mod); + return ret; + } + + comp_dbg(dev, "done"); +#if CONFIG_IPC_MAJOR_3 + mod->priv.state = MODULE_INITIALIZED; +#endif + + return 0; +} + +#if CONFIG_COMP_BLOB +void mod_data_blob_handler_free(struct processing_module *mod, struct comp_data_blob_handler *dbh) +{ + mod_free(mod, (void *)dbh); +} +EXPORT_SYMBOL(mod_data_blob_handler_free); +#endif + +#if CONFIG_FAST_GET +void mod_fast_put(struct processing_module *mod, const void *sram_ptr) +{ + mod_free(mod, sram_ptr); +} +EXPORT_SYMBOL(mod_fast_put); +#endif + +int module_free(struct processing_module *mod) +{ + const struct module_interface *const ops = mod->dev->drv->adapter_ops; + struct module_data *md = &mod->priv; + int ret = 0; + + if (ops->free) { + ret = ops->free(mod); + if (ret) + comp_warn(mod->dev, "error: 
%d", ret); + } + + /* Free all memory shared by module_adapter & module */ + md->cfg.avail = false; + md->cfg.size = 0; + mod_free(mod, md->cfg.data); + md->cfg.data = NULL; + if (md->runtime_params) { + mod_free(mod, md->runtime_params); + md->runtime_params = NULL; + } +#if CONFIG_IPC_MAJOR_3 + md->state = MODULE_DISABLED; +#endif + return ret; +} diff --git a/src/audio/module_adapter/module/memory-heap.c b/src/audio/module_adapter/module/memory-heap.c new file mode 100644 index 000000000000..e9f16b7c3ee1 --- /dev/null +++ b/src/audio/module_adapter/module/memory-heap.c @@ -0,0 +1,457 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2020 Intel Corporation. All rights reserved. +// +// Author: Marcin Rajwa + +/* + * \file generic.c + * \brief Generic Codec API + * \author Marcin Rajwa + * + */ + +#include + +#include +#include +#include + +/* The __ZEPHYR__ condition is to keep cmocka tests working */ +#if CONFIG_MODULE_MEMORY_API_DEBUG && defined(__ZEPHYR__) +#define MEM_API_CHECK_THREAD(res) __ASSERT((res)->rsrc_mngr == k_current_get(), \ + "Module memory API operation from wrong thread") +#else +#define MEM_API_CHECK_THREAD(res) +#endif + +LOG_MODULE_DECLARE(module_adapter, CONFIG_SOF_LOG_LEVEL); + +void mod_resource_init(struct processing_module *mod) +{ + struct module_data *md = &mod->priv; + /* Init memory list */ + list_init(&md->resources.res_list); + list_init(&md->resources.free_cont_list); + list_init(&md->resources.cont_chunk_list); + md->resources.heap_usage = 0; + md->resources.heap_high_water_mark = 0; +} + +struct container_chunk { + struct list_item chunk_list; + struct module_resource containers[CONFIG_MODULE_MEMORY_API_CONTAINER_CHUNK_SIZE]; +}; + +static struct module_resource *container_get(struct processing_module *mod) +{ + struct module_resources *res = &mod->priv.resources; + struct module_resource *container; + + if (list_is_empty(&res->free_cont_list)) { + struct container_chunk *chunk = rzalloc(SOF_MEM_FLAG_USER, 
sizeof(*chunk)); + int i; + + if (!chunk) { + comp_err(mod->dev, "allocating more containers failed"); + return NULL; + } + + list_item_append(&chunk->chunk_list, &res->cont_chunk_list); + for (i = 0; i < ARRAY_SIZE(chunk->containers); i++) + list_item_append(&chunk->containers[i].list, &res->free_cont_list); + } + + container = list_first_item(&res->free_cont_list, struct module_resource, list); + list_item_del(&container->list); + return container; +} + +static void container_put(struct processing_module *mod, struct module_resource *container) +{ + struct module_resources *res = &mod->priv.resources; + + list_item_append(&container->list, &res->free_cont_list); +} + +/** + * Allocates aligned buffer memory block for module. + * @param mod Pointer to the module this memory block is allocated for. + * @param bytes Size in bytes. + * @param alignment Alignment in bytes. + * @return Pointer to the allocated memory or NULL if failed. + * + * The allocated memory is automatically freed when the module is + * unloaded. The back-end, rballoc(), always aligns the memory to + * PLATFORM_DCACHE_ALIGN at the minimum. 
+ */ +void *mod_balloc_align(struct processing_module *mod, size_t size, size_t alignment) +{ + struct module_resources *res = &mod->priv.resources; + struct module_resource *container; + void *ptr; + + MEM_API_CHECK_THREAD(res); + + container = container_get(mod); + if (!container) + return NULL; + + if (!size) { + comp_err(mod->dev, "requested allocation of 0 bytes."); + container_put(mod, container); + return NULL; + } + + /* Allocate memory for module */ + ptr = rballoc_align(SOF_MEM_FLAG_USER, size, alignment); + + if (!ptr) { + comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", + size, alignment, dev_comp_id(mod->dev)); + container_put(mod, container); + return NULL; + } + /* Store reference to allocated memory */ + container->ptr = ptr; + container->size = size; + container->type = MOD_RES_HEAP; + list_item_prepend(&container->list, &res->res_list); + + res->heap_usage += size; + if (res->heap_usage > res->heap_high_water_mark) + res->heap_high_water_mark = res->heap_usage; + + return ptr; +} +EXPORT_SYMBOL(mod_balloc_align); + +/** + * Allocates aligned memory block with flags for module. + * @param mod Pointer to the module this memory block is allocated for. + * @param flags Allocator flags. + * @param bytes Size in bytes. + * @param alignment Alignment in bytes. + * @return Pointer to the allocated memory or NULL if failed. + * + * The allocated memory is automatically freed when the module is unloaded. 
+ */ +void *mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t size, size_t alignment) +{ + struct module_resources *res = &mod->priv.resources; + struct module_resource *container; + void *ptr; + + MEM_API_CHECK_THREAD(res); + + container = container_get(mod); + if (!container) + return NULL; + + if (!size) { + comp_err(mod->dev, "requested allocation of 0 bytes."); + container_put(mod, container); + return NULL; + } + + /* Allocate memory for module */ + ptr = rmalloc_align(flags, size, alignment); + + if (!ptr) { + comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", + size, alignment, dev_comp_id(mod->dev)); + container_put(mod, container); + return NULL; + } + /* Store reference to allocated memory */ + container->ptr = ptr; + container->size = size; + container->type = MOD_RES_HEAP; + list_item_prepend(&container->list, &res->res_list); + + res->heap_usage += size; + if (res->heap_usage > res->heap_high_water_mark) + res->heap_high_water_mark = res->heap_usage; + + return ptr; +} +EXPORT_SYMBOL(mod_alloc_ext); + +/** + * Creates a blob handler and releases it when the module is unloaded + * @param mod Pointer to module this memory block is allocated for. + * @return Pointer to the created data blob handler + * + * Like comp_data_blob_handler_new() but the handler is automatically freed. 
+ */ +#if CONFIG_COMP_BLOB +struct comp_data_blob_handler * +mod_data_blob_handler_new(struct processing_module *mod) +{ + struct module_resources *res = &mod->priv.resources; + struct comp_data_blob_handler *bhp; + struct module_resource *container; + + MEM_API_CHECK_THREAD(res); + + container = container_get(mod); + if (!container) + return NULL; + + bhp = comp_data_blob_handler_new_ext(mod->dev, false, NULL, NULL); + if (!bhp) { + container_put(mod, container); + return NULL; + } + + container->bhp = bhp; + container->size = 0; + container->type = MOD_RES_BLOB_HANDLER; + list_item_prepend(&container->list, &res->res_list); + + return bhp; +} +EXPORT_SYMBOL(mod_data_blob_handler_new); +#endif + +/** + * Make a module associated shared SRAM copy of DRAM read-only data. + * @param mod Pointer to module this copy is allocated for. + * @return Pointer to the SRAM copy. + * + * Like fast_get() but the handler is automatically freed. + */ +#if CONFIG_FAST_GET +const void *mod_fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size) +{ + struct module_resources *res = &mod->priv.resources; + struct module_resource *container; + const void *ptr; + + MEM_API_CHECK_THREAD(res); + + container = container_get(mod); + if (!container) + return NULL; + + ptr = fast_get(mod, dram_ptr, size); + if (!ptr) { + container_put(mod, container); + return NULL; + } + + container->sram_ptr = ptr; + container->size = 0; + container->type = MOD_RES_FAST_GET; + list_item_prepend(&container->list, &res->res_list); + + return ptr; +} +EXPORT_SYMBOL(mod_fast_get); +#endif + +static int free_contents(struct processing_module *mod, struct module_resource *container) +{ + struct module_resources *res = &mod->priv.resources; + + switch (container->type) { + case MOD_RES_HEAP: + rfree(container->ptr); + res->heap_usage -= container->size; + return 0; +#if CONFIG_COMP_BLOB + case MOD_RES_BLOB_HANDLER: + comp_data_blob_handler_free(container->bhp); + return 0; +#endif +#if 
CONFIG_FAST_GET + case MOD_RES_FAST_GET: + fast_put(mod, container->sram_ptr); + return 0; +#endif + default: + comp_err(mod->dev, "Unknown resource type: %d", container->type); + } + return -EINVAL; +} + +/** + * Frees the memory block removes it from module's book keeping. + * @param mod Pointer to module this memory block was allocated for. + * @param ptr Pointer to the memory block. + */ +int mod_free(struct processing_module *mod, const void *ptr) +{ + struct module_resources *res = &mod->priv.resources; + struct module_resource *container; + struct list_item *res_list; + + MEM_API_CHECK_THREAD(res); + if (!ptr) + return 0; + + /* Find which container keeps this memory */ + list_for_item(res_list, &res->res_list) { + container = container_of(res_list, struct module_resource, list); + if (container->ptr == ptr) { + int ret = free_contents(mod, container); + + list_item_del(&container->list); + container_put(mod, container); + return ret; + } + } + + comp_err(mod->dev, "error: could not find memory pointed by %p", ptr); + + return -EINVAL; +} +EXPORT_SYMBOL(mod_free); + +/** + * Frees all the resources registered for this module + * @param mod Pointer to module that should have its resource freed. + * + * This function is called automatically when the module is unloaded. + */ +void mod_free_all(struct processing_module *mod) +{ + struct module_resources *res = &mod->priv.resources; + struct list_item *list; + struct list_item *_list; + + MEM_API_CHECK_THREAD(res); + + /* Free all contents found in used containers */ + list_for_item(list, &res->res_list) { + struct module_resource *container = + container_of(list, struct module_resource, list); + + free_contents(mod, container); + } + + /* + * We do not need to remove the containers from res_list in + * the loop above or go through free_cont_list as all the + * containers are anyway freed in the loop below, and the list + * heads are reinitialized when mod_resource_init() is called. 
+ */ + list_for_item_safe(list, _list, &res->cont_chunk_list) { + struct container_chunk *chunk = + container_of(list, struct container_chunk, chunk_list); + + list_item_del(&chunk->chunk_list); + rfree(chunk); + } + + /* Make sure resource lists and accounting are reset */ + mod_resource_init(mod); +} +EXPORT_SYMBOL(mod_free_all); + +#if CONFIG_MM_DRV +#define PAGE_SZ CONFIG_MM_DRV_PAGE_SIZE +#else +#include +#define PAGE_SZ HOST_PAGE_SIZE +#endif + +static struct k_heap *module_adapter_dp_heap_new(const struct comp_ipc_config *config) +{ + /* src-lite with 8 channels has been seen allocating 14k in one go */ + /* FIXME: the size will be derived from configuration */ + const size_t heap_size = 20 * 1024; + + /* Keep uncached to match the default SOF heap! */ + uint8_t *mod_heap_mem = rballoc_align(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + heap_size, PAGE_SZ); + + if (!mod_heap_mem) + return NULL; + + struct k_heap *mod_heap = (struct k_heap *)mod_heap_mem; + const size_t heap_prefix_size = ALIGN_UP(sizeof(*mod_heap), 8); + void *mod_heap_buf = mod_heap_mem + heap_prefix_size; + + k_heap_init(mod_heap, mod_heap_buf, heap_size - heap_prefix_size); + + return mod_heap; +} + +struct processing_module *module_adapter_mem_alloc(const struct comp_driver *drv, + const struct comp_ipc_config *config, + struct pipeline *pipeline) +{ + struct k_heap *mod_heap; + /* + * For DP shared modules the struct processing_module object must be + * accessible from all cores. Unfortunately at this point there's no + * information of components the module will be bound to. So we need to + * allocate shared memory for each DP module. + * To be removed when pipeline 2.0 is ready. + */ + uint32_t flags = config->proc_domain == COMP_PROCESSING_DOMAIN_DP ? 
+ SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT : SOF_MEM_FLAG_USER; + + if (config->proc_domain == COMP_PROCESSING_DOMAIN_DP && IS_ENABLED(CONFIG_USERSPACE) && + !IS_ENABLED(CONFIG_SOF_USERSPACE_USE_DRIVER_HEAP)) { + mod_heap = module_adapter_dp_heap_new(config); + if (!mod_heap) { + comp_cl_err(drv, "Failed to allocate DP module heap"); + return NULL; + } + } else { + mod_heap = drv->user_heap; + } + + struct processing_module *mod = sof_heap_alloc(mod_heap, flags, sizeof(*mod), 0); + + if (!mod) { + comp_cl_err(drv, "failed to allocate memory for module"); + goto emod; + } + + memset(mod, 0, sizeof(*mod)); + mod->priv.resources.heap = mod_heap; + + /* + * Would be difficult to optimize the allocation to use cache. Only if + * the whole currently active topology is running on the primary core, + * then it can be cached. Effectively it can be only cached in + * single-core configurations. + */ + struct comp_dev *dev = sof_heap_alloc(mod_heap, SOF_MEM_FLAG_COHERENT, sizeof(*dev), 0); + + if (!dev) { + comp_cl_err(drv, "failed to allocate memory for comp_dev"); + goto err; + } + + memset(dev, 0, sizeof(*dev)); + comp_init(drv, dev, sizeof(*dev)); + dev->ipc_config = *config; + mod->dev = dev; + dev->mod = mod; + + return mod; + +err: + sof_heap_free(mod_heap, mod); +emod: + if (mod_heap != drv->user_heap) + rfree(mod_heap); + + return NULL; +} + +void module_adapter_mem_free(struct processing_module *mod) +{ + struct k_heap *mod_heap = mod->priv.resources.heap; + +#if CONFIG_IPC_MAJOR_4 + sof_heap_free(mod_heap, mod->priv.cfg.input_pins); +#endif + sof_heap_free(mod_heap, mod->dev); + sof_heap_free(mod_heap, mod); +} + diff --git a/src/audio/module_adapter/module/memory-regions.c b/src/audio/module_adapter/module/memory-regions.c new file mode 100644 index 000000000000..22f6f71f75c8 --- /dev/null +++ b/src/audio/module_adapter/module/memory-regions.c @@ -0,0 +1,254 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2020 Intel Corporation. 
All rights reserved. +// +// Author: Marcin Rajwa + +/* + * \file generic.c + * \brief Generic Codec API + * \author Marcin Rajwa + * + */ + +#include + +#include +#include +#include + +LOG_MODULE_DECLARE(module_adapter, CONFIG_SOF_LOG_LEVEL); + +/** + * Allocates aligned buffer memory block for module. + * @param mod Pointer to the module this memory block is allocated for. + * @param bytes Size in bytes. + * @param alignment Alignment in bytes. + * @return Pointer to the allocated memory or NULL if failed. + * + * The allocated memory is automatically freed when the module is + * unloaded. The back-end, rballoc(), always aligns the memory to + * PLATFORM_DCACHE_ALIGN at the minimum. + */ +void *mod_balloc_align(struct processing_module *mod, size_t size, size_t alignment) +{ + void *ptr; + + if (!size) { + comp_err(mod->dev, "requested allocation of 0 bytes."); + return NULL; + } + + /* do we need to use the dynamic heap or the static heap? */ + struct vregion *vregion = module_get_vregion(mod); + if (mod->priv.state != MODULE_INITIALIZED) { + /* lifetime allocator */ + ptr = vregion_alloc_align(vregion, VREGION_MEM_TYPE_LIFETIME, size, alignment); + } else { + /* interim allocator */ + ptr = vregion_alloc_align(vregion, VREGION_MEM_TYPE_INTERIM, size, alignment); + } + + if (!ptr) { + comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", + size, alignment, dev_comp_id(mod->dev)); + return NULL; + } + + return ptr; +} +EXPORT_SYMBOL(mod_balloc_align); + +/** + * Allocates aligned memory block with flags for module. + * @param mod Pointer to the module this memory block is allocated for. + * @param flags Allocator flags. + * @param bytes Size in bytes. + * @param alignment Alignment in bytes. + * @return Pointer to the allocated memory or NULL if failed. + * + * The allocated memory is automatically freed when the module is unloaded. 
+ */ +void *mod_alloc_ext(struct processing_module *mod, uint32_t flags, size_t size, size_t alignment) +{ + void *ptr; + + if (!size) { + comp_err(mod->dev, "requested allocation of 0 bytes."); + return NULL; + } + + /* do we need to use the dynamic heap or the static heap? */ + struct vregion *vregion = module_get_vregion(mod); + if (mod->priv.state != MODULE_INITIALIZED) { + /* static allocator */ + ptr = vregion_alloc_align(vregion, VREGION_MEM_TYPE_LIFETIME, size, alignment); + } else { + /* dynamic allocator */ + ptr = vregion_alloc_align(vregion, VREGION_MEM_TYPE_INTERIM, size, alignment); + } + + if (!ptr) { + comp_err(mod->dev, "Failed to alloc %zu bytes %zu alignment for comp %#x.", + size, alignment, dev_comp_id(mod->dev)); + return NULL; + } + + return ptr; +} +EXPORT_SYMBOL(mod_alloc_ext); + + +/** + * Creates a blob handler and releases it when the module is unloaded + * @param mod Pointer to module this memory block is allocated for. + * @return Pointer to the created data blob handler + * + * Like comp_data_blob_handler_new() but the handler is automatically freed. + */ +#if CONFIG_COMP_BLOB +struct comp_data_blob_handler * +mod_data_blob_handler_new(struct processing_module *mod) +{ + struct comp_data_blob_handler *bhp; + + bhp = comp_data_blob_handler_new_ext(mod->dev, false, NULL, NULL); + if (!bhp) { + return NULL; + } + + return bhp; +} +EXPORT_SYMBOL(mod_data_blob_handler_new); +#endif + +/** + * Make a module associated shared SRAM copy of DRAM read-only data. + * @param mod Pointer to module this copy is allocated for. + * @return Pointer to the SRAM copy. + * + * Like fast_get() but the handler is automatically freed. 
+ */ +#if CONFIG_FAST_GET +const void *mod_fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size) +{ + const void *ptr; + + ptr = fast_get(mod, dram_ptr, size); + if (!ptr) { + return NULL; + } + + return ptr; +} +EXPORT_SYMBOL(mod_fast_get); +#endif + +/** + * Frees the memory block removes it from module's book keeping. + * @param mod Pointer to module this memory block was allocated for. + * @param ptr Pointer to the memory block. + */ +int mod_free(struct processing_module *mod, const void *ptr) +{ + vregion_free(module_get_vregion(mod), (__sparse_force void *)ptr); + return 0; +} +EXPORT_SYMBOL(mod_free); + +/** + * Frees all the resources registered for this module + * @param mod Pointer to module that should have its resource freed. + * + * This function is called automatically when the module is unloaded. + */ +void mod_free_all(struct processing_module *mod) +{ + // TODO free the vregion for the module ? +} +EXPORT_SYMBOL(mod_free_all); + +struct processing_module *module_adapter_mem_alloc(const struct comp_driver *drv, + const struct comp_ipc_config *config, + struct pipeline *pipeline) +{ + struct comp_dev *dev = comp_alloc(drv, sizeof(*dev)); + struct processing_module *mod; + struct vregion *vregion; + + if (!dev) { + comp_cl_err(drv, "failed to allocate memory for comp_dev"); + return NULL; + } + + /* check for pipeline */ + if (!pipeline || !pipeline->vregion) { + comp_cl_err(drv, "failed to get pipeline"); + return NULL; + } + + /* TODO: determine if our user domain is different from the LL pipeline domain */ + if (config->proc_domain == COMP_PROCESSING_DOMAIN_DP) { + // TODO: get the text, heap, stack and shared sizes from topology too + /* create a vregion region for all resources */ + size_t interim_size = 0x4000; /* 16kB scratch */ + size_t lifetime_size = 0x20000; /* 128kB batch */ + size_t shared_size = 0x4000; /* 16kB shared */ + size_t text_size = 0x4000; /* 16kB text */ + + vregion = vregion_create(lifetime_size, 
interim_size, shared_size, + 0, text_size); + if (!vregion) { + //comp_err(dev, "failed to create vregion for DP module"); + goto err; + } + } else { + /* LL modules use pipeline vregion */ + vregion = pipeline->vregion; + } + + /* allocate module in correct vregion*/ + //TODO: add coherent flag for cross core DP modules + mod = vregion_alloc(vregion, VREGION_MEM_TYPE_LIFETIME, sizeof(*mod)); + if (!mod) { + comp_err(dev, "failed to allocate memory for module"); + goto err; + } + + if (!mod) { + comp_err(dev, "failed to allocate memory for module"); + goto err; + } + + dev->ipc_config = *config; + mod->dev = dev; + dev->mod = mod; + + /* set virtual region for DP module only otherwise we use pipeline vregion */ + if (config->proc_domain == COMP_PROCESSING_DOMAIN_DP) + mod->vregion = vregion; + + return mod; + +err: + //module_driver_heap_free(drv->user_heap, dev); // TODO + + return NULL; +} + +void module_adapter_mem_free(struct processing_module *mod) +{ + struct vregion *vregion = module_get_vregion(mod); + +#if CONFIG_IPC_MAJOR_4 + mod_free(mod, mod->priv.cfg.input_pins); +#endif + + mod_free(mod, mod->priv.cfg.input_pins); + //mod_free(mod, mod->dev); + vregion_free(vregion, mod); + + /* free the vregion if its a separate instance from the pipeline */ + if (mod->dev->pipeline->vregion != vregion) + vregion_destroy(vregion); +} diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c index cf36da4e1034..05e90394d014 100644 --- a/src/audio/module_adapter/module_adapter.c +++ b/src/audio/module_adapter/module_adapter.c @@ -45,111 +45,6 @@ struct comp_dev *module_adapter_new(const struct comp_driver *drv, return module_adapter_new_ext(drv, config, spec, NULL, NULL); } -#if CONFIG_MM_DRV -#define PAGE_SZ CONFIG_MM_DRV_PAGE_SIZE -#else -#include -#define PAGE_SZ HOST_PAGE_SIZE -#endif - -static struct k_heap *module_adapter_dp_heap_new(const struct comp_ipc_config *config) -{ - /* src-lite with 8 channels has been seen 
allocating 14k in one go */ - /* FIXME: the size will be derived from configuration */ - const size_t heap_size = 20 * 1024; - - /* Keep uncached to match the default SOF heap! */ - uint8_t *mod_heap_mem = rballoc_align(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - heap_size, PAGE_SZ); - - if (!mod_heap_mem) - return NULL; - - struct k_heap *mod_heap = (struct k_heap *)mod_heap_mem; - const size_t heap_prefix_size = ALIGN_UP(sizeof(*mod_heap), 8); - void *mod_heap_buf = mod_heap_mem + heap_prefix_size; - - k_heap_init(mod_heap, mod_heap_buf, heap_size - heap_prefix_size); - - return mod_heap; -} - -static struct processing_module *module_adapter_mem_alloc(const struct comp_driver *drv, - const struct comp_ipc_config *config) -{ - struct k_heap *mod_heap; - /* - * For DP shared modules the struct processing_module object must be - * accessible from all cores. Unfortunately at this point there's no - * information of components the module will be bound to. So we need to - * allocate shared memory for each DP module. - * To be removed when pipeline 2.0 is ready. - */ - uint32_t flags = config->proc_domain == COMP_PROCESSING_DOMAIN_DP ? - SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT : SOF_MEM_FLAG_USER; - - if (config->proc_domain == COMP_PROCESSING_DOMAIN_DP && IS_ENABLED(CONFIG_USERSPACE) && - !IS_ENABLED(CONFIG_SOF_USERSPACE_USE_DRIVER_HEAP)) { - mod_heap = module_adapter_dp_heap_new(config); - if (!mod_heap) { - comp_cl_err(drv, "Failed to allocate DP module heap"); - return NULL; - } - } else { - mod_heap = drv->user_heap; - } - - struct processing_module *mod = sof_heap_alloc(mod_heap, flags, sizeof(*mod), 0); - - if (!mod) { - comp_cl_err(drv, "failed to allocate memory for module"); - goto emod; - } - - memset(mod, 0, sizeof(*mod)); - mod->priv.resources.heap = mod_heap; - - /* - * Would be difficult to optimize the allocation to use cache. Only if - * the whole currently active topology is running on the primary core, - * then it can be cached. 
Effectively it can be only cached in - * single-core configurations. - */ - struct comp_dev *dev = sof_heap_alloc(mod_heap, SOF_MEM_FLAG_COHERENT, sizeof(*dev), 0); - - if (!dev) { - comp_cl_err(drv, "failed to allocate memory for comp_dev"); - goto err; - } - - memset(dev, 0, sizeof(*dev)); - comp_init(drv, dev, sizeof(*dev)); - dev->ipc_config = *config; - mod->dev = dev; - dev->mod = mod; - - return mod; - -err: - sof_heap_free(mod_heap, mod); -emod: - if (mod_heap != drv->user_heap) - rfree(mod_heap); - - return NULL; -} - -static void module_adapter_mem_free(struct processing_module *mod) -{ - struct k_heap *mod_heap = mod->priv.resources.heap; - -#if CONFIG_IPC_MAJOR_4 - sof_heap_free(mod_heap, mod->priv.cfg.input_pins); -#endif - sof_heap_free(mod_heap, mod->dev); - sof_heap_free(mod_heap, mod); -} - /* * \brief Create a module adapter component. * \param[in] drv - component driver pointer. @@ -169,6 +64,7 @@ struct comp_dev *module_adapter_new_ext(const struct comp_driver *drv, int ret; struct module_config *dst; const struct module_interface *const interface = drv->adapter_ops; + struct pipeline *pipeline = NULL; comp_cl_dbg(drv, "start"); @@ -178,22 +74,49 @@ struct comp_dev *module_adapter_new_ext(const struct comp_driver *drv, return NULL; } - struct processing_module *mod = module_adapter_mem_alloc(drv, config); +#if CONFIG_IPC_MAJOR_4 + struct ipc_comp_dev *ipc_pipe; + struct ipc *ipc = ipc_get(); + + /* set the pipeline pointer if ipc_pipe is valid */ + ipc_pipe = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, config->pipeline_id, + IPC_COMP_IGNORE_REMOTE); + if (ipc_pipe) + pipeline = ipc_pipe->pipeline; +#endif + struct processing_module *mod = module_adapter_mem_alloc(drv, config, pipeline); if (!mod) return NULL; + dst = &mod->priv.cfg; module_set_private_data(mod, mod_priv); list_init(&mod->raw_data_buffers_list); +#if !CONFIG_SOF_VREGIONS + mod_resource_init(mod); +#endif +#if CONFIG_MODULE_MEMORY_API_DEBUG && defined(__ZEPHYR__) + 
mod->priv.resources.rsrc_mngr = k_current_get(); +#endif #if CONFIG_USERSPACE mod->user_ctx = user_ctx; #endif /* CONFIG_USERSPACE */ struct comp_dev *dev = mod->dev; - dst = &mod->priv.cfg; - ret = module_adapter_init_data(dev, dst, config, spec); +#if CONFIG_IPC_MAJOR_4 + /* set up ipc4 configuration items if needed from topology */ + if (ipc_pipe) { + dev->pipeline = pipeline; + + /* LL modules have the same period as the pipeline */ + if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_LL) + dev->period = ipc_pipe->pipeline->period; + } +#endif + + ret = module_adapter_init_data(mod, dev, dst, config, spec); if (ret) { comp_err(dev, "%d: module init data failed", ret); @@ -214,22 +137,6 @@ struct comp_dev *module_adapter_new_ext(const struct comp_driver *drv, else goto err; -#if CONFIG_IPC_MAJOR_4 - struct ipc_comp_dev *ipc_pipe; - struct ipc *ipc = ipc_get(); - - /* set the pipeline pointer if ipc_pipe is valid */ - ipc_pipe = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, config->pipeline_id, - IPC_COMP_IGNORE_REMOTE); - if (ipc_pipe) { - dev->pipeline = ipc_pipe->pipeline; - - /* LL modules have the same period as the pipeline */ - if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_LL) - dev->period = ipc_pipe->pipeline->period; - } -#endif - /* Init processing module */ ret = module_init(mod); if (ret) { @@ -437,15 +344,20 @@ int module_adapter_prepare(struct comp_dev *dev) module_adapter_check_data(mod, dev, sink); + /* get memory flags for input and output */ memory_flags = user_get_buffer_memory_region(dev->drv); + /* allocate memory for input buffers */ if (mod->max_sources) { mod->input_buffers = - rzalloc(memory_flags, sizeof(*mod->input_buffers) * mod->max_sources); + mod_alloc_ext(mod, memory_flags, + sizeof(*mod->input_buffers) * mod->max_sources, 0); + if (!mod->input_buffers) { comp_err(dev, "failed to allocate input buffers"); return -ENOMEM; } + memset(mod->input_buffers, 0, sizeof(*mod->input_buffers) * mod->max_sources); } else 
{ mod->input_buffers = NULL; } @@ -453,12 +365,15 @@ int module_adapter_prepare(struct comp_dev *dev) /* allocate memory for output buffers */ if (mod->max_sinks) { mod->output_buffers = - rzalloc(memory_flags, sizeof(*mod->output_buffers) * mod->max_sinks); + mod_alloc_ext(mod, memory_flags, + sizeof(*mod->output_buffers) * mod->max_sinks, 0); + if (!mod->output_buffers) { comp_err(dev, "failed to allocate output buffers"); ret = -ENOMEM; goto in_out_free; } + memset(mod->output_buffers, 0, sizeof(*mod->output_buffers) * mod->max_sinks); } else { mod->output_buffers = NULL; } @@ -519,7 +434,9 @@ int module_adapter_prepare(struct comp_dev *dev) size_t size = MAX(mod->deep_buff_bytes, mod->period_bytes); list_for_item(blist, &dev->bsource_list) { - mod->input_buffers[i].data = rballoc(memory_flags, size); + mod->input_buffers[i].data = mod_alloc_ext(mod, memory_flags, size, + DCACHE_LINE_SIZE); + if (!mod->input_buffers[i].data) { comp_err(mod->dev, "Failed to alloc input buffer data"); ret = -ENOMEM; @@ -531,7 +448,9 @@ int module_adapter_prepare(struct comp_dev *dev) /* allocate memory for output buffer data */ i = 0; list_for_item(blist, &dev->bsink_list) { - mod->output_buffers[i].data = rballoc(memory_flags, md->mpd.out_buff_size); + mod->output_buffers[i].data = mod_alloc_ext(mod, memory_flags, + md->mpd.out_buff_size, + DCACHE_LINE_SIZE); if (!mod->output_buffers[i].data) { comp_err(mod->dev, "Failed to alloc output buffer data"); ret = -ENOMEM; @@ -597,16 +516,16 @@ int module_adapter_prepare(struct comp_dev *dev) out_data_free: for (i = 0; i < mod->num_of_sinks; i++) - rfree(mod->output_buffers[i].data); + mod_free(mod, mod->output_buffers[i].data); in_data_free: for (i = 0; i < mod->num_of_sources; i++) - rfree(mod->input_buffers[i].data); + mod_free(mod, mod->input_buffers[i].data); in_out_free: - rfree(mod->output_buffers); + mod_free(mod, mod->output_buffers); mod->output_buffers = NULL; - rfree(mod->input_buffers); + mod_free(mod, mod->input_buffers); 
mod->input_buffers = NULL; return ret; } @@ -625,12 +544,12 @@ int module_adapter_params(struct comp_dev *dev, struct sof_ipc_stream_params *pa comp_err(dev, "comp_verify_params() failed."); return ret; } -#endif /* allocate stream_params each time */ - mod_free(mod, mod->stream_params); + if (mod->stream_params) + mod_free(mod, mod->stream_params); - mod->stream_params = mod_alloc(mod, sizeof(*mod->stream_params) + params->ext_data_length); + mod->stream_params = mod_zalloc(mod, sizeof(*mod->stream_params) + params->ext_data_length); if (!mod->stream_params) return -ENOMEM; @@ -647,7 +566,23 @@ int module_adapter_params(struct comp_dev *dev, struct sof_ipc_stream_params *pa if (ret < 0) return ret; } +#endif +#if CONFIG_IPC_MAJOR_4 + /* allocate stream_params once for IPC4 as no use of extended data */ + assert(params->ext_data_length == 0); + if (mod->stream_params) + goto copy_params; + mod->stream_params = mod_zalloc(mod, sizeof(*mod->stream_params)); + if (!mod->stream_params) + return -ENOMEM; + +copy_params: + ret = memcpy_s(mod->stream_params, sizeof(struct sof_ipc_stream_params), + params, sizeof(struct sof_ipc_stream_params)); + if (ret < 0) + return ret; +#endif return 0; } EXPORT_SYMBOL(module_adapter_params); @@ -1310,15 +1245,14 @@ int module_adapter_reset(struct comp_dev *dev) if (IS_PROCESSING_MODE_RAW_DATA(mod)) { for (i = 0; i < mod->num_of_sinks; i++) - rfree((__sparse_force void *)mod->output_buffers[i].data); + mod_free(mod, (__sparse_force void *)mod->output_buffers[i].data); for (i = 0; i < mod->num_of_sources; i++) - rfree((__sparse_force void *)mod->input_buffers[i].data); + mod_free(mod, (__sparse_force void *)mod->input_buffers[i].data); } if (IS_PROCESSING_MODE_RAW_DATA(mod) || IS_PROCESSING_MODE_AUDIO_STREAM(mod)) { - rfree(mod->output_buffers); - rfree(mod->input_buffers); - + mod_free(mod, mod->output_buffers); + mod_free(mod, mod->input_buffers); mod->num_of_sources = 0; mod->num_of_sinks = 0; } @@ -1364,9 +1298,10 @@ void 
module_adapter_free(struct comp_dev *dev) buffer_free(buffer); } - mod_free(mod, mod->stream_params); mod_free_all(mod); + mod_free(mod, mod->stream_params); + module_adapter_mem_free(mod); } EXPORT_SYMBOL(module_adapter_free); diff --git a/src/audio/module_adapter/module_adapter_ipc3.c b/src/audio/module_adapter/module_adapter_ipc3.c index 30aa8492c25d..f5f4c48d3b05 100644 --- a/src/audio/module_adapter/module_adapter_ipc3.c +++ b/src/audio/module_adapter/module_adapter_ipc3.c @@ -33,7 +33,8 @@ LOG_MODULE_DECLARE(module_adapter, CONFIG_SOF_LOG_LEVEL); * * \return: 0 - no error; < 0, error happened. */ -int module_adapter_init_data(struct comp_dev *dev, +int module_adapter_init_data(struct processing_module *mod, + struct comp_dev *dev, struct module_config *dst, const struct comp_ipc_config *config, const void *spec) diff --git a/src/audio/module_adapter/module_adapter_ipc4.c b/src/audio/module_adapter/module_adapter_ipc4.c index 54ee3dd94f5e..d6717b0b1e23 100644 --- a/src/audio/module_adapter/module_adapter_ipc4.c +++ b/src/audio/module_adapter/module_adapter_ipc4.c @@ -104,7 +104,7 @@ module_ext_init_decode(struct comp_dev *dev, struct module_config *dst, * * \return: 0 - no error; < 0, error happened. 
*/ -int module_adapter_init_data(struct comp_dev *dev, +int module_adapter_init_data(struct processing_module *mod, struct comp_dev *dev, struct module_config *dst, const struct comp_ipc_config *config, const void *spec) @@ -136,9 +136,14 @@ int module_adapter_init_data(struct comp_dev *dev, if (cfgsz == (sizeof(*cfg) + pinsz)) { dst->nb_input_pins = n_in; dst->nb_output_pins = n_out; +#if !CONFIG_SOF_VREGIONS dst->input_pins = sof_heap_alloc(dev->mod->priv.resources.heap, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, pinsz, 0); +#else + dst->input_pins = mod_alloc_ext(mod, SOF_MEM_FLAG_COHERENT, + pinsz, DCACHE_LINE_SIZE); +#endif if (!dst->input_pins) return -ENOMEM; diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 678b8095289f..62e8086c0cb9 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include #include @@ -127,6 +128,21 @@ struct pipeline *pipeline_new(uint32_t pipeline_id, uint32_t priority, uint32_t return NULL; } +#if CONFIG_SOF_VREGIONS + /* create a vregion region for all resources */ + // TODO: make lifetime_size and interim sizes configurable from topology + // TODO: get the text and shared size from topology too + size_t lifetime_size = 0x20000; /* 128kB lifetime */ + size_t interim_size = 0x4000; /* 16kB interim */ + size_t shared_size = 0x4000; /* 16kB shared */ + + p->vregion = vregion_create(lifetime_size, interim_size, shared_size, 0, 0); + if (!p->vregion) { + pipe_err(p, "pipeline_new(): vregion_create() failed."); + goto free; + } +#endif + /* init pipeline */ p->comp_id = comp_id; p->priority = priority; @@ -236,6 +252,10 @@ int pipeline_free(struct pipeline *p) pipeline_posn_offset_put(p->posn_offset); +#if CONFIG_SOF_VREGIONS + /* free vregion region */ + vregion_destroy(p->vregion); +#endif /* now free the pipeline */ rfree(p); diff --git a/src/include/module/module/base.h 
b/src/include/module/module/base.h index 6d7dea657314..ec681fb5f068 100644 --- a/src/include/module/module/base.h +++ b/src/include/module/module/base.h @@ -85,6 +85,12 @@ struct processing_module { struct module_data priv; /**< module private data */ uint32_t period_bytes; /** pipeline period bytes */ + /* virtual region iff not using the parent pipeline region. + * i.e. a DP module in a different memory domain from rest of pipeline. + */ + struct vregion *vregion; + struct comp_dev *dev; + /* * Fields below can only be accessed by the SOF and must be moved to a new structure. * Below #ifdef is a temporary solution used until work on separating a common interface @@ -101,7 +107,6 @@ struct processing_module { * This is a temporary change in order to support the trace messages in the modules. This * will be removed once the trace API is updated. */ - struct comp_dev *dev; uint32_t deep_buff_bytes; /**< copy start threshold */ uint32_t output_buffer_size; /**< size of local buffer to save produced samples */ @@ -192,4 +197,12 @@ struct processing_module { #endif /* SOF_MODULE_PRIVATE */ }; +static inline struct vregion *module_get_vregion(struct processing_module *mod) +{ + if (mod->vregion) + return mod->vregion; + else + return mod->dev->pipeline->vregion; +} + #endif /* __MODULE_MODULE_BASE__ */ diff --git a/src/include/sof/audio/module_adapter/module/generic.h b/src/include/sof/audio/module_adapter/module/generic.h index 535227336b9e..78a6747c8d73 100644 --- a/src/include/sof/audio/module_adapter/module/generic.h +++ b/src/include/sof/audio/module_adapter/module/generic.h @@ -227,6 +227,10 @@ static inline void *mod_zalloc(struct processing_module *mod, size_t size) } int mod_free(struct processing_module *mod, const void *ptr); +void module_adapter_mem_free(struct processing_module *mod); +struct processing_module *module_adapter_mem_alloc(const struct comp_driver *drv, + const struct comp_ipc_config *config, + struct pipeline *pipeline); #if CONFIG_COMP_BLOB 
struct comp_data_blob_handler *mod_data_blob_handler_new(struct processing_module *mod); void mod_data_blob_handler_free(struct processing_module *mod, struct comp_data_blob_handler *dbh); @@ -235,6 +239,7 @@ void mod_data_blob_handler_free(struct processing_module *mod, struct comp_data_ const void *mod_fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size); void mod_fast_put(struct processing_module *mod, const void *sram_ptr); #endif +void mod_resource_init(struct processing_module *mod); void mod_free_all(struct processing_module *mod); int module_prepare(struct processing_module *mod, struct sof_source **sources, int num_of_sources, @@ -413,7 +418,7 @@ void module_update_buffer_position(struct input_stream_buffer *input_buffers, struct output_stream_buffer *output_buffers, uint32_t frames); -int module_adapter_init_data(struct comp_dev *dev, +int module_adapter_init_data(struct processing_module *mod, struct comp_dev *dev, struct module_config *dst, const struct comp_ipc_config *config, const void *spec); diff --git a/src/include/sof/audio/pipeline.h b/src/include/sof/audio/pipeline.h index 5221d330e0f1..8462135bc3bc 100644 --- a/src/include/sof/audio/pipeline.h +++ b/src/include/sof/audio/pipeline.h @@ -10,6 +10,7 @@ #include #include +#include #include #include #include @@ -64,6 +65,9 @@ struct pipeline { uint32_t time_domain; /**< scheduling time domain */ uint32_t attributes; /**< pipeline attributes from IPC extension msg/ */ + /* pipeline resource management */ + struct vregion *vregion; + /* runtime status */ int32_t xrun_bytes; /* last xrun length */ uint32_t status; /* pipeline status */ diff --git a/src/include/sof/lib/fast-get.h b/src/include/sof/lib/fast-get.h index ffbac19cf1b8..0448b533eed9 100644 --- a/src/include/sof/lib/fast-get.h +++ b/src/include/sof/lib/fast-get.h @@ -10,7 +10,9 @@ #include -const void *fast_get(const void * const dram_ptr, size_t size); -void fast_put(const void *sram_ptr); +struct 
processing_module; + +const void *fast_get(struct processing_module *mod, const void * const dram_ptr, size_t size); +void fast_put(struct processing_module *mod, const void *sram_ptr); #endif /* __SOF_LIB_FAST_GET_H__ */ diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index 3aee3b69313f..17cb6071abf9 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -1179,10 +1179,9 @@ EXPORT_SYMBOL(ipc4_audio_format_to_stream_params); void ipc4_base_module_cfg_to_stream_params(const struct ipc4_base_module_cfg *base_cfg, struct sof_ipc_stream_params *params) { - memset(params, 0, sizeof(struct sof_ipc_stream_params)); - params->buffer.size = base_cfg->obs * 2; - ipc4_audio_format_to_stream_params(&base_cfg->audio_fmt, params); + + params->buffer.size = base_cfg->obs * 2; } EXPORT_SYMBOL(ipc4_base_module_cfg_to_stream_params); diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c index 6181a34a1a4e..7c70e483e612 100644 --- a/src/schedule/zephyr_dp_schedule.c +++ b/src/schedule/zephyr_dp_schedule.c @@ -391,7 +391,13 @@ static int scheduler_dp_task_free(void *data, struct task *task) #endif /* free task stack */ +#if CONFIG_SOF_VREGIONS + struct vregion *vregion = module_get_vregion(pdata->mod); + vregion_free(vregion, pdata->p_stack); + ret = 0; +#else ret = user_stack_free((__sparse_force void *)pdata->p_stack); +#endif pdata->p_stack = NULL; /* all other memory has been allocated as a single malloc, will be freed later by caller */ @@ -579,7 +585,9 @@ int scheduler_dp_task_init(struct task **task, { void __sparse_cache *p_stack = NULL; struct k_heap *const user_heap = mod->dev->drv->user_heap; - +#if CONFIG_SOF_VREGIONS + struct vregion *vregion = module_get_vregion(mod); +#endif /* memory allocation helper structure */ struct { struct task task; @@ -591,6 +599,21 @@ int scheduler_dp_task_init(struct task **task, /* must be called on the same core the task will be binded to */ assert(cpu_get_id() == core); +#if 
CONFIG_SOF_VREGIONS + //TODO: add check if vregion is in correct memory domain/coherent + task_memory = vregion_alloc_align(vregion, VREGION_MEM_TYPE_LIFETIME_SHARED, + sizeof(*task_memory), CONFIG_DCACHE_LINE_SIZE); + if (!task_memory) { + tr_err(&dp_tr, "vregion task memory alloc failed"); + return -ENOMEM; + } + p_stack = vregion_alloc_align(vregion, VREGION_MEM_TYPE_LIFETIME, + stack_size, CONFIG_DCACHE_LINE_SIZE); + if (!p_stack) { + tr_err(&dp_tr, "vregion stack alloc failed"); + return -ENOMEM; + } +#else /* * allocate memory * to avoid multiple malloc operations allocate all required memory as a single structure */ @@ -613,7 +636,7 @@ int scheduler_dp_task_init(struct task **task, ret = -ENOMEM; goto err; } - +#endif /* CONFIG_SOF_VREGIONS */ /* internal SOF task init */ ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run, mod, core, options); diff --git a/test/cmocka/src/audio/eq_fir/CMakeLists.txt b/test/cmocka/src/audio/eq_fir/CMakeLists.txt index 305a6846966c..90eeff6cb80e 100644 --- a/test/cmocka/src/audio/eq_fir/CMakeLists.txt +++ b/test/cmocka/src/audio/eq_fir/CMakeLists.txt @@ -24,6 +24,8 @@ add_library(audio_for_eq_fir STATIC ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/comp_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/audio_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/source_api_helper.c diff --git a/test/cmocka/src/audio/eq_iir/CMakeLists.txt b/test/cmocka/src/audio/eq_iir/CMakeLists.txt index aa704a1af92b..a0af9662694a 100644 --- a/test/cmocka/src/audio/eq_iir/CMakeLists.txt +++ b/test/cmocka/src/audio/eq_iir/CMakeLists.txt @@ -27,6 +27,8 @@ add_library(audio_for_eq_iir STATIC 
${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/comp_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/audio_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/source_api_helper.c diff --git a/test/cmocka/src/audio/mixer/CMakeLists.txt b/test/cmocka/src/audio/mixer/CMakeLists.txt index ea8cad0bd79e..ef80cf741a27 100644 --- a/test/cmocka/src/audio/mixer/CMakeLists.txt +++ b/test/cmocka/src/audio/mixer/CMakeLists.txt @@ -13,6 +13,8 @@ cmocka_test(mixer ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/comp_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/audio_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/source_api_helper.c diff --git a/test/cmocka/src/audio/mixer/mixer_test.c b/test/cmocka/src/audio/mixer/mixer_test.c index bf6a7ad6b48d..fbea57d4afd9 100644 --- a/test/cmocka/src/audio/mixer/mixer_test.c +++ b/test/cmocka/src/audio/mixer/mixer_test.c @@ -80,7 +80,7 @@ static int test_setup(void **state) module_adapter_test_setup(test_data); mod_data = &test_data->mod->priv; - md = test_malloc(sizeof(*md)); + md = mod_alloc(test_data->mod, sizeof(*md)); mod_data->private = md; md->mix_func = mixer_get_processing_function(test_data->mod->dev, test_data->sinks[0]); @@ -95,7 +95,7 @@ static int test_teardown(void **state) struct processing_module_test_data *test_data = *state; struct mixer_data *md = 
module_get_private_data(test_data->mod); - test_free(md); + mod_free(test_data->mod, md); module_adapter_test_free(test_data); test_free(test_data); diff --git a/test/cmocka/src/audio/module_adapter_test.c b/test/cmocka/src/audio/module_adapter_test.c index 81ec6cadc1b7..127091271624 100644 --- a/test/cmocka/src/audio/module_adapter_test.c +++ b/test/cmocka/src/audio/module_adapter_test.c @@ -29,13 +29,14 @@ int module_adapter_test_setup(struct processing_module_test_data *test_data) dev->frames = parameters->frames; mod->dev = dev; dev->mod = mod; + mod_resource_init(mod); - test_data->sinks = test_calloc(test_data->num_sinks, sizeof(struct comp_buffer *)); - test_data->sources = test_calloc(test_data->num_sources, sizeof(struct comp_buffer *)); + test_data->sinks = mod_alloc(mod, test_data->num_sinks * sizeof(struct comp_buffer *)); + test_data->sources = mod_alloc(mod, test_data->num_sources * sizeof(struct comp_buffer *)); - test_data->input_buffers = test_calloc(test_data->num_sources, + test_data->input_buffers = mod_alloc(mod, test_data->num_sources * sizeof(struct input_stream_buffer *)); - test_data->output_buffers = test_calloc(test_data->num_sinks, + test_data->output_buffers = mod_alloc(mod, test_data->num_sinks * sizeof(struct output_stream_buffer *)); list_init(&dev->bsource_list); @@ -48,7 +49,7 @@ int module_adapter_test_setup(struct processing_module_test_data *test_data) for (i = 0; i < test_data->num_sinks; i++) { test_data->sinks[i] = create_test_sink(dev, 0, parameters->sink_format, parameters->channels, size); - test_data->output_buffers[i] = test_malloc(sizeof(struct output_stream_buffer)); + test_data->output_buffers[i] = mod_alloc(mod, sizeof(struct output_stream_buffer)); test_data->output_buffers[i]->data = &test_data->sinks[i]->stream; } @@ -58,7 +59,7 @@ int module_adapter_test_setup(struct processing_module_test_data *test_data) for (i = 0; i < test_data->num_sources; i++) { test_data->sources[i] = create_test_source(dev, 0, 
parameters->source_format, parameters->channels, size); - test_data->input_buffers[i] = test_malloc(sizeof(struct input_stream_buffer)); + test_data->input_buffers[i] = mod_alloc(mod, sizeof(struct input_stream_buffer)); test_data->input_buffers[i]->data = &test_data->sources[i]->stream; } @@ -69,22 +70,24 @@ int module_adapter_test_setup(struct processing_module_test_data *test_data) void module_adapter_test_free(struct processing_module_test_data *test_data) { + struct processing_module *mod = test_data->mod; int i; for (i = 0; i < test_data->num_sinks; i++) { free_test_sink(test_data->sinks[i]); - test_free(test_data->output_buffers[i]); + mod_free(mod, test_data->output_buffers[i]); } for (i = 0; i < test_data->num_sources; i++) { free_test_source(test_data->sources[i]); - test_free(test_data->input_buffers[i]); + mod_free(mod, test_data->input_buffers[i]); } - test_free(test_data->input_buffers); - test_free(test_data->output_buffers); - test_free(test_data->sinks); - test_free(test_data->sources); + mod_free(mod, test_data->input_buffers); + mod_free(mod, test_data->output_buffers); + mod_free(mod, test_data->sinks); + mod_free(mod, test_data->sources); + mod_free_all(mod); test_free(test_data->mod->dev); test_free(test_data->mod); } diff --git a/test/cmocka/src/audio/mux/CMakeLists.txt b/test/cmocka/src/audio/mux/CMakeLists.txt index 67b10f77270d..827a2aef740c 100644 --- a/test/cmocka/src/audio/mux/CMakeLists.txt +++ b/test/cmocka/src/audio/mux/CMakeLists.txt @@ -29,6 +29,8 @@ add_library( ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c ) sof_append_relative_path_definitions(audio_mux) diff --git a/test/cmocka/src/audio/mux/demux_copy.c 
b/test/cmocka/src/audio/mux/demux_copy.c index 8e331446bbf4..37b1ff407864 100644 --- a/test/cmocka/src/audio/mux/demux_copy.c +++ b/test/cmocka/src/audio/mux/demux_copy.c @@ -188,8 +188,8 @@ static int teardown_test_case(void **state) struct test_data *td = *((struct test_data **)state); int i; - rfree(td->mod->input_buffers); - rfree(td->mod->output_buffers); + mod_free(td->mod, td->mod->input_buffers); + mod_free(td->mod, td->mod->output_buffers); free_test_source(td->source); diff --git a/test/cmocka/src/audio/mux/mux_copy.c b/test/cmocka/src/audio/mux/mux_copy.c index 66b21b0df27c..bfd2398251aa 100644 --- a/test/cmocka/src/audio/mux/mux_copy.c +++ b/test/cmocka/src/audio/mux/mux_copy.c @@ -211,8 +211,8 @@ static int teardown_test_case(void **state) struct test_data *td = *((struct test_data **)state); int i; - rfree(td->mod->input_buffers); - rfree(td->mod->output_buffers); + mod_free(td->mod, td->mod->input_buffers); + mod_free(td->mod, td->mod->output_buffers); for (i = 0; i < MUX_MAX_STREAMS; ++i) free_test_source(td->sources[i]); diff --git a/test/cmocka/src/audio/pipeline/pipeline_connection_mocks.c b/test/cmocka/src/audio/pipeline/pipeline_connection_mocks.c index de05f8e3b284..04fd5b810c5b 100644 --- a/test/cmocka/src/audio/pipeline/pipeline_connection_mocks.c +++ b/test/cmocka/src/audio/pipeline/pipeline_connection_mocks.c @@ -8,6 +8,7 @@ #include "pipeline_connection_mocks.h" extern struct schedulers *schedulers; +struct schedule_data *sch; struct scheduler_ops schedule_mock_ops = { .schedule_task_free = &schedule_task_mock_free, @@ -42,7 +43,7 @@ struct pipeline_connect_data *get_standard_connect_objects(void) schedulers = calloc(sizeof(struct schedulers), 1); list_init(&schedulers->list); - struct schedule_data *sch = calloc(sizeof(struct schedule_data), 1); + sch = calloc(sizeof(struct schedule_data), 1); list_init(&sch->list); sch->type = SOF_SCHEDULE_EDF; @@ -93,4 +94,6 @@ void free_standard_connect_objects(struct pipeline_connect_data *data) 
free(data->second); free(data->b1); free(data->b2); + free(sch); + free(schedulers); } diff --git a/test/cmocka/src/audio/volume/CMakeLists.txt b/test/cmocka/src/audio/volume/CMakeLists.txt index d89927578222..bae97abb6035 100644 --- a/test/cmocka/src/audio/volume/CMakeLists.txt +++ b/test/cmocka/src/audio/volume/CMakeLists.txt @@ -23,6 +23,8 @@ add_library(audio_for_volume STATIC ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/comp_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/buffers/audio_buffer.c ${PROJECT_SOURCE_DIR}/src/audio/source_api_helper.c @@ -40,6 +42,7 @@ add_library(audio_for_volume STATIC ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-schedule.c ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-stream.c ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-xrun.c + ${PROJECT_SOURCE_DIR}/src/audio/data_blob.c ${PROJECT_SOURCE_DIR}/src/audio/component.c ${PROJECT_SOURCE_DIR}/src/math/numbers.c ) diff --git a/test/cmocka/src/lib/fast-get/CMakeLists.txt b/test/cmocka/src/lib/fast-get/CMakeLists.txt index 640f821cf4f8..0b401c237a63 100644 --- a/test/cmocka/src/lib/fast-get/CMakeLists.txt +++ b/test/cmocka/src/lib/fast-get/CMakeLists.txt @@ -2,9 +2,39 @@ cmocka_test(fast-get-tests fast-get-tests.c + ${PROJECT_SOURCE_DIR}/src/audio/volume/volume.c + ${PROJECT_SOURCE_DIR}/src/audio/volume/volume_ipc3.c + ${PROJECT_SOURCE_DIR}/src/audio/volume/volume_generic.c + ${PROJECT_SOURCE_DIR}/test/cmocka/src/audio/module_adapter_test.c ${PROJECT_SOURCE_DIR}/zephyr/lib/fast-get.c ${PROJECT_SOURCE_DIR}/src/lib/alloc.c ${PROJECT_SOURCE_DIR}/src/platform/library/lib/memory.c + ${PROJECT_SOURCE_DIR}/src/audio/component.c + 
${PROJECT_SOURCE_DIR}/src/audio/buffers/comp_buffer.c + ${PROJECT_SOURCE_DIR}/src/audio/buffers/audio_buffer.c + ${PROJECT_SOURCE_DIR}/src/audio/source_api_helper.c + ${PROJECT_SOURCE_DIR}/src/audio/sink_api_helper.c + ${PROJECT_SOURCE_DIR}/src/audio/sink_source_utils.c + ${PROJECT_SOURCE_DIR}/src/audio/audio_stream.c + ${PROJECT_SOURCE_DIR}/src/module/audio/source_api.c + ${PROJECT_SOURCE_DIR}/src/module/audio/sink_api.c + ${PROJECT_SOURCE_DIR}/src/math/numbers.c + ${PROJECT_SOURCE_DIR}/src/ipc/ipc3/helper.c + ${PROJECT_SOURCE_DIR}/src/ipc/ipc-helper.c + ${PROJECT_SOURCE_DIR}/src/ipc/ipc-common.c + ${PROJECT_SOURCE_DIR}/test/cmocka/src/notifier_mocks.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module_adapter_ipc3.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/generic.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-common.c + ${PROJECT_SOURCE_DIR}/src/audio/module_adapter/module/memory-heap.c + ${PROJECT_SOURCE_DIR}/src/audio/data_blob.c + ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-graph.c + ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-params.c + ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-schedule.c + ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-stream.c + ${PROJECT_SOURCE_DIR}/src/audio/pipeline/pipeline-xrun.c ) target_link_libraries(fast-get-tests PRIVATE "-Wl,--wrap=rzalloc,--wrap=rmalloc,--wrap=rfree") +target_include_directories(fast-get-tests PRIVATE ${PROJECT_SOURCE_DIR}/src/audio) \ No newline at end of file diff --git a/test/cmocka/src/lib/fast-get/fast-get-tests.c b/test/cmocka/src/lib/fast-get/fast-get-tests.c index b7041abd827b..705be51b71b6 100644 --- a/test/cmocka/src/lib/fast-get/fast-get-tests.c +++ b/test/cmocka/src/lib/fast-get/fast-get-tests.c @@ -4,12 +4,19 @@ // // Author: Jyri Sarha +#include "../../util.h" +#include "../../audio/module_adapter.h" + +#include +#include +#include #include #include #include #include 
#include #include +#include #include #include @@ -66,65 +73,116 @@ static const int testdata[33][100] = { { 33 }, }; +struct test_data { + struct comp_dev *dev; + struct processing_module *mod; + struct comp_data *cd; + struct processing_module_test_data *vol_state; +}; + +struct test_data _td; + +static int setup(void **state) +{ + struct test_data *td = &_td; + struct module_data *md; + struct vol_data *cd; + + /* allocate new state */ + td->vol_state = test_malloc(sizeof(struct processing_module_test_data)); + td->vol_state->num_sources = 1; + td->vol_state->num_sinks = 1; + module_adapter_test_setup(td->vol_state); + + /* allocate and set new data */ + cd = test_malloc(sizeof(*cd)); + md = &td->vol_state->mod->priv; + md->private = cd; + cd->is_passthrough = false; + td->dev = td->vol_state->mod->dev; + td->mod = td->vol_state->mod; + + /* malloc memory to store current volume 4 times to ensure the address + * is 8-byte aligned for multi-way xtensa intrinsic operations. + */ + const size_t vol_size = sizeof(int32_t) * SOF_IPC_MAX_CHANNELS * 4; + + cd->vol = test_malloc(vol_size); + cd->ramp_type = SOF_VOLUME_LINEAR; + *state = td; + + return 0; +} + +static int teardown(void **state) +{ + struct test_data *td = &_td; + struct vol_data *cd = module_get_private_data(td->vol_state->mod); + + test_free(cd->vol); + test_free(cd); + module_adapter_test_free(td->vol_state); + test_free(td->vol_state); + + return 0; +} + static void test_simple_fast_get_put(void **state) { + struct test_data *td = *((struct test_data **)state); const void *ret; - (void)state; /* unused */ - - ret = fast_get(testdata[0], sizeof(testdata[0])); + ret = fast_get(td->mod, testdata[0], sizeof(testdata[0])); assert(ret); assert(!memcmp(ret, testdata[0], sizeof(testdata[0]))); - fast_put(ret); + fast_put(td->mod, ret); } static void test_fast_get_size_missmatch_test(void **state) { + struct test_data *td = *((struct test_data **)state); const void *ret[2]; - (void)state; /* unused */ - - 
ret[0] = fast_get(testdata[0], sizeof(testdata[0])); + ret[0] = fast_get(td->mod, testdata[0], sizeof(testdata[0])); assert(ret[0]); assert(!memcmp(ret[0], testdata[0], sizeof(testdata[0]))); - ret[1] = fast_get(testdata[0], sizeof(testdata[0]) + 1); + /* this test is designed to test size mismatch handling */ + ret[1] = fast_get(td->mod, testdata[0], sizeof(testdata[0]) + 1); assert(!ret[1]); - - fast_put(ret); + fast_put(td->mod, ret[0]); } static void test_over_32_fast_gets_and_puts(void **state) { + struct test_data *td = *((struct test_data **)state); const void *copy[ARRAY_SIZE(testdata)]; int i; - (void)state; /* unused */ - for (i = 0; i < ARRAY_SIZE(copy); i++) - copy[i] = fast_get(testdata[i], sizeof(testdata[0])); + copy[i] = fast_get(td->mod, testdata[i], sizeof(testdata[i])); for (i = 0; i < ARRAY_SIZE(copy); i++) - assert(!memcmp(copy[i], testdata[i], sizeof(testdata[0]))); + assert(!memcmp(copy[i], testdata[i], sizeof(testdata[i]))); for (i = 0; i < ARRAY_SIZE(copy); i++) - fast_put(copy[i]); + fast_put(td->mod, copy[i]); } static void test_fast_get_refcounting(void **state) { + struct test_data *td = *((struct test_data **)state); const void *copy[2][ARRAY_SIZE(testdata)]; int i; - (void)state; /* unused */ for (i = 0; i < ARRAY_SIZE(copy[0]); i++) - copy[0][i] = fast_get(testdata[i], sizeof(testdata[0])); + copy[0][i] = fast_get(td->mod, testdata[i], sizeof(testdata[0])); for (i = 0; i < ARRAY_SIZE(copy[0]); i++) - copy[1][i] = fast_get(testdata[i], sizeof(testdata[0])); + copy[1][i] = fast_get(td->mod, testdata[i], sizeof(testdata[0])); for (i = 0; i < ARRAY_SIZE(copy[0]); i++) assert(copy[0][i] == copy[1][i]); @@ -133,18 +191,18 @@ static void test_fast_get_refcounting(void **state) assert(!memcmp(copy[0][i], testdata[i], sizeof(testdata[0]))); for (i = 0; i < ARRAY_SIZE(copy[0]); i++) - fast_put(copy[0][i]); + fast_put(td->mod, copy[0][i]); for (i = 0; i < ARRAY_SIZE(copy[0]); i++) assert(!memcmp(copy[1][i], testdata[i], sizeof(testdata[0]))); for 
(i = 0; i < ARRAY_SIZE(copy[0]); i++) - fast_put(copy[1][i]); + fast_put(td->mod, copy[1][i]); } int main(void) { - const struct CMUnitTest tests[] = { + struct CMUnitTest tests[] = { cmocka_unit_test(test_simple_fast_get_put), cmocka_unit_test(test_fast_get_size_missmatch_test), cmocka_unit_test(test_over_32_fast_gets_and_puts), @@ -153,7 +211,7 @@ int main(void) cmocka_set_message_output(CM_OUTPUT_TAP); - return cmocka_run_group_tests(tests, NULL, NULL); + return cmocka_run_group_tests(tests, setup, teardown); } void *__wrap_rzalloc(uint32_t flags, size_t bytes); diff --git a/test/cmocka/src/notifier_mocks.c b/test/cmocka/src/notifier_mocks.c index bfdb0d488ffe..3f4d19b5289f 100644 --- a/test/cmocka/src/notifier_mocks.c +++ b/test/cmocka/src/notifier_mocks.c @@ -99,4 +99,6 @@ void notifier_unregister_all(void *receiver, void *caller) for (i = 0; i < NOTIFIER_ID_COUNT; i++) notifier_unregister(receiver, caller, i); + free(_notify); + _notify = NULL; } diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt index 4ef481d8c52d..09e3f9897b9a 100644 --- a/zephyr/CMakeLists.txt +++ b/zephyr/CMakeLists.txt @@ -288,9 +288,18 @@ if (CONFIG_SOC_SERIES_INTEL_ADSP_ACE) ) # Sources for virtual heap management - zephyr_library_sources( - lib/regions_mm.c - ) + # Virtual memory support is required and can be enabled with + # either VMH or Virtual pages and regions. + if (CONFIG_SOF_VREGIONS) + zephyr_library_sources( + lib/vpages.c + lib/vregion.c + ) + else() + zephyr_library_sources( + lib/regions_mm.c + ) + endif() zephyr_library_sources_ifdef(CONFIG_CAVS_LPS ${SOF_PLATFORM_PATH}/intel/ace/lps_wait.c diff --git a/zephyr/Kconfig b/zephyr/Kconfig index 20a3b6f173a2..151e7cb8e65d 100644 --- a/zephyr/Kconfig +++ b/zephyr/Kconfig @@ -101,6 +101,30 @@ config SOF_USERSPACE_PROXY It is responsible for forwarding module function calls coming from sof running in kernelspace to the module code executed with user privileges. 
+config SOF_VPAGE_MAX_ALLOCS + int "Number of virtual memory page allocation elements" + default 128 + help + This setting defines the maximum number of virtual memory page allocation + elements that can be tracked. Each allocation element represents a + contiguous block of virtual memory allocated from the virtual memory + region. Increasing this number allows for more simultaneous page allocations, + but also increases the memory overhead for tracking these allocations. + +config SOF_VREGIONS + bool "Enable virtual memory regions" + default y if ACE + default n + depends on ACE + help + Enable the virtual regions memory allocator for pipeline resource management. + This provides a way to manage memory resources for audio pipelines, + including + 1) multiple pipeline static lifetime allocations. + 2) runtime pipeline allocations. + 3) pipeline shared memory allocations. + 4) module text allocation. + config ZEPHYR_NATIVE_DRIVERS bool "Use Zephyr native drivers" help @@ -186,7 +210,7 @@ config SOF_ZEPHYR_NO_SOF_CLOCK config VIRTUAL_HEAP bool "Use virtual memory heap to allocate a buffers" - default y if ACE + default n if ACE depends on ACE help Enabling this option will use the virtual memory heap allocator to allocate buffers. diff --git a/zephyr/include/sof/lib/vpages.h b/zephyr/include/sof/lib/vpages.h new file mode 100644 index 000000000000..a360c6bf3a8a --- /dev/null +++ b/zephyr/include/sof/lib/vpages.h @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Copyright(c) 2025 Intel Corporation. + +/* Virtual Page Allocator API */ +#ifndef __SOF_LIB_VPAGE_H__ +#define __SOF_LIB_VPAGE_H__ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Allocate virtual pages + * Allocates a specified number of contiguous virtual memory pages by mapping + * physical pages. + * + * @param[in] pages Number of 4kB pages to allocate. + * + * @return Pointer to the allocated virtual memory region, or NULL on failure. 
+ */ +void *alloc_vpages(uint32_t pages); + +/** + * @brief Free virtual pages + * Frees previously allocated virtual memory pages and unmaps them. + * + * @param[in] ptr Pointer to the memory pages to free. + */ +void free_vpages(void *ptr); + +/** + * @brief Initialize virtual page allocator + * + * Initializes a virtual page allocator that manages a virtual memory region + * using a page table and block structures. + * + * @retval 0 if successful. + * @retval -ENOMEM on creation failure. + */ +int init_vpages(void); + +#ifdef __cplusplus +} +#endif + +#endif /* __SOF_LIB_VPAGE_H__ */ diff --git a/zephyr/include/sof/lib/vregion.h b/zephyr/include/sof/lib/vregion.h new file mode 100644 index 000000000000..e659ee40dc79 --- /dev/null +++ b/zephyr/include/sof/lib/vregion.h @@ -0,0 +1,91 @@ +// SPDX-License-Identifier: BSD-3-Clause +// Copyright(c) 2025 Intel Corporation. + +/* Pre Allocated Contiguous Virtual Region */ +#ifndef __SOF_LIB_VREGION_H__ +#define __SOF_LIB_VREGION_H__ + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +struct vregion; + +/** + * @brief Create a new virtual region instance. + * + * Create a new virtual region instance with specified static, dynamic, and shared static sizes + * plus an optional read-only text partition and optional shared static partition. + * Total size is the sum of static, dynamic, shared static, and text sizes. + * + * @param[in] lifetime_size Size of the virtual region lifetime partition. + * @param[in] interim_size Size of the virtual region interim partition. + * @param[in] lifetime_shared_size Size of the virtual region shared lifetime partition. + * @param[in] interim_shared_size Size of the virtual region shared interim partition. + * @param[in] text_size Size of the optional read-only text partition. + * @return struct vregion* Pointer to the new virtual region instance, or NULL on failure. 
+ */ +struct vregion *vregion_create(size_t lifetime_size, size_t interim_size, + size_t lifetime_shared_size, size_t interim_shared_size, + size_t text_size); + +/** + * @brief Destroy a virtual region instance. + * + * Free all associated resources and deallocate the virtual region instance. + * + * @param[in] vr Pointer to the virtual region instance to destroy. + */ +void vregion_destroy(struct vregion *vr); + +/** + * @brief Memory types for virtual region allocations. + * Used to specify the type of memory allocation within a virtual region. + */ +enum vregion_mem_type { + VREGION_MEM_TYPE_INTERIM, /* interim allocation that can be freed */ + VREGION_MEM_TYPE_LIFETIME, /* lifetime allocation */ + VREGION_MEM_TYPE_INTERIM_SHARED, /* shared interim allocation */ + VREGION_MEM_TYPE_LIFETIME_SHARED /* shared lifetime allocation */ +}; +void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size); + +/** + * @brief Allocate aligned memory from the specified virtual region. + * + * Allocate aligned memory from the specified virtual region based on the memory type. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] type Type of memory to allocate (interim, lifetime, or a shared variant). + * @param[in] size Size of memory to allocate in bytes. + * @param[in] alignment Alignment of memory to allocate in bytes. + * @return void* Pointer to the allocated memory, or NULL on failure. + */ +void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment); + +/** + * @brief Free memory allocated from the specified virtual region. + * + * Free memory previously allocated from the specified virtual region. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] ptr Pointer to the memory to free. + */ +void vregion_free(struct vregion *vr, void *ptr); + +/** + * @brief Log virtual region memory usage. + * + * @param[in] vr Pointer to the virtual region instance. 
+ */ +void vregion_info(struct vregion *vr); + +#ifdef __cplusplus +} +#endif + +#endif /* __SOF_LIB_VREGION_H__ */ diff --git a/zephyr/include/sof/trace/trace.h b/zephyr/include/sof/trace/trace.h index 656b859fb514..b141ce540da0 100644 --- a/zephyr/include/sof/trace/trace.h +++ b/zephyr/include/sof/trace/trace.h @@ -15,6 +15,9 @@ #endif +/* writes to FW register that kernel prints on IPC timeout */ +void trace_msg(int msg); + /* printk supports uint64_t so use it until LOG is ready */ #define USE_PRINTK 1 diff --git a/zephyr/lib/fast-get.c b/zephyr/lib/fast-get.c index 3be9475ff90c..009f2e917bd7 100644 --- a/zephyr/lib/fast-get.c +++ b/zephyr/lib/fast-get.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -35,7 +36,7 @@ static struct sof_fast_get_data fast_get_data = { LOG_MODULE_REGISTER(fast_get, CONFIG_SOF_LOG_LEVEL); -static int fast_get_realloc(struct sof_fast_get_data *data) +static int fast_get_realloc(struct processing_module *mod, struct sof_fast_get_data *data) { struct sof_fast_get_entry *entries; /* @@ -45,15 +46,16 @@ static int fast_get_realloc(struct sof_fast_get_data *data) const unsigned int init_n_entries = 8; unsigned int n_entries = data->num_entries ? 
data->num_entries * 2 : init_n_entries; - entries = rzalloc(SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, - n_entries * sizeof(*entries)); + entries = mod_alloc_ext(mod, SOF_MEM_FLAG_USER | SOF_MEM_FLAG_COHERENT, + n_entries * sizeof(*entries), 0); if (!entries) return -ENOMEM; + memset(entries, 0, n_entries * sizeof(*entries)); if (data->num_entries) { memcpy_s(entries, n_entries * sizeof(*entries), data->entries, data->num_entries * sizeof(*entries)); - rfree(data->entries); + mod_free(mod, data->entries); } data->entries = entries; @@ -80,7 +82,7 @@ static struct sof_fast_get_entry *fast_get_find_entry(struct sof_fast_get_data * return NULL; } -const void *fast_get(const void *dram_ptr, size_t size) +const void *fast_get(struct processing_module *mod, const void *dram_ptr, size_t size) { struct sof_fast_get_data *data = &fast_get_data; struct sof_fast_get_entry *entry; @@ -91,7 +93,7 @@ const void *fast_get(const void *dram_ptr, size_t size) do { entry = fast_get_find_entry(data, dram_ptr); if (!entry) { - if (fast_get_realloc(data)) { + if (fast_get_realloc(mod, data)) { ret = NULL; goto out; } @@ -116,7 +118,7 @@ const void *fast_get(const void *dram_ptr, size_t size) goto out; } - ret = rmalloc(SOF_MEM_FLAG_USER, size); + ret = mod_alloc_ext(mod, SOF_MEM_FLAG_USER, size, DCACHE_LINE_SIZE); if (!ret) goto out; entry->size = size; @@ -146,7 +148,7 @@ static struct sof_fast_get_entry *fast_put_find_entry(struct sof_fast_get_data * return NULL; } -void fast_put(const void *sram_ptr) +void fast_put(struct processing_module *mod, const void *sram_ptr) { struct sof_fast_get_data *data = &fast_get_data; struct sof_fast_get_entry *entry; @@ -160,7 +162,7 @@ void fast_put(const void *sram_ptr) } entry->refcount--; if (!entry->refcount) { - rfree(entry->sram_ptr); + mod_free(mod, entry->sram_ptr); memset(entry, 0, sizeof(*entry)); } out: diff --git a/zephyr/lib/vpages.c b/zephyr/lib/vpages.c new file mode 100644 index 000000000000..9e13efbdbfd6 --- /dev/null +++ 
b/zephyr/lib/vpages.c @@ -0,0 +1,340 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2025 Intel Corporation. + * + * Author: Liam Girdwood + */ + +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(vpage, CONFIG_SOF_LOG_LEVEL); + +/* Simple Page Allocator. + * + * This allocator manages the allocation and deallocation of virtual memory pages from + * a predefined virtual memory region which is larger than the physical memory region. + * + * Both memory regions are divided into 4kB pages that are represented as blocks in a + * bitmap using the zephyr sys_mem_blocks API. The virtual block map tracks the allocation + * of virtual memory pages while the physical block map in the Zephyr MM driver tracks + * the allocation of physical memory pages. + */ + +/* max number of allocation elements */ +#define VPAGE_MAX_ALLOCS CONFIG_SOF_VPAGE_MAX_ALLOCS + +/* + * Virtual memory allocation element - tracks allocated virtual page id and size + */ +struct valloc_elem { + uint16_t pages; /* number of 4kB pages allocated in contiguous block */ + uint16_t vpage; /* virtual page number from start of region */ +} __packed; + +/* + * Virtual page table structure + * + * This structure holds all information about virtual memory pages + * including the number of free and total pages, the virtual memory + * region, the block allocator for virtual pages and the allocation + * elements. 
+ */ +struct vpage_context { + struct k_mutex lock; + uint32_t free_pages; /* number of free 4kB pages */ + uint32_t total_pages; /* total number of 4kB pages */ + + /* Virtual memory region information */ + const struct sys_mm_drv_region *virtual_region; + struct sys_mem_blocks vpage_blocks; + + /* allocation elements to track page id to allocation size */ + uint32_t num_elems_in_use; /* number of allocated elements in use */ + struct valloc_elem velems[VPAGE_MAX_ALLOCS]; +}; + +/* uncached; persistent across all cores */ +static struct vpage_context page_context; +static sys_bitarray_t bitmap; + +/* singleton across all cores */ +static int vpage_init_done; + +/** + * @brief Allocate and map virtual memory pages + * + * Allocates memory pages from the virtual page allocator. + * Maps physical memory pages to the virtual region as needed. + * + * @param pages Number of 4kB pages to allocate. + * @param ptr Pointer to store the address of allocated pages. + * @retval 0 if successful. + */ +static int vpages_alloc_and_map(uint32_t pages, void **ptr) +{ + void *vaddr; + int ret; + + /* check for valid pages and ptr */ + if (!pages || !ptr) + return -EINVAL; + + *ptr = NULL; + + /* quick check for enough free pages */ + if (page_context.free_pages < pages) { + LOG_ERR("error: not enough free pages %d for requested pages %d", + page_context.free_pages, pages); + return -ENOMEM; + } + + /* check for allocation elements */ + if (page_context.num_elems_in_use >= VPAGE_MAX_ALLOCS) { + LOG_ERR("error: max allocation elements reached"); + return -ENOMEM; + } + + /* allocate virtual continuous blocks */ + ret = sys_mem_blocks_alloc_contiguous(&page_context.vpage_blocks, pages, &vaddr); + if (ret < 0) { + LOG_ERR("error: failed to allocate %d continuous virtual pages, free %d", + pages, page_context.free_pages); + return ret; + } + + /* map the virtual blocks in virtual region to free physical blocks */ + ret = sys_mm_drv_map_region_safe(page_context.virtual_region, vaddr, + 0, 
pages * CONFIG_MM_DRV_PAGE_SIZE, SYS_MM_MEM_PERM_RW); + if (ret < 0) { + LOG_ERR("error: failed to map virtual region %p to physical region %p, error %d", + vaddr, page_context.virtual_region->addr, ret); + sys_mem_blocks_free(&page_context.vpage_blocks, pages, &vaddr); + return ret; + } + + /* success update the free pages */ + page_context.free_pages -= pages; + + /* store the size and virtual page number in first free alloc element, + * we have already checked for a free element before the mapping. + */ + for (int i = 0; i < VPAGE_MAX_ALLOCS; i++) { + if (page_context.velems[i].pages == 0) { + page_context.velems[i].pages = pages; + page_context.velems[i].vpage = + (POINTER_TO_UINT(vaddr) - + POINTER_TO_UINT(page_context.vpage_blocks.buffer)) / + CONFIG_MM_DRV_PAGE_SIZE; + page_context.num_elems_in_use++; + break; + } + } + + /* return the virtual address */ + *ptr = vaddr; + return ret; +} + +/** + * @brief Allocate virtual memory pages + * + * Allocates virtual memory pages from the virtual page allocator. + * + * @param pages Number of 4kB pages to allocate. + * @retval NULL on allocation failure. + */ +void *alloc_vpages(uint32_t pages) +{ + void *ptr; + int err; + + k_mutex_lock(&page_context.lock, K_FOREVER); + err = vpages_alloc_and_map(pages, &ptr); + k_mutex_unlock(&page_context.lock); + if (err < 0) { + LOG_ERR("vpage_alloc failed %d for %d pages, total %d free %d", + err, pages, page_context.total_pages, page_context.free_pages); + } + LOG_INF("vpage_alloc ptr %p pages %d free %d/%d", ptr, pages, page_context.free_pages, + page_context.total_pages); + return ptr; +} + +/** + * @brief Free and unmap virtual memory pages + * + * Frees previously allocated virtual memory pages and unmaps them. + * + * @param ptr Pointer to the memory pages to free. + * @retval 0 if successful. + * @retval -EINVAL if ptr is invalid. 
+ */ +static int vpages_free_and_unmap(uintptr_t *ptr) +{ + int pages = 0; + int ret; + + /* check for valid ptr which must be page aligned */ + if (IS_ALIGNED(ptr, CONFIG_MM_DRV_PAGE_SIZE) == 0) { + LOG_ERR("error: invalid non aligned page pointer %p", ptr); + return -EINVAL; + } + + /* find the allocation element */ + for (int i = 0; i < VPAGE_MAX_ALLOCS; i++) { + if (page_context.velems[i].pages > 0 && + page_context.velems[i].vpage == + (POINTER_TO_UINT(ptr) - POINTER_TO_UINT(page_context.vpage_blocks.buffer)) / + CONFIG_MM_DRV_PAGE_SIZE) { + + pages = page_context.velems[i].pages; + + LOG_DBG("found allocation element %d pages %d vpage %d for ptr %p", + i, page_context.velems[i].pages, + page_context.velems[i].vpage, ptr); + + /* clear the element */ + page_context.velems[i].pages = 0; + page_context.velems[i].vpage = 0; + page_context.num_elems_in_use--; + break; + } + } + + /* check we found an allocation element */ + if (pages == 0) { + LOG_ERR("error: invalid page pointer %p not found", ptr); + return -EINVAL; + } + + /* unmap the pages from virtual region */ + ret = sys_mm_drv_unmap_region((void *)ptr, pages * CONFIG_MM_DRV_PAGE_SIZE); + if (ret < 0) { + LOG_ERR("error: failed to unmap virtual region %p pages %d, error %d", + ptr, pages, ret); + return ret; + } + + /* free the contiguous virtual page blocks */ + ret = sys_mem_blocks_free_contiguous(&page_context.vpage_blocks, ptr, pages); + if (ret < 0) { + LOG_ERR("error: failed to free %d continuous virtual page blocks at %p, error %d", + pages, ptr, ret); + return ret; + } + + /* success: update the free pages */ + page_context.free_pages += pages; + return ret; +} + +/** + * @brief Free virtual pages + * Frees previously allocated virtual memory pages and unmaps them. 
+ * + * @param ptr + */ +void free_vpages(void *ptr) +{ + int err; + k_mutex_lock(&page_context.lock, K_FOREVER); + err = vpages_free_and_unmap((uintptr_t *)ptr); + k_mutex_unlock(&page_context.lock); + assert(!err); /* should never fail */ + LOG_INF("vptr %p free/total pages %d/%d", ptr, page_context.free_pages, + page_context.total_pages); +} + +/** + * @brief Initialize virtual page allocator + * + * Initializes a virtual page allocator that manages a virtual memory region + * using a page table and block structures. + * + * @retval 0 if successful. + * @retval -ENOMEM on creation failure. + */ +static int init_vpages(void) +{ + const struct sys_mm_drv_region *virtual_memory_regions; + const struct sys_mm_drv_region *region; + uint32_t *bundles = NULL; + size_t block_count, bitmap_num_bundles; + int ret; + + /* Check if already initialized */ + if (vpage_init_done) + return 0; + + /* create the virtual memory region and add it to the system */ + ret = adsp_add_virtual_memory_region(adsp_mm_get_unused_l2_start_aligned(), + CONFIG_SOF_ZEPHYR_VIRTUAL_HEAP_REGION_SIZE, + VIRTUAL_REGION_SHARED_HEAP_ATTR); + if (ret) + return ret; + + memset(&page_context, 0, sizeof(page_context)); + k_mutex_init(&page_context.lock); + + /* now find the virtual region in all memory regions */ + virtual_memory_regions = sys_mm_drv_query_memory_regions(); + SYS_MM_DRV_MEMORY_REGION_FOREACH(virtual_memory_regions, region) { + if (region->attr == VIRTUAL_REGION_SHARED_HEAP_ATTR) { + page_context.virtual_region = region; + break; + } + } + sys_mm_drv_query_memory_regions_free(virtual_memory_regions); + + /* check for a valid region */ + if (!page_context.virtual_region) { + LOG_ERR("error: no valid virtual region found"); + return -EINVAL; + } + + block_count = region->size / CONFIG_MM_DRV_PAGE_SIZE; + if (block_count == 0) { + LOG_ERR("error: virtual region too small %d", region->size); + return -ENOMEM; + } + page_context.total_pages = block_count; + page_context.free_pages = 
block_count; + page_context.num_elems_in_use = 0; + + /* bundles are uint32_t of bits */ + bitmap_num_bundles = SOF_DIV_ROUND_UP(block_count, 32); + + /* allocate memory for bitmap bundles */ + bundles = rzalloc(SOF_MEM_FLAG_KERNEL | SOF_MEM_FLAG_COHERENT, + bitmap_num_bundles * sizeof(uint32_t)); + if (!bundles) { + LOG_ERR("error: virtual region bitmap alloc failed"); + return -ENOMEM; + } + + /* Fill allocators data based on config and virtual region data */ + page_context.vpage_blocks.info.num_blocks = block_count; + page_context.vpage_blocks.info.blk_sz_shift = ilog2(CONFIG_MM_DRV_PAGE_SIZE); + /* buffer is the start of the virtual memory region */ + page_context.vpage_blocks.buffer = (uint8_t *)page_context.virtual_region->addr; + + /* initialize bitmap */ + bitmap.num_bits = block_count; + bitmap.num_bundles = bitmap_num_bundles; + bitmap.bundles = bundles; + page_context.vpage_blocks.bitmap = &bitmap; + + LOG_INF("vpage_init region %p size 0x%x pages %d", + (void *)page_context.virtual_region->addr, + (int)page_context.virtual_region->size, block_count); + + vpage_init_done = 1; + return 0; +} + +SYS_INIT(init_vpages, POST_KERNEL, 1); + diff --git a/zephyr/lib/vregion.c b/zephyr/lib/vregion.c new file mode 100644 index 000000000000..f629f3336fbc --- /dev/null +++ b/zephyr/lib/vregion.c @@ -0,0 +1,432 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2025 Intel Corporation. + * + * Author: Liam Girdwood + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +LOG_MODULE_REGISTER(vregion, CONFIG_SOF_LOG_LEVEL); + +/* + * Pre Allocated Contiguous Virtual Memory Region Allocator + * + * This allocator manages a pre-allocated virtual memory region that uses the + * virtual page allocator to allocate and free memory pages. + * + * It is designed for use cases where a contiguous virtual memory region + * is required, such as for batched allocation of audio pipelines and modules. 
+ * + * New pipelines will create a new virtual region and will specify the size of the region + * which can be divided into multiple areas for different allocation lifetimes, permissions + * and sharing requirements. + * + * Advantages: + * + * 1) Contiguous virtual memory region for easier management and tracking of + * pipeline & DP module memory. i.e. we just need to track the vregion pointer. + * 2) Easier management of memory protection and sharing between different cores + * and domains by partitioning the virtual region into different areas with + * specific permissions and sharing requirements. + * 3) Reduced fragmentation and better cache utilization by using a simple linear + * allocator for lifetime objects. + * + * Note: Software must pass in the size of the region areas at pipeline creation time. + */ + +/** + * @brief virtual region memory region structure. + * + * This structure represents a virtual memory region, which includes + * information about the base address, size, and allocation status + * of the region. + * + * The virtual region memory region can be partitioned into five main areas on + * page-aligned boundaries (some are optional), listed here from base to top: + * + * 1. Text Region (optional): A read-only and executable region that can be used + * to store code or constant data. This region is optional and only present + * if the virtual region is created with a text size. It is page aligned and located + * at the start of the virtual region. Main use case would be DP module text. + * + * 2. Interim Heap: An interim memory area used for multiple temporary allocations + * and frees over the lifetime of the audio processing pipeline. e.g. for + * module kcontrol derived allocations/frees. + * + * 3. Shared Interim Heap (optional): An interim memory area used for multiple temporary + * allocations/frees that can be shared between multiple cores or memory domains. e.g. + * shared buffers between different cores or domains. + * + * 4. 
Lifetime Allocator: A simple incrementing allocator used for long-term static + * allocations that persist for the lifetime of the audio processing pipeline. This + * allocator compresses allocations for better cache utilization. + * + * 5. Shared Lifetime Allocator (optional): A simple incrementing allocator used for long + * term static allocations that can be shared between multiple cores or domains. This + * allocator aligns allocations to cache line boundaries to ensure cache coherency. + * + * * TODO: Pipeline/module reset() could reset the dynamic heap. + */ + + /* linear heap used for lifetime allocations */ +struct vlinear_heap { + uint8_t *base; /* base address of linear allocator */ + uint8_t *ptr; /* current alloc pointer */ + size_t size; /* size of linear allocator in bytes */ + size_t used; /* used bytes in linear allocator */ + int free_count; /* number of frees - tuning only */ +}; + +/* zephyr k_heap for interim allocations. TODO: make lockless for improved performance */ +struct zephyr_heap { + struct k_heap heap; + uint8_t *base; /* base address of zephyr heap allocator */ + size_t size; /* size of heap in bytes */ +}; + +struct vregion { + /* region context */ + uint8_t *base; /* base address of entire region */ + size_t size; /* size of whole region in bytes */ + size_t pages; /* size of whole region in pages */ + + /* optional text region - RO and Executable */ + struct vlinear_heap text; /* text linear heap */ + + /* interim heap */ + struct zephyr_heap interim; /* interim heap */ + + /* interim shared */ + struct zephyr_heap interim_shared; /* shared interim heap */ + + /* lifetime heap */ + struct vlinear_heap lifetime; /* lifetime linear heap */ + + /* optional shared static buffer heap */ + struct vlinear_heap lifetime_shared; /* shared lifetime linear heap */ +}; + +/** + * @brief Create a new virtual region instance with shared pages. + * + * Create a new VIRTUAL REGION instance with specified static, dynamic, and shared static sizes. 
+ * Total size is the sum of static, dynamic, and shared static sizes. + * + * @param[in] lifetime_size Size of the virtual region lifetime partition. + * @param[in] interim_size Size of the virtual region interim partition. + * @param[in] lifetime_shared_size Size of the virtual region shared lifetime partition. + * @param[in] interim_shared_size Size of the virtual region shared interim partition. + * @param[in] text_size Size of the optional read-only text partition. + * @return struct vregion* Pointer to the new virtual region instance, or NULL on failure. + */ +struct vregion *vregion_create(size_t lifetime_size, size_t interim_size, + size_t lifetime_shared_size, size_t interim_shared_size, + size_t text_size) +{ + struct vregion *vr; + uint32_t pages; + size_t total_size; + uint8_t *vregion_base; + + if (!lifetime_size || !interim_size) { + LOG_ERR("error: invalid vregion lifetime size %d or interim size %d", + lifetime_size, interim_size); + return NULL; + } + + /* + * Align up lifetime sizes and interim sizes to nearest page, the + * vregion structure is stored in lifetime area so account for its size too. 
+ */ + lifetime_size += sizeof(*vr); + lifetime_size = ALIGN_UP(lifetime_size, CONFIG_MM_DRV_PAGE_SIZE); + interim_size = ALIGN_UP(interim_size, CONFIG_MM_DRV_PAGE_SIZE); + lifetime_shared_size = ALIGN_UP(lifetime_shared_size, CONFIG_MM_DRV_PAGE_SIZE); + interim_shared_size = ALIGN_UP(interim_shared_size, CONFIG_MM_DRV_PAGE_SIZE); + text_size = ALIGN_UP(text_size, CONFIG_MM_DRV_PAGE_SIZE); + total_size = lifetime_size + interim_size + + lifetime_shared_size + interim_shared_size + text_size; + + /* allocate pages for vregion */ + pages = total_size / CONFIG_MM_DRV_PAGE_SIZE; + vregion_base = alloc_vpages(pages); + if (!vregion_base) + return NULL; + + /* init vregion - place it at the start of the lifetime region */ + vr = (struct vregion *)(vregion_base + text_size + interim_size); + vr->base = vregion_base; + vr->size = total_size; + vr->pages = pages; + + /* set partition sizes */ + vr->interim.size = interim_size; + vr->interim_shared.size = interim_shared_size; + vr->lifetime.size = lifetime_size; + vr->lifetime_shared.size = lifetime_shared_size; + vr->text.size = text_size; + + /* set base addresses for partitions */ + vr->text.base = vr->base; + vr->interim.base = vr->text.base + text_size; + vr->lifetime.base = vr->interim.base + interim_size; + vr->lifetime_shared.base = vr->lifetime.base + lifetime_size; + vr->interim_shared.base = vr->lifetime_shared.base + lifetime_shared_size; + + /* set alloc ptr addresses for lifetime linear partitions */ + vr->text.ptr = vr->text.base; + vr->lifetime.ptr = vr->lifetime.base + sizeof(*vr); /* skip vregion struct */ + vr->lifetime.used = sizeof(*vr); + vr->lifetime_shared.ptr = vr->lifetime_shared.base; + + /* init interim heaps */ + k_heap_init(&vr->interim.heap, vr->interim.base, interim_size); + if (interim_shared_size) { + k_heap_init(&vr->interim_shared.heap, vr->interim_shared.base, + interim_shared_size); + } + + LOG_INF("new at base %p size 0x%x pages %d struct embedded at %p", + (void *)vr->base, total_size, 
pages, (void *)vr); + LOG_INF(" interim size 0x%x at %p", interim_size, (void *)vr->interim.base); + LOG_INF(" lifetime size 0x%x at %p", lifetime_size, (void *)vr->lifetime.base); + if (interim_shared_size) + LOG_INF(" interim shared size 0x%x at %p", interim_shared_size, + (void *)vr->interim_shared.base); + if (lifetime_shared_size) + LOG_INF(" lifetime shared size 0x%x at %p", lifetime_shared_size, + (void *)vr->lifetime_shared.base); + if (text_size) + LOG_INF(" text size 0x%x at %p", text_size, (void *)vr->text.base); + + + return vr; +} + +/** + * @brief Destroy a virtual region instance. + * + * @param[in] vr Pointer to the virtual region instance to destroy. + */ +void vregion_destroy(struct vregion *vr) +{ + if (!vr) + return; + + LOG_INF("destroy %p size 0x%x pages %d", + (void *)vr->base, vr->size, vr->pages); + LOG_INF(" lifetime used %d free count %d", + vr->lifetime.used, vr->lifetime.free_count); + if (vr->lifetime_shared.size) + LOG_INF(" lifetime shared used %d free count %d", + vr->lifetime_shared.used, vr->lifetime_shared.free_count); + free_vpages(vr->base); +} + + +/** + * @brief Allocate memory with alignment from the virtual region dynamic heap. + * + * @param[in] heap Pointer to the heap to use. + * @param[in] size Size of the allocation. + * @param[in] align Alignment of the allocation. + * @return void* Pointer to the allocated memory, or NULL on failure. + */ +static void *interim_alloc(struct zephyr_heap *heap, + size_t size, size_t align) +{ + void *ptr; + + ptr = k_heap_aligned_alloc(&heap->heap, size, align, K_FOREVER); + if (!ptr) { + LOG_ERR("error: interim alloc failed for %d bytes align %d", + size, align); + return NULL; + } + + return ptr; +} + +/** + * @brief Free memory from the virtual region interim heap. + * + * @param[in] heap Pointer to the heap to use. + * @param[in] ptr Pointer to the memory to free. 
+ */ +static void interim_free(struct zephyr_heap *heap, void *ptr) +{ + k_heap_free(&heap->heap, ptr); +} + +/** + * @brief Allocate memory from the virtual region lifetime allocator. + * + * @param[in] heap Pointer to the linear heap to use. + * @param[in] size Size of the allocation. + * @param[in] align Alignment of the allocation. + * + * @return void* Pointer to the allocated memory, or NULL on failure. + */ +static void *lifetime_alloc(struct vlinear_heap *heap, + size_t size, size_t align) +{ + void *ptr; + uint8_t *aligned_ptr; + size_t heap_obj_size; + + /* align heap pointer to alignment requested */ + aligned_ptr = UINT_TO_POINTER(ALIGN_UP(POINTER_TO_UINT(heap->ptr), align)); + + /* also align up size to D$ bytes if asked - allocation head and tail aligned */ + if (align == CONFIG_DCACHE_LINE_SIZE) + size = ALIGN_UP(size, CONFIG_DCACHE_LINE_SIZE); + + /* calculate new heap object size for object and alignments */ + heap_obj_size = aligned_ptr - heap->ptr + size; + + /* check we have enough lifetime space left */ + if (heap_obj_size + heap->used > heap->size) { + LOG_ERR("error: lifetime alloc failed for object %d heap %d bytes free %d", + size, heap_obj_size, heap->size - heap->used); + return NULL; + } + + /* allocate memory */ + ptr = aligned_ptr; + heap->ptr += heap_obj_size; + heap->used += heap_obj_size; + + return ptr; +} + +/** + * @brief Free memory from the virtual region lifetime allocator. + * + * @param[in] heap Pointer to the linear heap to use. + * @param[in] ptr Pointer to the memory to free. + */ +static void lifetime_free(struct vlinear_heap *heap, void *ptr) +{ + /* simple free, just increment free count, this is for tuning only */ + heap->free_count++; + + LOG_DBG("lifetime free %p count %d", ptr, heap->free_count); +} + +/** + * @brief Free memory from the virtual region. + * + * @param vr Pointer to the virtual region instance. + * @param ptr Pointer to the memory to free. 
+ */ +void vregion_free(struct vregion *vr, void *ptr) +{ + if (!vr || !ptr) + return; + + /* check if pointer is in interim heap */ + if (ptr >= (void *)vr->interim.base && + ptr < (void *)(vr->interim.base + vr->interim.size)) { + interim_free(&vr->interim, ptr); + return; + } + + /* check if pointer is in interim shared heap */ + if (vr->interim_shared.size && + ptr >= (void *)vr->interim_shared.base && + ptr < (void *)(vr->interim_shared.base + vr->interim_shared.size)) { + interim_free(&vr->interim_shared, ptr); + return; + } + + /* check if pointer is in lifetime heap */ + if (ptr >= (void *)vr->lifetime.base && + ptr < (void *)(vr->lifetime.base + vr->lifetime.size)) { + lifetime_free(&vr->lifetime, ptr); + return; + } + + /* check if pointer is in lifetime shared heap */ + if (vr->lifetime_shared.size && + ptr >= (void *)vr->lifetime_shared.base && + ptr < (void *)(vr->lifetime_shared.base + vr->lifetime_shared.size)) { + lifetime_free(&vr->lifetime_shared, ptr); + return; + } + + LOG_ERR("error: vregion free invalid pointer %p", ptr); +} + +/** + * @brief Allocate memory type from the virtual region. + * + * @param[in] vr Pointer to the virtual region instance. + * @param[in] type Memory type to allocate. + * @param[in] size Size of the allocation. + * @param[in] alignment Alignment of the allocation. + * + * @return void* Pointer to the allocated memory, or NULL on failure. 
+ */ +void *vregion_alloc_align(struct vregion *vr, enum vregion_mem_type type, + size_t size, size_t alignment) +{ + if (!vr || !size) + return NULL; + + if (!alignment) + alignment = 4; /* default align 4 bytes */ + + switch (type) { + case VREGION_MEM_TYPE_INTERIM: + return interim_alloc(&vr->interim, size, alignment); + case VREGION_MEM_TYPE_LIFETIME: + return lifetime_alloc(&vr->lifetime, size, alignment); + case VREGION_MEM_TYPE_INTERIM_SHARED: + return interim_alloc(&vr->interim_shared, size, alignment); + case VREGION_MEM_TYPE_LIFETIME_SHARED: + return lifetime_alloc(&vr->lifetime_shared, size, + MAX(alignment, CONFIG_DCACHE_LINE_SIZE)); + default: + LOG_ERR("error: invalid memory type %d", type); + return NULL; + } +} + +/** + * @brief Allocate memory from the virtual region. + * @param[in] vr Pointer to the virtual region instance. + * @param[in] type Memory type to allocate. + * @param[in] size Size of the allocation. + * @return void* Pointer to the allocated memory, or NULL on failure. + */ +void *vregion_alloc(struct vregion *vr, enum vregion_mem_type type, size_t size) +{ + return vregion_alloc_align(vr, type, size, 0); +} + +/** + * @brief Log virtual region memory usage. + * + * @param[in] vr Pointer to the virtual region instance. 
+ */ +void vregion_info(struct vregion *vr) +{ + if (!vr) + return; + + LOG_INF("base %p size 0x%x pages %d", + (void *)vr->base, vr->size, vr->pages); + LOG_INF("lifetime used 0x%x free count %d", + vr->lifetime.used, vr->lifetime.free_count); + LOG_INF("lifetime shared used 0x%x free count %d", + vr->lifetime_shared.used, vr->lifetime_shared.free_count); +} +EXPORT_SYMBOL(vregion_info); diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index 2e302f22c94b..f926606df7b2 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -46,6 +46,29 @@ SOF_DEFINE_REG_UUID(zephyr); DECLARE_TR_CTX(zephyr_tr, SOF_UUID(zephyr_uuid), LOG_LEVEL_INFO); +#include +#include +#include +#include +#include +#include +#include + +void trace_msg(int msg) +{ + uint32_t *win; + const struct mem_win_config *config; + const struct device *dev = DEVICE_DT_GET(DT_NODELABEL(mem_window0)); + + if (!device_is_ready(dev)) + return; + + config = dev->config; + + win = sys_cache_uncached_ptr_get((__sparse_force void __sparse_cache *)config->mem_base); + win[0] = msg; +} + /* * Interrupts. *