From 6367f09a9c4774d321afc86cccbb29613889f000 Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Tue, 14 Feb 2023 16:30:58 +0100
Subject: [PATCH 1/5] kconfig: add DP_SCHEDULER kconfig def

Some platforms don't use Zephyr and therefore can't use the DP
scheduler. Add a config option.

Signed-off-by: Marcin Szkudlinski
---
 app/boards/intel_adsp_ace15_mtpm.conf |  1 +
 zephyr/Kconfig                        | 12 ++++++++++++
 2 files changed, 13 insertions(+)

diff --git a/app/boards/intel_adsp_ace15_mtpm.conf b/app/boards/intel_adsp_ace15_mtpm.conf
index 0ef1fe6975da..e38f16ffb4ff 100644
--- a/app/boards/intel_adsp_ace15_mtpm.conf
+++ b/app/boards/intel_adsp_ace15_mtpm.conf
@@ -30,6 +30,7 @@ CONFIG_DAI_INTEL_DMIC_NHLT=y
 CONFIG_DAI_DMIC_HAS_OWNERSHIP=y
 CONFIG_DAI_DMIC_HAS_MULTIPLE_LINE_SYNC=y
 CONFIG_DAI_INTEL_SSP=y
+CONFIG_ZEPHYR_DP_SCHEDULER=y
 CONFIG_DMA=y
 CONFIG_DMA_INTEL_ADSP_GPDMA=y
 CONFIG_DMA_DW_LLI_POOL_SIZE=50
diff --git a/zephyr/Kconfig b/zephyr/Kconfig
index 2534422a25cd..d62c07700741 100644
--- a/zephyr/Kconfig
+++ b/zephyr/Kconfig
@@ -37,4 +37,16 @@ config DMA_DOMAIN
 	  and dma_single_chan in Zephyr once it becomes more stable.

+config ZEPHYR_DP_SCHEDULER
+	bool "Use Zephyr thread-based DP scheduler"
+	default y if ACE
+	default n
+	depends on IPC_MAJOR_4
+	depends on ZEPHYR_SOF_MODULE
+	help
+	  Enable the Data Processing preemptive scheduler based on
+	  Zephyr preemptive threads.
+	  DP modules can be located on different cores than LL pipeline modules
+	  and may have a different tick (e.g. 300ms for speech recognition).
+
 endif

From b9d1ab7700cdfb17e32ad1a0f9548d639fdea38d Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Tue, 14 Feb 2023 11:12:11 +0100
Subject: [PATCH 2/5] scheduling: add Data Processing scheduler type

The DP scheduler is a scheduler based on Zephyr preemptible threads.
It will start each SOF task as a separate Zephyr thread.
In the current implementation the scheduler can trigger each
task/thread periodically or on demand.

TODO: more sophisticated scheduling decisions, with deadline and
task budget calculations.

Signed-off-by: Marcin Szkudlinski
---
 src/include/sof/schedule/dp_schedule.h |  94 +++++++
 src/include/sof/schedule/schedule.h    |   7 +-
 src/init/init.c                        |   7 +
 src/platform/intel/ace/platform.c      |  13 +-
 src/schedule/zephyr_dp_schedule.c      | 362 +++++++++++++++++++++++++
 zephyr/CMakeLists.txt                  |   4 +
 6 files changed, 484 insertions(+), 3 deletions(-)
 create mode 100644 src/include/sof/schedule/dp_schedule.h
 create mode 100644 src/schedule/zephyr_dp_schedule.c

diff --git a/src/include/sof/schedule/dp_schedule.h b/src/include/sof/schedule/dp_schedule.h
new file mode 100644
index 000000000000..f22b85c8fb99
--- /dev/null
+++ b/src/include/sof/schedule/dp_schedule.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: BSD-3-Clause */
+/*
+ * Copyright(c) 2023 Intel Corporation. All rights reserved.
+ *
+ * Author: Marcin Szkudlinski
+ */
+
+#ifndef __SOF_SCHEDULE_DP_SCHEDULE_H__
+#define __SOF_SCHEDULE_DP_SCHEDULE_H__
+
+#include
+#include
+#include
+#include
+
+/**
+ *
+ * The DP scheduler creates a separate preemptible Zephyr thread for each SOF task.
+ * There's only one instance of the DP scheduler in the system; however, threads can be
+ * assigned and pinned to any core in the system for their execution. There's no SMP
+ * processing.
+ *
+ * Task execution may be delayed and a task may be rescheduled periodically.
+ * NOTE: delayed start and rescheduling take place in sync with the LL scheduler, meaning
+ * the DP scheduler is triggered as the last task of LL running on the primary core.
+ * That implies a limitation: the LL scheduler MUST be running on the primary core in
+ * order for this feature to work.
+ * This is fine, because rescheduling is a feature used for data processing while a
+ * pipeline is running.
+ *
+ * Another possible usage of the DP scheduler is to schedule a task with
+ * SCHEDULER_DP_RUN_TASK_IMMEDIATELY as the start parameter. It will force the task to
+ * run without any delay, asynchronously to LL.
+ * This kind of scheduling may be used for starting regular Zephyr tasks using the SOF API.
+ *
+ * Task run() may return:
+ *  SOF_TASK_STATE_RESCHEDULE - the task will be rescheduled as specified in the scheduler
+ *	period; note that the task won't ever be rescheduled if LL is not running
+ *  SOF_TASK_STATE_COMPLETED - the task will be removed from scheduling,
+ *	calling schedule_task will add the task to processing again,
+ *	task_complete() will be called
+ *  SOF_TASK_STATE_CANCEL - the task will be removed from scheduling,
+ *	calling schedule_task will add the task to processing again,
+ *	task_complete() won't be called
+ *  other statuses - an assertion will trigger
+ *
+ * NOTE: task - means a SOF task
+ *	 thread - means a Zephyr preemptible thread
+ *
+ * TODO - EDF:
+ * Threads run at the same priority, lower than the thread running LL tasks. The Zephyr
+ * EDF mechanism is used to decide which thread/task is to be scheduled next. The DP
+ * scheduler calculates the task deadline and sets it in the Zephyr thread properties;
+ * the final scheduling decision is made by Zephyr.
+ *
+ * Each tick the scheduler iterates through the list of all active tasks and calculates
+ * a deadline based on
+ *  - knowledge of how the modules are bound
+ *  - the declared time required by a task to complete processing
+ *  - the deadline of the last module
+ *
+ */

+/** \brief tell the scheduler to run the task immediately, even if the LL tick is not running yet */
+#define SCHEDULER_DP_RUN_TASK_IMMEDIATELY ((uint64_t)-1)
+
+/**
+ * \brief Init the Data Processing scheduler
+ */
+int scheduler_dp_init(void);
+
+/**
+ * \brief Set the Data Processing scheduler to be accessible on secondary cores
+ */
+int scheduler_dp_init_secondary_core(void);
+
+/**
+ * \brief initialize a DP task and add it to scheduling
+ *
+ * \param[out] task pointer to the allocated task structure, returned on success
+ * \param[in] uid pointer to UUID of the task
+ * \param[in] ops pointer to task functions
+ * \param[in] data pointer to the thread private data
+ * \param[in] core CPU the thread should run on
+ * \param[in] stack_size size of the stack for the Zephyr thread
+ * \param[in] task_priority priority of the Zephyr thread
+ */
+int scheduler_dp_task_init(struct task **task,
+			   const struct sof_uuid_entry *uid,
+			   const struct task_ops *ops,
+			   void *data,
+			   uint16_t core,
+			   size_t stack_size,
+			   uint32_t task_priority);
+
+#endif /* __SOF_SCHEDULE_DP_SCHEDULE_H__ */
diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h
index 388dc43f5168..e503333942ac 100644
--- a/src/include/sof/schedule/schedule.h
+++ b/src/include/sof/schedule/schedule.h
@@ -31,6 +31,12 @@ enum {
 	SOF_SCHEDULE_LL_DMA,	/**< Low latency DMA, schedules immediately
 				  * on scheduling component's DMA interrupt
 				  */
+	SOF_SCHEDULE_DP,	/**< DataProcessing scheduler
+				  * Scheduler based on Zephyr preemptive threads
+				  * TODO: DP will become the Zephyr EDF scheduler type
+				  * and will be unified with SOF_SCHEDULE_EDF for Zephyr builds;
+				  * the current implementation of Zephyr-based EDF is deprecated now
+				  */
 	SOF_SCHEDULE_COUNT	/**< indicates number of scheduler types */
 };
@@ -38,7 +44,6 @@ enum { #define SOF_SCHEDULER_FREE_IRQ_ONLY BIT(0) /**< Free function disables only * interrupts */ - /** * Scheduler operations. * diff --git a/src/init/init.c b/src/init/init.c index 63339d2af94d..6e5049210c3c 100644 --- a/src/init/init.c +++ b/src/init/init.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include #include @@ -183,6 +184,12 @@ int secondary_core_init(struct sof *sof) if (dma_domain) scheduler_init_ll(dma_domain); +#if CONFIG_ZEPHYR_DP_SCHEDULER + err = scheduler_dp_init_secondary_core(); + if (err < 0) + return err; +#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ + /* initialize IDC mechanism */ trace_point(TRACE_BOOT_PLATFORM_IDC); err = idc_init(); diff --git a/src/platform/intel/ace/platform.c b/src/platform/intel/ace/platform.c index 78a4e4f6e52d..4874d832bd9e 100644 --- a/src/platform/intel/ace/platform.c +++ b/src/platform/intel/ace/platform.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -84,9 +85,17 @@ int platform_init(struct sof *sof) trace_point(TRACE_BOOT_PLATFORM_SCHED); scheduler_init_edf(); - /* init low latency timer domain and scheduler */ + /* init low latency timer domain and scheduler. Any failure is fatal */ sof->platform_timer_domain = zephyr_domain_init(PLATFORM_DEFAULT_CLOCK); - scheduler_init_ll(sof->platform_timer_domain); + ret = scheduler_init_ll(sof->platform_timer_domain); + if (ret < 0) + return ret; + +#if CONFIG_ZEPHYR_DP_SCHEDULER + ret = scheduler_dp_init(); + if (ret < 0) + return ret; +#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */ /* init the system agent */ trace_point(TRACE_BOOT_PLATFORM_AGENT); diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c new file mode 100644 index 000000000000..dfb95e71b148 --- /dev/null +++ b/src/schedule/zephyr_dp_schedule.c @@ -0,0 +1,362 @@ +// SPDX-License-Identifier: BSD-3-Clause +/* + * Copyright(c) 2023 Intel Corporation. All rights reserved. 
+ *
+ * Author: Marcin Szkudlinski
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+LOG_MODULE_REGISTER(dp_schedule, CONFIG_SOF_LOG_LEVEL);
+/* 87858bc2-baa9-40b6-8e4c-2c95ba8b1545 */
+DECLARE_SOF_UUID("dp-schedule", dp_sched_uuid, 0x87858bc2, 0xbaa9, 0x40b6,
+		 0x8e, 0x4c, 0x2c, 0x95, 0xba, 0x8b, 0x15, 0x45);
+
+DECLARE_TR_CTX(dp_tr, SOF_UUID(dp_sched_uuid), LOG_LEVEL_INFO);
+
+struct scheduler_dp_data {
+	struct list_item tasks;	/* list of active DP tasks */
+	struct k_spinlock lock;	/* synchronization between cores */
+};
+
+struct task_dp_pdata {
+	k_tid_t thread_id;		/* Zephyr thread ID */
+	k_thread_stack_t __sparse_cache *p_stack;	/* pointer to thread stack */
+	uint32_t ticks_period;		/* period the task should be scheduled with, in LL ticks */
+	uint32_t ticks_to_trigger;	/* number of ticks after which the task should be triggered */
+	struct k_sem sem;		/* semaphore for task scheduling */
+};
+
+/*
+ * There's only one instance of the DP scheduler for all cores.
+ * Keep a pointer to it here.
+ */
+static struct scheduler_dp_data *dp_sch;
+
+static inline k_spinlock_key_t scheduler_dp_lock(void)
+{
+	return k_spin_lock(&dp_sch->lock);
+}
+
+static inline void scheduler_dp_unlock(k_spinlock_key_t key)
+{
+	k_spin_unlock(&dp_sch->lock, key);
+}
+
+/*
+ * function called after every LL tick
+ *
+ * TODO:
+ * here the scheduler should calculate the deadlines of all tasks and tell Zephyr
+ * about them. Currently there's an assumption that the task is always ready to run.
+ */
+void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *caller_data)
+{
+	(void)receiver_data;
+	(void)event_type;
+	(void)caller_data;
+	struct list_item *tlist;
+	struct task *curr_task;
+	struct task_dp_pdata *pdata;
+	k_spinlock_key_t lock_key;
+
+	if (cpu_get_id() != PLATFORM_PRIMARY_CORE_ID)
+		return;
+
+	if (!dp_sch)
+		return;
+
+	lock_key = scheduler_dp_lock();
+	list_for_item(tlist, &dp_sch->tasks) {
+		curr_task = container_of(tlist, struct task, list);
+		pdata = curr_task->priv_data;
+
+		if (pdata->ticks_to_trigger == 0) {
+			if (curr_task->state == SOF_TASK_STATE_QUEUED) {
+				/* set new trigger time, start the thread */
+				pdata->ticks_to_trigger = pdata->ticks_period;
+				curr_task->state = SOF_TASK_STATE_RUNNING;
+				k_sem_give(&pdata->sem);
+			}
+		} else {
+			if (curr_task->state == SOF_TASK_STATE_QUEUED ||
+			    curr_task->state == SOF_TASK_STATE_RUNNING)
+				/* decrease the number of ticks to re-schedule */
+				pdata->ticks_to_trigger--;
+		}
+	}
+	scheduler_dp_unlock(lock_key);
+}
+
+static int scheduler_dp_task_cancel(void *data, struct task *task)
+{
+	(void)(data);
+	k_spinlock_key_t lock_key;
+
+	/* this is an async cancel - mark the task as canceled and remove it from scheduling */
+	lock_key = scheduler_dp_lock();
+
+	task->state = SOF_TASK_STATE_CANCEL;
+	list_item_del(&task->list);
+
+	scheduler_dp_unlock(lock_key);
+
+	return 0;
+}
+
+static int scheduler_dp_task_free(void *data, struct task *task)
+{
+	k_spinlock_key_t lock_key;
+	struct task_dp_pdata *pdata = task->priv_data;
+
+	/* abort the execution of the thread */
+	k_thread_abort(pdata->thread_id);
+
+	lock_key = scheduler_dp_lock();
+	list_item_del(&task->list);
+	task->priv_data = NULL;
+	task->state = SOF_TASK_STATE_FREE;
+	scheduler_dp_unlock(lock_key);
+
+	/* free the task stack */
+	rfree(pdata->p_stack);
+
+	/* all other memory was allocated as a single malloc and will be freed by the caller */
+	return 0;
+}
+
+/* Thread function called in component context, on the target core */
+static void dp_thread_fn(void *p1, void *p2, void *p3)
+{
+	struct task *task = p1;
+	(void)p2;
+	(void)p3;
+	struct task_dp_pdata *task_pdata = task->priv_data;
+	k_spinlock_key_t lock_key;
+	enum task_state state;
+
+	while (1) {
+		/*
+		 * the thread is started immediately after creation and will block on the
+		 * semaphore. The semaphore is released once the task is ready to process
+		 */
+		k_sem_take(&task_pdata->sem, K_FOREVER);
+
+		if (task->state == SOF_TASK_STATE_RUNNING)
+			state = task_run(task);
+		else
+			state = task->state;	/* to avoid an undefined variable warning */
+
+		lock_key = scheduler_dp_lock();
+		/*
+		 * check if the task is still running; it may have been canceled by an external
+		 * call. If not, set the state returned by the run procedure
+		 */
+		if (task->state == SOF_TASK_STATE_RUNNING) {
+			task->state = state;
+			switch (state) {
+			case SOF_TASK_STATE_RESCHEDULE:
+				/* mark to reschedule, schedule time is already calculated */
+				task->state = SOF_TASK_STATE_QUEUED;
+				break;
+
+			case SOF_TASK_STATE_CANCEL:
+			case SOF_TASK_STATE_COMPLETED:
+				/* remove from scheduling */
+				list_item_del(&task->list);
+				break;
+
+			default:
+				/* illegal state, serious defect, won't happen */
+				k_panic();
+			}
+		}
+
+		/* call task_complete */
+		if (task->state == SOF_TASK_STATE_COMPLETED) {
+			/* call task_complete outside the lock, it may eventually call schedule again */
+			scheduler_dp_unlock(lock_key);
+			task_complete(task);
+		} else {
+			scheduler_dp_unlock(lock_key);
+		}
+	}
+
+	/* never reached */
+}
+
+static int scheduler_dp_task_schedule(void *data, struct task *task, uint64_t start,
+				      uint64_t period)
+{
+	struct scheduler_dp_data *sch = data;
+	struct task_dp_pdata *pdata = task->priv_data;
+	k_spinlock_key_t lock_key;
+
+	lock_key = scheduler_dp_lock();
+
+	if (task->state != SOF_TASK_STATE_INIT &&
+	    task->state != SOF_TASK_STATE_CANCEL &&
+	    task->state != SOF_TASK_STATE_COMPLETED) {
+		scheduler_dp_unlock(lock_key);
+		return -EINVAL;
+	}
+
+	/* calculate the period in LL ticks */
+	pdata->ticks_period = period / LL_TIMER_PERIOD_US;
+
+	/* add the task to the DP scheduler list */
+	list_item_prepend(&task->list, &sch->tasks);
+
+	if (start == SCHEDULER_DP_RUN_TASK_IMMEDIATELY) {
+		/* trigger the task immediately, don't wait for the LL tick */
+		pdata->ticks_to_trigger = 0;
+		task->state = SOF_TASK_STATE_RUNNING;
+		k_sem_give(&pdata->sem);
+	} else {
+		/* wait for the LL tick */
+		pdata->ticks_to_trigger = start / LL_TIMER_PERIOD_US;
+		task->state = SOF_TASK_STATE_QUEUED;
+	}
+
+	scheduler_dp_unlock(lock_key);
+
+	return 0;
+}
+
+static struct scheduler_ops schedule_dp_ops = {
+	.schedule_task		= scheduler_dp_task_schedule,
+	.schedule_task_cancel	= scheduler_dp_task_cancel,
+	.schedule_task_free	= scheduler_dp_task_free,
+};
+
+int scheduler_dp_init_secondary_core(void)
+{
+	if (!dp_sch)
+		return -ENOMEM;
+
+	/* register the scheduler instance for the secondary core */
+	scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
+
+	return 0;
+}
+
+int scheduler_dp_init(void)
+{
+	dp_sch = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dp_sch));
+	if (!dp_sch)
+		return -ENOMEM;
+
+	list_init(&dp_sch->tasks);
+
+	scheduler_init(SOF_SCHEDULE_DP, &schedule_dp_ops, dp_sch);
+
+	notifier_register(NULL, NULL, NOTIFIER_ID_LL_POST_RUN, scheduler_dp_ll_tick, 0);
+
+	return 0;
+}
+
+int scheduler_dp_task_init(struct task **task,
+			   const struct sof_uuid_entry *uid,
+			   const struct task_ops *ops,
+			   void *data,
+			   uint16_t core,
+			   size_t stack_size,
+			   uint32_t task_priority)
+{
+	void *p_stack = NULL;
+
+	/* memory allocation helper structure */
+	struct {
+		struct task task;
+		struct task_dp_pdata pdata;
+		struct k_thread thread;
+	} *task_memory;
+
+	k_tid_t thread_id = NULL;
+	int ret;
+
+	/*
+	 * allocate memory
+	 * to avoid multiple malloc operations allocate all required memory as a single
+	 * structure and return a pointer to task_memory->task
+	 */
+	task_memory = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*task_memory));
+	if (!task_memory) {
+		tr_err(&dp_tr, "zephyr_dp_task_init(): memory alloc failed");
+		return -ENOMEM;
+	}
+
+	/* allocate the stack - it must be aligned, so a separate alloc */
+	stack_size = Z_KERNEL_STACK_SIZE_ADJUST(stack_size);
+	p_stack = rballoc_align(0, SOF_MEM_CAPS_RAM, stack_size, Z_KERNEL_STACK_OBJ_ALIGN);
+	if (!p_stack) {
+		tr_err(&dp_tr, "zephyr_dp_task_init(): stack alloc failed");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	/* create a Zephyr thread for the task */
+	thread_id = k_thread_create(&task_memory->thread, p_stack, stack_size, dp_thread_fn,
+				    &task_memory->task, NULL, NULL, task_priority,
+				    K_USER, K_FOREVER);
+	if (!thread_id) {
+		ret = -EFAULT;
+		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr thread create failed");
+		goto err;
+	}
+	/* pin the thread to a specific core */
+	ret = k_thread_cpu_pin(thread_id, core);
+	if (ret < 0) {
+		ret = -EFAULT;
+		tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed");
+		goto err;
+	}
+
+	/* internal SOF task init */
+	ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run,
+				 data, core, 0);
+	if (ret < 0) {
+		tr_err(&dp_tr, "zephyr_dp_task_init(): schedule_task_init failed");
+		goto err;
+	}
+
+	/* initialize the other task structures */
+	task_memory->task.ops.complete = ops->complete;
+	task_memory->task.ops.get_deadline = ops->get_deadline;
+	task_memory->task.state = SOF_TASK_STATE_INIT;
+	task_memory->task.core = core;
+
+	/* initialize the semaphore */
+	k_sem_init(&task_memory->pdata.sem, 0, 1);
+
+	/* success, fill the structures */
+	task_memory->task.priv_data = &task_memory->pdata;
+	task_memory->pdata.thread_id = thread_id;
+	task_memory->pdata.p_stack = p_stack;
+	*task = &task_memory->task;
+
+	/* start the thread - it will immediately block on the semaphore */
+	k_thread_start(thread_id);
+
+	return 0;
+err:
+	/* cleanup - free all allocated resources */
+	if (thread_id)
+		k_thread_abort(thread_id);
+	rfree(p_stack);
+	rfree(task_memory);
+	return ret;
+}
diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt
index 904a1cefa426..e950ce6206c3 100644
--- a/zephyr/CMakeLists.txt
+++ b/zephyr/CMakeLists.txt
@@ -220,6 +220,10 @@ if (CONFIG_ACE_VERSION_1_5)
 	${SOF_SRC_PATH}/schedule/zephyr_ll.c
 )

+zephyr_library_sources_ifdef(CONFIG_ZEPHYR_DP_SCHEDULER
+	${SOF_SRC_PATH}/schedule/zephyr_dp_schedule.c
+)
+
 # Sources for virtual heap management
 zephyr_library_sources(
 	lib/regions_mm.c

From 3ede75e437d41623d2de33cfaeff5b26a6bafb31 Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Tue, 14 Feb 2023 11:20:05 +0100
Subject: [PATCH 3/5] IPC4: add processing domain - LL or DP to component
 context

A component needs to keep information about how it needs to be
scheduled - as LowLatency or DataProcessing.
The information comes from the IPC4 init instance message.

Signed-off-by: Marcin Szkudlinski
---
 src/include/sof/audio/component.h | 11 ++++++++---
 src/ipc/ipc3/helper.c             |  1 +
 src/ipc/ipc4/helper.c             | 12 +++++++-----
 3 files changed, 16 insertions(+), 8 deletions(-)

diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h
index 9b1806a48ec8..11dd7d3b5e87 100644
--- a/src/include/sof/audio/component.h
+++ b/src/include/sof/audio/component.h
@@ -535,6 +535,9 @@ struct comp_driver_info {
 	struct list_item list;	/**< list of component drivers */
 };

+#define COMP_PROCESSING_DOMAIN_LL 0
+#define COMP_PROCESSING_DOMAIN_DP 1
+
 /**
  * Audio component base configuration from IPC at creation.
  */
@@ -542,6 +545,7 @@ struct comp_ipc_config {
 	uint32_t core;		/**< core we run on */
 	uint32_t id;		/**< component id */
 	uint32_t pipeline_id;	/**< component pipeline id */
+	uint32_t proc_domain;	/**< processing domain - LL or DP */
 	enum sof_comp_type type;	/**< component type */
 	uint32_t periods_sink;	/**< 0 means variable */
 	uint32_t periods_source;/**< 0 means variable */
@@ -569,9 +573,10 @@ struct comp_dev {
 					  * to run component's processing
 					  */

-	struct task *task;	/**< component's processing task used only
-				  * for components running on different core
-				  * than the rest of the pipeline
+	struct task *task;	/**< component's processing task, used
+				  * 1) for components running on a different core
+				  *    than the rest of the pipeline
+				  * 2) for all DP tasks
 				  */
 	uint32_t size;		/**< component's allocated size */
 	uint32_t period;	/**< component's processing period */
diff --git a/src/ipc/ipc3/helper.c b/src/ipc/ipc3/helper.c
index f06687760b73..116571e13318 100644
--- a/src/ipc/ipc3/helper.c
+++ b/src/ipc/ipc3/helper.c
@@ -163,6 +163,7 @@ static void comp_common_builder(struct sof_ipc_comp *comp,
 	config->core = comp->core;
 	config->id = comp->id;
 	config->pipeline_id = comp->pipeline_id;
+	config->proc_domain = COMP_PROCESSING_DOMAIN_LL;
 	config->type = comp->type;

 	/* buffers dont have the following data */
diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c
index b25ef40e2289..602df444dad4 100644
--- a/src/ipc/ipc4/helper.c
+++ b/src/ipc/ipc4/helper.c
@@ -18,6 +18,7 @@
 #include
 #include
 #include
+#include
 #include
 #ifdef __ZEPHYR__
 #include /* for IMR_BOOT_LDR_MANIFEST_BASE */
@@ -97,6 +98,11 @@ struct comp_dev *comp_new_ipc4(struct ipc4_module_init_instance *module_init)
 	ipc_config.pipeline_id = module_init->extension.r.ppl_instance_id;
 	ipc_config.core = module_init->extension.r.core_id;

+	if (module_init->extension.r.proc_domain)
+		ipc_config.proc_domain = COMP_PROCESSING_DOMAIN_DP;
+	else
+		ipc_config.proc_domain = COMP_PROCESSING_DOMAIN_LL;
+
 	dcache_invalidate_region((__sparse_force void __sparse_cache *)MAILBOX_HOSTBOX_BASE,
 				 MAILBOX_HOSTBOX_SIZE);

@@ -170,11 +176,7 @@ static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc)
 	}
 	pipe->time_domain = SOF_TIME_DOMAIN_TIMER;

-	/* 1ms
-	 * TODO: add DP scheduler support. Now only the
-	 * LL scheduler tasks is supported.
-	 */
-	pipe->period = 1000;
+	pipe->period = LL_TIMER_PERIOD_US;

 	/* sched_id is set in FW so initialize it to a invalid value */
 	pipe->sched_id = 0xFFFFFFFF;

From 77c590292dddd86e1d5cf2f35f7ed61871782767 Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Tue, 14 Feb 2023 11:41:58 +0100
Subject: [PATCH 4/5] IPC4: add DP domain modules to pipeline scheduling

Pipeline creation:
 - create a task for each DP module when started on a primary or
   secondary core
 - delete a task for each DP module when stopped
 - don't call comp_copy in LL context for DP modules

Signed-off-by: Marcin Szkudlinski
---
 src/audio/component.c                  |  5 ++-
 src/audio/pipeline/pipeline-params.c   | 18 ++++++++-
 src/audio/pipeline/pipeline-schedule.c | 56 +++++++++++++++++++++++++-
 src/include/sof/audio/component_ext.h  | 39 ++++++++++++++++--
 src/include/sof/audio/pipeline.h       | 11 ++++-
 src/ipc/ipc4/helper.c                  |  2 +-
 6 files changed, 120 insertions(+), 11 deletions(-)

diff --git a/src/audio/component.c b/src/audio/component.c
index 295fc890c74e..74501b0fcbdf 100644
--- a/src/audio/component.c
+++ b/src/audio/component.c
@@ -302,8 +302,9 @@ int comp_copy(struct comp_dev *dev)

 	assert(dev->drv->ops.copy);

-	/* copy only if we are the owner of the component */
-	if (cpu_is_me(dev->ipc_config.core)) {
+	/* copy only if we are the owner of the LL component */
+	if (dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_LL &&
+	    cpu_is_me(dev->ipc_config.core)) {
 #if CONFIG_PERFORMANCE_COUNTERS
 		perf_cnt_init(&dev->pcd);
 #endif
diff --git a/src/audio/pipeline/pipeline-params.c b/src/audio/pipeline/pipeline-params.c
index 6f1e883684b9..baab59e2120c 100644
--- a/src/audio/pipeline/pipeline-params.c
+++ b/src/audio/pipeline/pipeline-params.c
@@ -317,7 +317,23 @@ static int pipeline_comp_prepare(struct comp_dev *current,
 		}
 	}

-	err = pipeline_comp_task_init(current->pipeline);
+	switch (current->ipc_config.proc_domain) {
+	case COMP_PROCESSING_DOMAIN_LL:
+		/* this is an LL scheduled module */
+		err = pipeline_comp_ll_task_init(current->pipeline);
+		break;
+
+#if CONFIG_ZEPHYR_DP_SCHEDULER
+	case COMP_PROCESSING_DOMAIN_DP:
+		/* this is a DP scheduled module */
+		err = pipeline_comp_dp_task_init(current);
+		break;
+#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
+
+	default:
+		err = -EINVAL;
+	}
+
 	if (err < 0)
 		return err;

diff --git a/src/audio/pipeline/pipeline-schedule.c b/src/audio/pipeline/pipeline-schedule.c
index 6bc010bfa01d..a4a7a71da50f 100644
--- a/src/audio/pipeline/pipeline-schedule.c
+++ b/src/audio/pipeline/pipeline-schedule.c
@@ -12,6 +12,7 @@
 #include
 #include
 #include
+#include
 #include
 #include
 #include
@@ -31,6 +32,25 @@ LOG_MODULE_DECLARE(pipe, CONFIG_SOF_LOG_LEVEL);
 DECLARE_SOF_UUID("pipe-task", pipe_task_uuid, 0xf11818eb, 0xe92e, 0x4082,
 		 0x82, 0xa3, 0xdc, 0x54, 0xc6, 0x04, 0xeb, 0xb3);

+#if CONFIG_ZEPHYR_DP_SCHEDULER
+
+/* ee755917-96b9-4130-b49e-37b9d0501993 */
+DECLARE_SOF_UUID("dp-task", dp_task_uuid, 0xee755917, 0x96b9, 0x4130,
+		 0xb4, 0x9e, 0x37, 0xb9, 0xd0, 0x50, 0x19, 0x93);
+
+/**
+ * current static stack size for each DP component
+ * TODO: to be taken from the module manifest
+ */
+#define TASK_DP_STACK_SIZE 8192
+
+/**
+ * \brief Priority of the DP threads in the system.
+ */
+#define ZEPHYR_DP_THREAD_PRIORITY (CONFIG_NUM_PREEMPT_PRIORITIES - 1)
+
+#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
+
 static void pipeline_schedule_cancel(struct pipeline *p)
 {
 	schedule_task_cancel(p->pipe_task);
@@ -333,7 +353,7 @@ void pipeline_schedule_triggered(struct pipeline_walk_context *ctx,
 	irq_local_enable(flags);
 }

-int pipeline_comp_task_init(struct pipeline *p)
+int pipeline_comp_ll_task_init(struct pipeline *p)
 {
 	uint32_t type;

@@ -355,6 +375,40 @@ int pipeline_comp_ll_task_init(struct pipeline *p)
 	return 0;
 }

+#if CONFIG_ZEPHYR_DP_SCHEDULER
+static enum task_state dp_task_run(void *data)
+{
+	struct comp_dev *comp = data;
+
+	comp->drv->ops.copy(comp);
+	return SOF_TASK_STATE_RESCHEDULE;
+}
+
+int pipeline_comp_dp_task_init(struct comp_dev *comp)
+{
+	int ret;
+	struct task_ops ops = {
+		.run		= dp_task_run,
+		.get_deadline	= NULL,
+		.complete	= NULL
+	};
+
+	if (!comp->task) {
+		ret = scheduler_dp_task_init(&comp->task,
+					     SOF_UUID(dp_task_uuid),
+					     &ops,
+					     comp,
+					     comp->ipc_config.core,
+					     TASK_DP_STACK_SIZE,
+					     ZEPHYR_DP_THREAD_PRIORITY);
+		if (ret < 0)
+			return ret;
+	}
+
+	return 0;
+}
+#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
+
 void pipeline_comp_trigger_sched_comp(struct pipeline *p,
 				      struct comp_dev *comp,
 				      struct pipeline_walk_context *ctx)
diff --git a/src/include/sof/audio/component_ext.h b/src/include/sof/audio/component_ext.h
index e56c33eea1e9..339b479b3ff2 100644
--- a/src/include/sof/audio/component_ext.h
+++ b/src/include/sof/audio/component_ext.h
@@ -50,8 +50,9 @@ static inline void comp_free(struct comp_dev *dev)
 {
 	assert(dev->drv->ops.free);

-	/* free task if shared component */
-	if (dev->is_shared && dev->task) {
+	/* free task if shared component or DP task */
+	if ((dev->is_shared || dev->ipc_config.proc_domain == COMP_PROCESSING_DOMAIN_DP) &&
+	    dev->task) {
 		schedule_task_free(dev->task);
 		rfree(dev->task);
 	}
@@ -147,13 +148,43 @@ static inline int comp_trigger_remote(struct comp_dev *dev, int cmd)
 	return idc_send_msg(&msg, IDC_BLOCKING);
 }

+static inline int comp_trigger_local(struct comp_dev *dev, int cmd)
+{
+	int ret;
+
+	ret = dev->drv->ops.trigger(dev, cmd);
+
+	/* start a thread in case of a shared component or DP scheduling */
+	if (dev->task) {
+		/* schedule or cancel the task */
+		switch (cmd) {
+		case COMP_TRIGGER_START:
+		case COMP_TRIGGER_RELEASE:
+			schedule_task(dev->task, 0, dev->period);
+			break;
+		case COMP_TRIGGER_XRUN:
+		case COMP_TRIGGER_PAUSE:
+		case COMP_TRIGGER_STOP:
+			schedule_task_cancel(dev->task);
+			break;
+		}
+	}
+
+	return ret;
+}
+
 /** See comp_ops::trigger */
 static inline int comp_trigger(struct comp_dev *dev, int cmd)
 {
+	int ret;
 	assert(dev->drv->ops.trigger);

-	return (dev->is_shared && !cpu_is_me(dev->ipc_config.core)) ?
-		comp_trigger_remote(dev, cmd) : dev->drv->ops.trigger(dev, cmd);
+	if (dev->is_shared && !cpu_is_me(dev->ipc_config.core))
+		ret = comp_trigger_remote(dev, cmd);
+	else
+		ret = comp_trigger_local(dev, cmd);
+
+	return ret;
 }

 /** Runs comp_ops::prepare on the target component's core */
diff --git a/src/include/sof/audio/pipeline.h b/src/include/sof/audio/pipeline.h
index be631a8eaed0..d80e4380aa82 100644
--- a/src/include/sof/audio/pipeline.h
+++ b/src/include/sof/audio/pipeline.h
@@ -340,11 +340,18 @@ static inline bool pipeline_is_this_cpu(struct pipeline *p)
 }

 /**
- * \brief Free's a pipeline.
+ * \brief Init an LL task for a pipeline.
  * \param[in] p pipeline.
  * \return 0 on success.
  */
-int pipeline_comp_task_init(struct pipeline *p);
+int pipeline_comp_ll_task_init(struct pipeline *p);
+
+/**
+ * \brief Init a DP task for a component.
+ * \param[in] comp the component the task is created for.
+ * \return 0 on success.
+ */
+int pipeline_comp_dp_task_init(struct comp_dev *comp);

 /**
  * \brief Free's a pipeline.
diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c
index 602df444dad4..11d9bf784154 100644
--- a/src/ipc/ipc4/helper.c
+++ b/src/ipc/ipc4/helper.c
@@ -289,7 +289,7 @@ int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id)
 		return ret;
 	}

-	/* free buffer and remove from list */
+	/* free buffer, delete all tasks and remove from list */
 	ret = pipeline_free(ipc_pipe->pipeline);
 	if (ret < 0) {
 		tr_err(&ipc_tr, "ipc_pipeline_free(): pipeline_free() failed");

From 3b4f87ae23dc5c0acd31966bb5605bb69d692795 Mon Sep 17 00:00:00 2001
From: Marcin Szkudlinski
Date: Tue, 28 Feb 2023 17:40:55 +0100
Subject: [PATCH 5/5] IPC4: Workaround: Lock DP modules to same core as
 pipeline

This commit prevents DP modules from running on different cores than
the pipeline LL modules. This limitation is enforced because of
possible cache races in pipeline_for_each_comp().
To be removed once a safe implementation is ready.

Signed-off-by: Marcin Szkudlinski
---
 src/audio/pipeline/pipeline-params.c | 12 +++++++++++-
 1 file changed, 11 insertions(+), 1 deletion(-)

diff --git a/src/audio/pipeline/pipeline-params.c b/src/audio/pipeline/pipeline-params.c
index baab59e2120c..ecdac1249c78 100644
--- a/src/audio/pipeline/pipeline-params.c
+++ b/src/audio/pipeline/pipeline-params.c
@@ -326,7 +326,17 @@ static int pipeline_comp_prepare(struct comp_dev *current,
 #if CONFIG_ZEPHYR_DP_SCHEDULER
 	case COMP_PROCESSING_DOMAIN_DP:
 		/* this is a DP scheduled module */
-		err = pipeline_comp_dp_task_init(current);
+
+		/*
+		 * workaround - because of some cache issues, currently we can allow DP
+		 * modules to run only on the same core as the LL pipeline.
+		 * To be removed once buffering is fixed
+		 */
+		if (current->pipeline->core != current->ipc_config.core)
+			err = -EINVAL;
+		else
+			err = pipeline_comp_dp_task_init(current);
+
 		break;
#endif /* CONFIG_ZEPHYR_DP_SCHEDULER */
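
---

Note for reviewers: below is a minimal usage sketch of the API added in this
series, mirroring the dp_task_run()/pipeline_comp_dp_task_init() pattern from
patch 4. It is illustrative only, not part of the patches: my_comp_dp_run(),
my_comp_start_dp_task() and the uid argument are hypothetical names, the
include paths are approximate, and the stack size, priority and period are
placeholder values copied from the patch 4 defaults.

#include <sof/audio/component.h>
#include <sof/schedule/dp_schedule.h>
#include <sof/schedule/schedule.h>
#include <sof/schedule/task.h>

/* one processing cycle per trigger; ask to be re-run on the next period */
static enum task_state my_comp_dp_run(void *data)
{
	struct comp_dev *dev = data;

	dev->drv->ops.copy(dev);
	return SOF_TASK_STATE_RESCHEDULE;
}

static int my_comp_start_dp_task(struct comp_dev *dev,
				 const struct sof_uuid_entry *uid)
{
	struct task_ops ops = {
		.run		= my_comp_dp_run,
		.get_deadline	= NULL,
		.complete	= NULL,
	};
	int ret;

	/* create a Zephyr thread pinned to the component's core; 8192 bytes of
	 * stack and the lowest preemptible priority match the TASK_DP_STACK_SIZE
	 * and ZEPHYR_DP_THREAD_PRIORITY defaults used for pipeline DP tasks
	 */
	ret = scheduler_dp_task_init(&dev->task, uid, &ops, dev,
				     dev->ipc_config.core, 8192,
				     CONFIG_NUM_PREEMPT_PRIORITIES - 1);
	if (ret < 0)
		return ret;

	/* queue the task: first run after one LL tick, then once per LL tick;
	 * passing SCHEDULER_DP_RUN_TASK_IMMEDIATELY as start would instead run
	 * the task right away, asynchronously to the LL scheduler
	 */
	schedule_task(dev->task, LL_TIMER_PERIOD_US, LL_TIMER_PERIOD_US);

	return 0;
}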