From a488413a073120af55162b23ac7bbf7a5afdd264 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Thu, 17 Jun 2021 12:41:11 +0200
Subject: [PATCH 1/7] schedule: remove an unused "period" parameter

When registering scheduling domains, the period parameter is never
used, so remove it.

Signed-off-by: Guennadi Liakhovetski
---
 src/include/sof/schedule/ll_schedule_domain.h | 6 +++---
 src/schedule/dma_multi_chan_domain.c          | 2 +-
 src/schedule/dma_single_chan_domain.c         | 2 +-
 src/schedule/ll_schedule.c                    | 3 +--
 src/schedule/timer_domain.c                   | 6 +++---
 src/schedule/zephyr_domain.c                  | 2 +-
 6 files changed, 10 insertions(+), 11 deletions(-)

diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 06dc980f212b..2eefae6acac3 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -29,7 +29,7 @@ struct timer;
 
 struct ll_schedule_domain_ops {
 	int (*domain_register)(struct ll_schedule_domain *domain,
-			       uint64_t period, struct task *task,
+			       struct task *task,
 			       void (*handler)(void *arg), void *arg);
 	int (*domain_unregister)(struct ll_schedule_domain *domain,
 				 struct task *task, uint32_t num_tasks);
@@ -116,7 +116,7 @@ static inline void domain_clear(struct ll_schedule_domain *domain)
 }
 
 static inline int domain_register(struct ll_schedule_domain *domain,
-				  uint64_t period, struct task *task,
+				  struct task *task,
 				  void (*handler)(void *arg), void *arg)
 {
 	int core = cpu_get_id();
@@ -124,7 +124,7 @@ static inline int domain_register(struct ll_schedule_domain *domain,
 
 	assert(domain->ops->domain_register);
 
-	ret = domain->ops->domain_register(domain, period, task, handler, arg);
+	ret = domain->ops->domain_register(domain, task, handler, arg);
 
 	if (!ret) {
 		/* registered one more task, increase the count */

diff --git a/src/schedule/dma_multi_chan_domain.c b/src/schedule/dma_multi_chan_domain.c
index 049273210636..997cab3d5e68 100644
--- a/src/schedule/dma_multi_chan_domain.c
+++ b/src/schedule/dma_multi_chan_domain.c
@@ -97,7 +97,7 @@ static int dma_multi_chan_domain_irq_register(struct dma_domain_data *data,
  * DMA interrupts (different irq number per DMA channel).
  */
 static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain,
-					  uint64_t period, struct task *task,
+					  struct task *task,
 					  void (*handler)(void *arg), void *arg)
 {
 	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);

diff --git a/src/schedule/dma_single_chan_domain.c b/src/schedule/dma_single_chan_domain.c
index e61ea6df4e58..816dab8e8ffb 100644
--- a/src/schedule/dma_single_chan_domain.c
+++ b/src/schedule/dma_single_chan_domain.c
@@ -174,7 +174,7 @@ static void dma_single_chan_domain_irq_unregister(struct dma_domain_data *data)
  * cores need to be notified.
  */
 static int dma_single_chan_domain_register(struct ll_schedule_domain *domain,
-					   uint64_t period, struct task *task,
+					   struct task *task,
 					   void (*handler)(void *arg), void *arg)
 {

diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c
index ce99298c36c2..881eda9acb31 100644
--- a/src/schedule/ll_schedule.c
+++ b/src/schedule/ll_schedule.c
@@ -253,8 +253,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch,
 
 	spin_lock(&domain->lock);
 
-	ret = domain_register(domain, period, task, &schedule_ll_tasks_run,
-			      sch);
+	ret = domain_register(domain, task, &schedule_ll_tasks_run, sch);
 	if (ret < 0) {
 		tr_err(&ll_tr, "schedule_ll_domain_set: cannot register domain %d",
 		       ret);

diff --git a/src/schedule/timer_domain.c b/src/schedule/timer_domain.c
index 1160cd16897c..48c7862033b4 100644
--- a/src/schedule/timer_domain.c
+++ b/src/schedule/timer_domain.c
@@ -44,7 +44,7 @@ static void timer_report_delay(int id, uint64_t delay)
 }
 
 static int timer_domain_register(struct ll_schedule_domain *domain,
-				 uint64_t period, struct task *task,
+				 struct task *task,
 				 void (*handler)(void *arg), void *arg)
 {
 	struct timer_domain *timer_domain = ll_sch_domain_get_pdata(domain);
@@ -60,8 +60,8 @@ static int timer_domain_register(struct ll_schedule_domain *domain,
 	timer_domain->arg[core] = arg;
 	ret = timer_register(timer_domain->timer, handler, arg);
 
-	tr_info(&ll_tr, "timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",
-		domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)period);
+	tr_info(&ll_tr, "timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d",
+		domain->type, domain->clk, domain->ticks_per_ms);
 
 out:
 	return ret;

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index 790693949fd1..c4e4a00a36d7 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -81,7 +81,7 @@ static void zephyr_domain_timer_fn(struct k_timer *timer)
 }
 
 static int zephyr_domain_register(struct ll_schedule_domain *domain,
-				  uint64_t period, struct task *task,
+				  struct task *task,
 				  void (*handler)(void *arg), void *arg)
 {
 	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);

From 0c7b6b76d570e635485d6863a4fb590b26d9d567 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 22 Jun 2021 16:42:10 +0200
Subject: [PATCH 2/7] zephyr: ll-domain: fix domain unregistration

When the .domain_unregister() method is called, .total_num_tasks is
still positive; it only drops to 0 for the last task after
.domain_unregister() returns. When cleaning up, also set the timer's
user data pointer to NULL.
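The last-task check therefore has to test for 1, not 0. As a sketch
(stop_timer_if_last_task() is a hypothetical helper made up for this
message; in the diff below the check is inlined in
zephyr_domain_unregister()):

  static void stop_timer_if_last_task(struct ll_schedule_domain *domain,
  				      struct zephyr_domain *zephyr_domain)
  {
  	/* the departing task is still counted here, so "last" means 1 */
  	if (atomic_read(&domain->total_num_tasks) == 1) {
  		k_timer_stop(&zephyr_domain->timer);
  		/* a NULL user pointer lets the next registration re-arm the timer */
  		k_timer_user_data_set(&zephyr_domain->timer, NULL);
  	}
  }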
Signed-off-by: Guennadi Liakhovetski --- src/schedule/zephyr_domain.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index c4e4a00a36d7..edebc6874c03 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -69,7 +69,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) /* Timer callback: runs in timer IRQ context */ static void zephyr_domain_timer_fn(struct k_timer *timer) { - struct zephyr_domain *zephyr_domain = timer->user_data; + struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer); int core; if (!zephyr_domain) @@ -113,11 +113,11 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_thread_start(thread); - if (!zephyr_domain->timer.user_data) { + if (!k_timer_user_data_get(&zephyr_domain->timer)) { k_timeout_t start = {0}; k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL); - zephyr_domain->timer.user_data = zephyr_domain; + k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain); k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US)); } @@ -140,8 +140,10 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, if (num_tasks) return 0; - if (!atomic_read(&domain->total_num_tasks)) + if (atomic_read(&domain->total_num_tasks) == 1) { k_timer_stop(&zephyr_domain->timer); + k_timer_user_data_set(&zephyr_domain->timer, NULL); + } k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread); zephyr_domain->domain_thread[core].handler = NULL; From 53eec382b4f6ded015b71de33f11c111af3fceca Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Tue, 22 Jun 2021 16:48:54 +0200 Subject: [PATCH 3/7] zephyr: ll-domain: update .next_tick on each timer interrupt .next_tick has to be initialised at domain registration and updated on each scheduling domain event. 
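The period-to-ticks conversion and the catch-up update amount to the
following (a sketch of the logic in the diff below; advance_next_tick()
is a hypothetical helper made up for this message, the real loop is
inlined in zephyr_domain_timer_fn()):

  #define LL_TIMER_PERIOD_US 1000ULL /* period in microseconds */
  #define LL_TIMER_PERIOD_TICKS \
  	(CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)

  /* advance next_tick past "now"; normally a single iteration */
  static void advance_next_tick(struct ll_schedule_domain *domain, uint64_t now)
  {
  	while (domain->next_tick < now)
  		domain->next_tick += LL_TIMER_PERIOD_TICKS;
  }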
Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_domain.c | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index edebc6874c03..ed6079c4d61f 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -34,7 +34,8 @@
 
 #define ZEPHYR_LL_STACK_SIZE 8192
 
-#define LL_TIMER_PERIOD_US 1000 /* period in microseconds */
+#define LL_TIMER_PERIOD_US 1000ULL /* period in microseconds */
+#define LL_TIMER_PERIOD_TICKS (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)
 
 K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);
 
@@ -70,11 +71,16 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 static void zephyr_domain_timer_fn(struct k_timer *timer)
 {
 	struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer);
+	uint64_t now = platform_timer_get(NULL);
 	int core;
 
 	if (!zephyr_domain)
 		return;
 
+	/* This loop should only run once */
+	while (zephyr_domain->ll_domain->next_tick < now)
+		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;
+
 	for (core = 0; core < CONFIG_CORE_COUNT; core++)
 		if (zephyr_domain->domain_thread[core].handler)
 			k_sem_give(&zephyr_domain->sem);
@@ -120,6 +126,8 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,
 		k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain);
 
 		k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));
+		domain->next_tick = platform_timer_get_atomic(zephyr_domain->ll_timer) +
+			k_ticks_to_cyc_ceil64(k_timer_remaining_ticks(&zephyr_domain->timer));
 	}
 
 	tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",

From 619c609b5c0abd27c0e663638183fec1a6d2a0f2 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 22 Jun 2021 17:05:04 +0200
Subject: [PATCH 4/7] zephyr: ll-domain: stay within the zephyr_* namespace

zephyr_domain.c is a drop-in replacement for timer_domain.c. To avoid
modifying initialisation code, we used the same timer_domain_init()
name for its initialisation function. However, the rest of the file
uses the zephyr_domain_* namespace. Rename the function to stay within
the same namespace and use a macro to redirect the call.
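The resulting redirection in ll_schedule_domain.h keeps the old name
visible to platform code, so callers of timer_domain_init() need no
changes:

  #ifndef __ZEPHYR__
  struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk);
  #else
  struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk);
  #define timer_domain_init zephyr_domain_init
  #endif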
Signed-off-by: Guennadi Liakhovetski
---
 src/include/sof/schedule/ll_schedule_domain.h | 6 ++++++
 src/schedule/zephyr_domain.c                  | 2 +-
 2 files changed, 7 insertions(+), 1 deletion(-)

diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 2eefae6acac3..3f1643dbcfc0 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -188,7 +188,13 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain,
 	return ret;
 }
 
+
+#ifndef __ZEPHYR__
 struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk);
+#else
+struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk);
+#define timer_domain_init zephyr_domain_init
+#endif
 
 struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array,
 						      uint32_t num_dma, int clk,

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index ed6079c4d61f..cd2c4da41e56 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -176,7 +176,7 @@ static const struct ll_schedule_domain_ops zephyr_domain_ops = {
 	.domain_is_pending	= zephyr_domain_is_pending
 };
 
-struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk)
+struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk)
 {
 	struct ll_schedule_domain *domain;
 	struct zephyr_domain *zephyr_domain;

From 8f6e61d34b113749ff7d3b9cd0a6cf46e2e5c55c Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Fri, 25 Jun 2021 15:59:27 +0200
Subject: [PATCH 5/7] zephyr: ll_schedule: support no-return in
 domain_unregister()

Under Zephyr LL scheduling is implemented by per-core threads. They are
spawned when the first task on that core is scheduled. When the last
task on that core is completed, the thread is terminated, which can
happen in the context of that very thread. This patch adapts the
generic LL scheduler code and the Zephyr LL domain scheduler for that
by making sure thread termination is called in a consistent state.

Signed-off-by: Guennadi Liakhovetski
---
 src/include/sof/schedule/ll_schedule_domain.h | 22 ++++++++++++++-----
 src/schedule/zephyr_domain.c                  | 17 +++++++++++---
 2 files changed, 30 insertions(+), 9 deletions(-)

diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 3f1643dbcfc0..7d65e683539b 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -142,19 +142,29 @@ static inline void domain_unregister(struct ll_schedule_domain *domain,
 				     struct task *task, uint32_t num_tasks)
 {
 	int core = cpu_get_id();
+	bool registered = domain->registered[core];
 	int ret;
 
 	assert(domain->ops->domain_unregister);
 
-	ret = domain->ops->domain_unregister(domain, task, num_tasks);
+	/* unregistering a task, decrement the count */
+	atomic_sub(&domain->total_num_tasks, 1);
 
-	if (!ret) {
-		/* unregistered the task, decrease the count */
-		atomic_sub(&domain->total_num_tasks, 1);
+	/* the last task of the core, unregister the client/core */
+	if (!num_tasks && registered)
+		domain->registered[core] = false;
+
+	/*
+	 * In some cases .domain_unregister() might not return, terminating the
+	 * current thread; that's why we had to update state before calling it.
+	 */
+	ret = domain->ops->domain_unregister(domain, task, num_tasks);
+	if (ret < 0) {
+		/* Failed to unregister the domain, restore state */
+		atomic_add(&domain->total_num_tasks, 1);
 
-		/* the last task of the core, unregister the client/core */
 		if (!num_tasks && domain->registered[core])
-			domain->registered[core] = false;
+			domain->registered[core] = registered;
 	}
 }

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index cd2c4da41e56..453ef440f741 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -77,7 +77,13 @@ static void zephyr_domain_timer_fn(struct k_timer *timer)
 	if (!zephyr_domain)
 		return;
 
-	/* This loop should only run once */
+	/*
+	 * This loop should only run once, but for the (nearly) impossible
+	 * case of a missed interrupt, add as many periods as needed. In fact
+	 * we don't need struct ll_schedule_domain::next_tick and
+	 * struct task::start for a strictly periodic Zephyr-based LL scheduler
+	 * implementation; they will be removed after a short grace period.
+	 */
 	while (zephyr_domain->ll_domain->next_tick < now)
 		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;
 
@@ -148,17 +154,22 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
 	if (num_tasks)
 		return 0;
 
-	if (atomic_read(&domain->total_num_tasks) == 1) {
+	if (!atomic_read(&domain->total_num_tasks)) {
 		k_timer_stop(&zephyr_domain->timer);
 		k_timer_user_data_set(&zephyr_domain->timer, NULL);
 	}
 
-	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);
 	zephyr_domain->domain_thread[core].handler = NULL;
 
 	tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d",
 		domain->type, domain->clk);
 
+	/*
+	 * If running in the context of the domain thread, k_thread_abort() will
+	 * not return
+	 */
+	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);
+
 	return 0;
 }

From 898bf68f3824ad28a94eec01f9bb64e051754207 Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Fri, 25 Jun 2021 16:44:05 +0200
Subject: [PATCH 6/7] zephyr: ll-domain: make semaphore per-core

Currently a global semaphore is used to signal all schedulers on all
cores, waiting for the next timer period. A more reliable solution is
using a per-core semaphore. This patch switches over to that approach.
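The signalling path then becomes per-core, roughly as follows
(wake_cores() is a hypothetical wrapper made up for this message around
the loop that the diff below adds to zephyr_domain_timer_fn()):

  static void wake_cores(struct zephyr_domain *zephyr_domain)
  {
  	int core;

  	for (core = 0; core < CONFIG_CORE_COUNT; core++) {
  		struct zephyr_domain_thread *dt =
  			zephyr_domain->domain_thread + core;

  		/* each core's thread now sleeps on its own semaphore */
  		if (dt->handler)
  			k_sem_give(&dt->sem);
  	}
  }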
Signed-off-by: Guennadi Liakhovetski
---
 src/schedule/zephyr_domain.c | 18 +++++++++++-------
 1 file changed, 11 insertions(+), 7 deletions(-)

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index 453ef440f741..4a55af763db8 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -41,13 +41,13 @@ K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);
 
 struct zephyr_domain_thread {
 	struct k_thread ll_thread;
+	struct k_sem sem;
 	void (*handler)(void *arg);
 	void *arg;
 };
 
 struct zephyr_domain {
 	struct k_timer timer;
-	struct k_sem sem;
 	struct timer *ll_timer;
 	struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT];
 	struct ll_schedule_domain *ll_domain;
@@ -61,7 +61,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 
 	for (;;) {
 		/* immediately go to sleep, waiting to be woken up by the timer */
-		k_sem_take(&zephyr_domain->sem, K_FOREVER);
+		k_sem_take(&dt->sem, K_FOREVER);
 
 		dt->handler(dt->arg);
 	}
@@ -87,9 +87,12 @@ static void zephyr_domain_timer_fn(struct k_timer *timer)
 	while (zephyr_domain->ll_domain->next_tick < now)
 		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;
 
-	for (core = 0; core < CONFIG_CORE_COUNT; core++)
-		if (zephyr_domain->domain_thread[core].handler)
-			k_sem_give(&zephyr_domain->sem);
+	for (core = 0; core < CONFIG_CORE_COUNT; core++) {
+		struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
+
+		if (dt->handler)
+			k_sem_give(&dt->sem);
+	}
 }
 
 static int zephyr_domain_register(struct ll_schedule_domain *domain,
@@ -111,6 +114,9 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,
 	dt->handler = handler;
 	dt->arg = arg;
 
+	/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
+	k_sem_init(&dt->sem, 0, 10);
+
 	thread_name[sizeof(thread_name) - 2] = '0' + core;
 
 	thread = k_thread_create(&dt->ll_thread,
@@ -200,8 +206,6 @@ struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk)
 	zephyr_domain->ll_timer = timer;
 	zephyr_domain->ll_domain = domain;
 
-	/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
-	k_sem_init(&zephyr_domain->sem, 0, 10);
 
 	ll_sch_domain_set_pdata(domain, zephyr_domain);

From 26d7f6bb5f0324ae9ad334b1cad27fc5f8cff9fb Mon Sep 17 00:00:00 2001
From: Guennadi Liakhovetski
Date: Tue, 22 Jun 2021 17:14:39 +0200
Subject: [PATCH 7/7] zephyr: ll-schedule: switch over to a simplified
 implementation

Switch SOF under Zephyr to use a simplified native low-latency
scheduler implementation.
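Existing callers are unaffected: scheduler_init_ll() and
schedule_task_init_ll() are redirected to the new implementation by
macros in ll_schedule.h. A hedged sketch of a typical user follows; the
function, its task/uid/run arguments and the zero priority/flags values
are placeholders made up for this message, not code from this series:

  static int example_schedule(struct ll_schedule_domain *domain,
  			    struct task *task,
  			    const struct sof_uuid_entry *uid,
  			    enum task_state (*run)(void *data), void *data)
  {
  	/* under __ZEPHYR__ this resolves to zephyr_ll_scheduler_init() */
  	int ret = scheduler_init_ll(domain);

  	if (ret < 0)
  		return ret;

  	/* under __ZEPHYR__ this resolves to zephyr_ll_task_init() */
  	ret = schedule_task_init_ll(task, uid, SOF_SCHEDULE_LL_TIMER, 0,
  				    run, data, cpu_get_id(), 0);
  	if (ret < 0)
  		return ret;

  	/* start on the next tick, then run every LL_TIMER_PERIOD_US */
  	schedule_task(task, 0, LL_TIMER_PERIOD_US);
  	return 0;
  }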
Signed-off-by: Guennadi Liakhovetski
---
 src/include/sof/schedule/ll_schedule.h        |  13 +
 src/include/sof/schedule/ll_schedule_domain.h |   2 +
 src/schedule/zephyr_domain.c                  |   1 -
 src/schedule/zephyr_ll.c                      | 467 ++++++++++++++++++
 zephyr/CMakeLists.txt                         |   2 +-
 5 files changed, 483 insertions(+), 2 deletions(-)
 create mode 100644 src/schedule/zephyr_ll.c

diff --git a/src/include/sof/schedule/ll_schedule.h b/src/include/sof/schedule/ll_schedule.h
index b231625aeeb4..441d71db5d03 100644
--- a/src/include/sof/schedule/ll_schedule.h
+++ b/src/include/sof/schedule/ll_schedule.h
@@ -34,11 +34,24 @@ struct ll_task_pdata {
 	uint16_t skip_cnt;	/**< how many times the task was skipped for execution */
 };
 
+#ifndef __ZEPHYR__
 int scheduler_init_ll(struct ll_schedule_domain *domain);
 
 int schedule_task_init_ll(struct task *task,
 			  const struct sof_uuid_entry *uid, uint16_t type,
 			  uint16_t priority, enum task_state (*run)(void *data),
 			  void *data, uint16_t core, uint32_t flags);
+#else
+int zephyr_ll_scheduler_init(struct ll_schedule_domain *domain);
+
+int zephyr_ll_task_init(struct task *task,
+			const struct sof_uuid_entry *uid, uint16_t type,
+			uint16_t priority, enum task_state (*run)(void *data),
+			void *data, uint16_t core, uint32_t flags);
+
+#define scheduler_init_ll zephyr_ll_scheduler_init
+#define schedule_task_init_ll zephyr_ll_task_init
+
+#endif
 
 #endif /* __SOF_SCHEDULE_LL_SCHEDULE_H__ */

diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h
index 7d65e683539b..4dd9ecfac041 100644
--- a/src/include/sof/schedule/ll_schedule_domain.h
+++ b/src/include/sof/schedule/ll_schedule_domain.h
@@ -22,6 +22,8 @@
 #include 
 #include 
 
+#define LL_TIMER_PERIOD_US 1000ULL /* default period in microseconds */
+
 struct dma;
 struct ll_schedule_domain;
 struct task;

diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index 4a55af763db8..812ed68d6e55 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -34,7 +34,6 @@
 
 #define ZEPHYR_LL_STACK_SIZE 8192
 
-#define LL_TIMER_PERIOD_US 1000ULL /* period in microseconds */
 #define LL_TIMER_PERIOD_TICKS (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)
 
 K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);

diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c
new file mode 100644
index 000000000000..707dcec2ecf5
--- /dev/null
+++ b/src/schedule/zephyr_ll.c
@@ -0,0 +1,467 @@
+// SPDX-License-Identifier: BSD-3-Clause
+//
+// Copyright(c) 2021 Intel Corporation. All rights reserved.
+//
+// Author: Guennadi Liakhovetski
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+
+/* per-scheduler data */
+struct zephyr_ll {
+	struct list_item tasks;			/* list of ll tasks */
+	unsigned int n_tasks;			/* task counter */
+	spinlock_t lock;			/* protects the list of tasks and the counter */
+	struct ll_schedule_domain *ll_domain;	/* scheduling domain */
+};
+
+/* per-task scheduler data */
+struct zephyr_ll_pdata {
+	bool run;
+	bool freeing;
+	struct k_sem sem;
+};
+
+/* FIXME: would be good to use the timer, configured in the underlying domain */
+#define domain_time(sch) platform_timer_get_atomic(timer_get())
+
+/* Locking: caller should hold the domain lock */
+static void zephyr_ll_task_done(struct zephyr_ll *sch,
+				struct task *task)
+{
+	list_item_del(&task->list);
+
+	if (!sch->n_tasks) {
+		tr_info(&ll_tr, "task count underrun!");
+		k_panic();
+	}
+
+	task->state = SOF_TASK_STATE_FREE;
+
+	tr_info(&ll_tr, "task complete %p %pU", task, task->uid);
+	tr_info(&ll_tr, "num_tasks %d total_num_tasks %d",
+		sch->n_tasks, atomic_read(&sch->ll_domain->total_num_tasks));
+
+	/*
+	 * If this is the last task, domain_unregister() won't return. It is
+	 * important to decrement the task counter last before aborting the
+	 * thread.
+	 */
+	domain_unregister(sch->ll_domain, task, --sch->n_tasks);
+}
+
+/* The caller must hold the lock and possibly disable interrupts */
+static void zephyr_ll_task_insert_unlocked(struct zephyr_ll *sch, struct task *task)
+{
+	struct task *task_iter;
+	struct list_item *list;
+
+	task->state = SOF_TASK_STATE_QUEUED;
+
+	/*
+	 * Tasks are added into the list from highest to lowest priority. This
+	 * way they can then be run in the same order. Tasks with the same
+	 * priority are served on a first-come-first-served basis
+	 */
+	list_for_item(list, &sch->tasks) {
+		task_iter = container_of(list, struct task, list);
+		if (task->priority < task_iter->priority) {
+			list_item_append(&task->list, &task_iter->list);
+			break;
+		}
+	}
+
+	/*
+	 * If the task has not been added, it means that it has the lowest
+	 * priority and should be added at the end of the list
+	 */
+	if (list == &sch->tasks)
+		list_item_append(&task->list, &sch->tasks);
+}
+
+/*
+ * Task state machine:
+ * INIT: initialized
+ * QUEUED: inserted into the scheduler queue
+ * RUNNING: the scheduler is running, the task is moved to a temporary list
+ *	    and then executed
+ * CANCEL: the task has been cancelled but it is still active. Transition
+ *	    to CANCEL can happen anywhere, where the lock is not held, tasks
+ *	    can be cancelled asynchronously from an arbitrary context
+ * FREE: removed from all lists, ready to be freed
+ * other: never set, shouldn't be used, but RESCHEDULE and COMPLETED are
+ *	    returned by task's .run function, they are assigned to a
+ *	    temporary state, but not to the task .state field
+ */
+
+/*
+ * struct task::start and struct ll_schedule_domain::next are parts of the
+ * original LL scheduler design; they aren't needed in this Zephyr-only
+ * implementation and will be removed after an initial upstreaming.
+ */
+static void zephyr_ll_run(void *data)
+{
+	struct zephyr_ll *sch = data;
+	struct task *task;
+	struct list_item *list, *tmp, head;
+	uint32_t flags;
+
+	list_init(&head);
+
+	spin_lock_irq(&sch->lock, flags);
+
+	/* Move all pending tasks to a temporary local list */
+	list_for_item_safe(list, tmp, &sch->tasks) {
+		struct comp_dev *sched_comp;
+		struct zephyr_ll_pdata *pdata;
+
+		task = container_of(list, struct task, list);
+		pdata = task->priv_data;
+
+		if (task->state == SOF_TASK_STATE_CANCEL) {
+			zephyr_ll_task_done(sch, task);
+			continue;
+		}
+
+		/* To be removed together with .start and .next_tick */
+		if (!domain_is_pending(sch->ll_domain, task, &sched_comp))
+			continue;
+
+		/*
+		 * Do not adjust .n_tasks, it's only decremented if the task is
+		 * removed via zephyr_ll_task_done()
+		 */
+		list_item_del(&task->list);
+		list_item_append(&task->list, &head);
+		pdata->run = true;
+
+		task->state = SOF_TASK_STATE_RUNNING;
+	}
+
+	spin_unlock_irq(&sch->lock, flags);
+
+	/*
+	 * Execute tasks on the temporary list. We are racing against
+	 * zephyr_ll_task_free(), see comments there for details.
+	 */
+	list_for_item_safe(list, tmp, &head) {
+		enum task_state state;
+		struct zephyr_ll_pdata *pdata;
+
+		task = container_of(list, struct task, list);
+
+		/*
+		 * While the lock is not held, the task can be cancelled, we
+		 * deal with it after the task completion.
+		 */
+		if (task->state == SOF_TASK_STATE_RUNNING) {
+			/*
+			 * task's .run() should only return either
+			 * SOF_TASK_STATE_COMPLETED or SOF_TASK_STATE_RESCHEDULE
+			 */
+			state = task_run(task);
+			if (state != SOF_TASK_STATE_COMPLETED &&
+			    state != SOF_TASK_STATE_RESCHEDULE) {
+				tr_err(&ll_tr,
+				       "zephyr_ll_run: invalid return state %u",
+				       state);
+				state = SOF_TASK_STATE_RESCHEDULE;
+			}
+		} else {
+			state = SOF_TASK_STATE_COMPLETED;
+		}
+
+		spin_lock_irq(&sch->lock, flags);
+
+		pdata = task->priv_data;
+
+		if (pdata->freeing) {
+			/*
+			 * zephyr_ll_task_free() is trying to free this task.
+			 * complete it and signal the semaphore to let the
+			 * function proceed
+			 */
+			zephyr_ll_task_done(sch, task);
+			k_sem_give(&pdata->sem);
+		} else if (state == SOF_TASK_STATE_COMPLETED) {
+			zephyr_ll_task_done(sch, task);
+		} else {
+			/*
+			 * task->state could've been changed to
+			 * SOF_TASK_STATE_CANCEL
+			 */
+			switch (task->state) {
+			case SOF_TASK_STATE_CANCEL:
+				zephyr_ll_task_done(sch, task);
+				break;
+			default:
+				/* reschedule */
+				list_item_del(&task->list);
+
+				zephyr_ll_task_insert_unlocked(sch, task);
+
+				task->start = sch->ll_domain->next_tick;
+			}
+		}
+
+		spin_unlock_irq(&sch->lock, flags);
+	}
+
+	notifier_event(sch, NOTIFIER_ID_LL_POST_RUN,
+		       NOTIFIER_TARGET_CORE_LOCAL, NULL, 0);
+}
+
+static void zephyr_ll_init_scheduler_for_first_task(struct zephyr_ll *sch)
+{
+	/*
+	 * If we're adding the first task to the scheduler, it is possible that
+	 * the scheduler has already run before on this core and its thread has
+	 * been aborted while holding the spinlock, so we have to re-initialize
+	 * it.
+	 */
+	spinlock_init(&sch->lock);
+}
+
+/*
+ * Called once for periodic tasks or multiple times for one-shot tasks
+ * TODO: start should be ignored in Zephyr LL scheduler implementation. Tasks
+ * are scheduled to start on the following tick and run on each subsequent timer
+ * event. In the future we shall add support for long-period tasks with periods
+ * equal to a multiple of the scheduler tick time. Ignoring start will eliminate
+ * the use of task::start and ll_schedule_domain::next in this scheduler.
+ */
+static int zephyr_ll_task_schedule(void *data, struct task *task, uint64_t start,
+				   uint64_t period)
+{
+	struct zephyr_ll *sch = data;
+	struct zephyr_ll_pdata *pdata;
+	struct task *task_iter;
+	struct list_item *list;
+	uint64_t delay = period ? period : start;
+	uint32_t flags;
+	int ret;
+
+	tr_info(&ll_tr, "task add %p %pU priority %d flags 0x%x", task, task->uid,
+		task->priority, task->flags);
+
+	irq_local_disable(flags);
+	/*
+	 * The task counter is decremented as the last operation in
+	 * zephyr_ll_task_done() before aborting the thread, so just disabling
+	 * local IRQs provides sufficient protection here.
+	 */
+	if (!sch->n_tasks)
+		zephyr_ll_init_scheduler_for_first_task(sch);
+	irq_local_enable(flags);
+
+	spin_lock_irq(&sch->lock, flags);
+
+	pdata = task->priv_data;
+
+	if (!pdata || pdata->freeing) {
+		/*
+		 * The user has called schedule_task_free() and then
+		 * schedule_task(). This is clearly a bug in the application
+		 * code, but we have to protect against it
+		 */
+		spin_unlock_irq(&sch->lock, flags);
+		return -EDEADLK;
+	}
+
+	/* check if the task is already scheduled */
+	list_for_item(list, &sch->tasks) {
+		task_iter = container_of(list, struct task, list);
+
+		/*
+		 * keep original start. TODO: this shouldn't be happening.
+		 * Remove after verification
+		 */
+		if (task_iter == task) {
+			spin_unlock_irq(&sch->lock, flags);
+			tr_warn(&ll_tr, "task %p (%pU) already scheduled",
+				task, task->uid);
+			return 0;
+		}
+	}
+
+	task->start = domain_time(sch) + delay;
+	if (sch->ll_domain->next_tick != UINT64_MAX &&
+	    sch->ll_domain->next_tick > task->start)
+		task->start = sch->ll_domain->next_tick;
+
+	zephyr_ll_task_insert_unlocked(sch, task);
+
+	sch->n_tasks++;
+
+	spin_unlock_irq(&sch->lock, flags);
+
+	ret = domain_register(sch->ll_domain, task, &zephyr_ll_run, sch);
+	if (ret < 0)
+		tr_err(&ll_tr, "zephyr_ll_task_schedule: cannot register domain %d",
+		       ret);
+
+	return 0;
+}
+
+/*
+ * This is synchronous - after this returns the object can be destroyed!
+ * Assertion: under Zephyr this is always called from a thread context!
+ */
+static int zephyr_ll_task_free(void *data, struct task *task)
+{
+	struct zephyr_ll *sch = data;
+	uint32_t flags;
+	struct zephyr_ll_pdata *pdata = task->priv_data;
+	bool must_wait, on_list = true;
+
+	if (k_is_in_isr()) {
+		tr_err(&ll_tr,
+		       "zephyr_ll_task_free: cannot free tasks from interrupt context!");
+		return -EDEADLK;
+	}
+
+	spin_lock_irq(&sch->lock, flags);
+
+	/*
+	 * It is safe to free the task in state INIT or QUEUED. CANCEL is
+	 * unknown, because it can be set either in a safe state or in an unsafe
+	 * one. If this function took the spin-lock while tasks are pending on a
+	 * temporary list in zephyr_ll_run(), no task can be freed, because
+	 * doing so would corrupt the temporary list. We could use an array of
+	 * task pointers instead, but that array would have to be dynamically
+	 * allocated and we anyway have to wait for the task completion at least
+	 * when it is in the RUNNING state. To identify whether CANCEL was set
+	 * in a safe state, an additional flag must be used.
+	 */
+	switch (task->state) {
+	case SOF_TASK_STATE_FREE:
+		on_list = false;
+		/* fall through */
+	case SOF_TASK_STATE_INIT:
+	case SOF_TASK_STATE_QUEUED:
+		must_wait = false;
+		break;
+	case SOF_TASK_STATE_CANCEL:
+		must_wait = pdata->run;
+		break;
+	default:
+		must_wait = true;
+	}
+
+	if (on_list && !must_wait)
+		zephyr_ll_task_done(sch, task);
+
+	pdata->freeing = true;
+
+	spin_unlock_irq(&sch->lock, flags);
+
+	if (must_wait)
+		/* Wait for up to 100 periods */
+		k_sem_take(&pdata->sem, K_USEC(LL_TIMER_PERIOD_US * 100));
+
+	/* Protect against racing with schedule_task() */
+	spin_lock_irq(&sch->lock, flags);
+	task->priv_data = NULL;
+	rfree(pdata);
+	spin_unlock_irq(&sch->lock, flags);
+
+	return 0;
+}
+
+/* This seems to be asynchronous? */
+static int zephyr_ll_task_cancel(void *data, struct task *task)
+{
+	struct zephyr_ll *sch = data;
+	uint32_t flags;
+
+	/*
+	 * Read-modify-write of task state in zephyr_ll_task_schedule() must be
+	 * kept atomic, so we have to lock here too.
+	 */
+	spin_lock_irq(&sch->lock, flags);
+	task->state = SOF_TASK_STATE_CANCEL;
+	spin_unlock_irq(&sch->lock, flags);
+
+	return 0;
+}
+
+/*
+ * Runs on secondary cores in their shutdown sequence. In theory tasks can still
+ * be active, but other schedulers ignore them too... And we don't need to free
+ * the scheduler data - it's allocated in the SYS zone.
+ */
+static void zephyr_ll_scheduler_free(void *data)
+{
+	struct zephyr_ll *sch = data;
+
+	if (sch->n_tasks)
+		tr_err(&ll_tr, "zephyr_ll_scheduler_free: %u tasks are still active!",
+		       sch->n_tasks);
+}
+
+static const struct scheduler_ops zephyr_ll_ops = {
+	.schedule_task		= zephyr_ll_task_schedule,
+	.schedule_task_free	= zephyr_ll_task_free,
+	.schedule_task_cancel	= zephyr_ll_task_cancel,
+	.scheduler_free		= zephyr_ll_scheduler_free,
+};
+
+int zephyr_ll_task_init(struct task *task,
+			const struct sof_uuid_entry *uid, uint16_t type,
+			uint16_t priority, enum task_state (*run)(void *data),
+			void *data, uint16_t core, uint32_t flags)
+{
+	struct zephyr_ll_pdata *pdata;
+	int ret;
+
+	if (task->priv_data)
+		return -EEXIST;
+
+	ret = schedule_task_init(task, uid, type, priority, run, data, core,
+				 flags);
+	if (ret < 0)
+		return ret;
+
+	pdata = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
+			sizeof(*pdata));
+	if (!pdata) {
+		tr_err(&ll_tr, "zephyr_ll_task_init(): alloc failed");
+		return -ENOMEM;
+	}
+
+	k_sem_init(&pdata->sem, 0, 1);
+
+	task->priv_data = pdata;
+
+	return 0;
+}
+
+/* TODO: low-power mode clock support */
+/* Runs on each core during initialisation with the same domain argument */
+int zephyr_ll_scheduler_init(struct ll_schedule_domain *domain)
+{
+	struct zephyr_ll *sch;
+
+	if (domain->type != SOF_SCHEDULE_LL_TIMER) {
+		tr_err(&ll_tr, "zephyr_ll_scheduler_init(): unsupported domain %u",
+		       domain->type);
+		return -EINVAL;
+	}
+
+	/* initialize per-core scheduler private data */
+	sch = rmalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*sch));
+	list_init(&sch->tasks);
+	sch->ll_domain = domain;
+
+	scheduler_init(domain->type, &zephyr_ll_ops, sch);
+
+	return 0;
+}

diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt
index 52cdfa2d39fd..824f33077d81 100644
--- a/zephyr/CMakeLists.txt
+++ b/zephyr/CMakeLists.txt
@@ -393,7 +393,7 @@ zephyr_library_sources(
 	${SOF_SRC_PATH}/schedule/schedule.c
 	${SOF_SRC_PATH}/schedule/dma_single_chan_domain.c
 	${SOF_SRC_PATH}/schedule/dma_multi_chan_domain.c
-	${SOF_SRC_PATH}/schedule/ll_schedule.c
+	${SOF_SRC_PATH}/schedule/zephyr_ll.c
 	${SOF_SRC_PATH}/schedule/zephyr.c
 
 	# Bridge wrapper between SOF and Zephyr APIs - Will shrink over time.