-
Notifications
You must be signed in to change notification settings - Fork 349
zephyr: switch over to a simple priority-based LL scheduler #4377
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
a488413
0c7b6b7
53eec38
619c609
8f6e61d
898bf68
26d7f6b
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -22,14 +22,16 @@ | |
| #include <stdbool.h> | ||
| #include <stdint.h> | ||
|
|
||
| #define LL_TIMER_PERIOD_US 1000ULL /* default period in microseconds */ | ||
|
|
||
| struct dma; | ||
| struct ll_schedule_domain; | ||
| struct task; | ||
| struct timer; | ||
|
|
||
| struct ll_schedule_domain_ops { | ||
| int (*domain_register)(struct ll_schedule_domain *domain, | ||
| uint64_t period, struct task *task, | ||
| struct task *task, | ||
| void (*handler)(void *arg), void *arg); | ||
| int (*domain_unregister)(struct ll_schedule_domain *domain, | ||
| struct task *task, uint32_t num_tasks); | ||
|
|
@@ -116,15 +118,15 @@ static inline void domain_clear(struct ll_schedule_domain *domain) | |
| } | ||
|
|
||
| static inline int domain_register(struct ll_schedule_domain *domain, | ||
| uint64_t period, struct task *task, | ||
| struct task *task, | ||
| void (*handler)(void *arg), void *arg) | ||
| { | ||
| int core = cpu_get_id(); | ||
| int ret; | ||
|
|
||
| assert(domain->ops->domain_register); | ||
|
|
||
| ret = domain->ops->domain_register(domain, period, task, handler, arg); | ||
| ret = domain->ops->domain_register(domain, task, handler, arg); | ||
|
|
||
| if (!ret) { | ||
| /* registered one more task, increase the count */ | ||
|
|
@@ -142,19 +144,29 @@ static inline void domain_unregister(struct ll_schedule_domain *domain, | |
| struct task *task, uint32_t num_tasks) | ||
| { | ||
| int core = cpu_get_id(); | ||
| bool registered = domain->registered[core]; | ||
| int ret; | ||
|
|
||
| assert(domain->ops->domain_unregister); | ||
|
|
||
| ret = domain->ops->domain_unregister(domain, task, num_tasks); | ||
| /* unregistering a task, decrement the count */ | ||
| atomic_sub(&domain->total_num_tasks, 1); | ||
|
|
||
| if (!ret) { | ||
| /* unregistered the task, decrease the count */ | ||
| atomic_sub(&domain->total_num_tasks, 1); | ||
| /* the last task of the core, unregister the client/core */ | ||
| if (!num_tasks && registered) | ||
| domain->registered[core] = false; | ||
|
|
||
| /* | ||
| * In some cases .domain_unregister() might not return, terminating the | ||
|
||
| * current thread, that's why we had to update state before calling it. | ||
| */ | ||
| ret = domain->ops->domain_unregister(domain, task, num_tasks); | ||
| if (ret < 0) { | ||
| /* Failed to unregister the domain, restore state */ | ||
| atomic_add(&domain->total_num_tasks, 1); | ||
|
|
||
| /* the last task of the core, unregister the client/core */ | ||
| if (!num_tasks && domain->registered[core]) | ||
| domain->registered[core] = false; | ||
| domain->registered[core] = registered; | ||
| } | ||
| } | ||
|
|
||
|
|
@@ -188,7 +200,13 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain, | |
| return ret; | ||
| } | ||
|
|
||
|
|
||
| #ifndef __ZEPHYR__ | ||
| struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk); | ||
| #else | ||
| struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk); | ||
| #define timer_domain_init zephyr_domain_init | ||
| #endif | ||
|
||
|
|
||
| struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array, | ||
| uint32_t num_dma, int clk, | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -34,19 +34,19 @@ | |
|
|
||
| #define ZEPHYR_LL_STACK_SIZE 8192 | ||
|
|
||
| #define LL_TIMER_PERIOD_US 1000 /* period in microseconds */ | ||
| #define LL_TIMER_PERIOD_TICKS (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL) | ||
|
|
||
| K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE); | ||
|
|
||
| struct zephyr_domain_thread { | ||
| struct k_thread ll_thread; | ||
| struct k_sem sem; | ||
| void (*handler)(void *arg); | ||
| void *arg; | ||
| }; | ||
|
|
||
| struct zephyr_domain { | ||
| struct k_timer timer; | ||
| struct k_sem sem; | ||
| struct timer *ll_timer; | ||
| struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT]; | ||
| struct ll_schedule_domain *ll_domain; | ||
|
|
@@ -60,7 +60,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) | |
|
|
||
| for (;;) { | ||
| /* immediately go to sleep, waiting to be woken up by the timer */ | ||
| k_sem_take(&zephyr_domain->sem, K_FOREVER); | ||
| k_sem_take(&dt->sem, K_FOREVER); | ||
|
|
||
| dt->handler(dt->arg); | ||
| } | ||
|
|
@@ -69,19 +69,33 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) | |
| /* Timer callback: runs in timer IRQ context */ | ||
| static void zephyr_domain_timer_fn(struct k_timer *timer) | ||
| { | ||
| struct zephyr_domain *zephyr_domain = timer->user_data; | ||
| struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer); | ||
| uint64_t now = platform_timer_get(NULL); | ||
| int core; | ||
|
|
||
| if (!zephyr_domain) | ||
| return; | ||
|
|
||
| for (core = 0; core < CONFIG_CORE_COUNT; core++) | ||
| if (zephyr_domain->domain_thread[core].handler) | ||
| k_sem_give(&zephyr_domain->sem); | ||
| /* | ||
| * This loop should only run once, but for the (nearly) impossible | ||
| * case of a missed interrupt, add as many periods as needed. In fact | ||
| we don't need struct ll_schedule_domain::next_tick and | ||
| * struct task::start for a strictly periodic Zephyr-based LL scheduler | ||
| * implementation, they will be removed after a short grace period. | ||
| */ | ||
| while (zephyr_domain->ll_domain->next_tick < now) | ||
| zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS; | ||
|
||
|
|
||
| for (core = 0; core < CONFIG_CORE_COUNT; core++) { | ||
| struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; | ||
|
|
||
| if (dt->handler) | ||
| k_sem_give(&dt->sem); | ||
| } | ||
| } | ||
|
|
||
| static int zephyr_domain_register(struct ll_schedule_domain *domain, | ||
| uint64_t period, struct task *task, | ||
| struct task *task, | ||
| void (*handler)(void *arg), void *arg) | ||
| { | ||
| struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); | ||
|
|
@@ -99,6 +113,9 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, | |
| dt->handler = handler; | ||
| dt->arg = arg; | ||
|
|
||
| /* 10 is rather random, we'd better not accumulate 10 missed timer interrupts */ | ||
|
||
| k_sem_init(&dt->sem, 0, 10); | ||
|
|
||
| thread_name[sizeof(thread_name) - 2] = '0' + core; | ||
|
|
||
| thread = k_thread_create(&dt->ll_thread, | ||
|
|
@@ -113,13 +130,15 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, | |
|
|
||
| k_thread_start(thread); | ||
|
|
||
| if (!zephyr_domain->timer.user_data) { | ||
| if (!k_timer_user_data_get(&zephyr_domain->timer)) { | ||
| k_timeout_t start = {0}; | ||
|
|
||
| k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL); | ||
| zephyr_domain->timer.user_data = zephyr_domain; | ||
| k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain); | ||
|
|
||
| k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US)); | ||
| domain->next_tick = platform_timer_get_atomic(zephyr_domain->ll_timer) + | ||
| k_ticks_to_cyc_ceil64(k_timer_remaining_ticks(&zephyr_domain->timer)); | ||
|
||
| } | ||
|
|
||
| tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d", | ||
|
|
@@ -140,15 +159,22 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, | |
| if (num_tasks) | ||
| return 0; | ||
|
|
||
| if (!atomic_read(&domain->total_num_tasks)) | ||
| if (!atomic_read(&domain->total_num_tasks)) { | ||
| k_timer_stop(&zephyr_domain->timer); | ||
| k_timer_user_data_set(&zephyr_domain->timer, NULL); | ||
| } | ||
|
|
||
| k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread); | ||
| zephyr_domain->domain_thread[core].handler = NULL; | ||
|
|
||
| tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d", | ||
| domain->type, domain->clk); | ||
|
|
||
| /* | ||
| * If running in the context of the domain thread, k_thread_abort() will | ||
| * not return | ||
| */ | ||
| k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread); | ||
|
|
||
| return 0; | ||
| } | ||
|
|
||
|
|
@@ -166,7 +192,7 @@ static const struct ll_schedule_domain_ops zephyr_domain_ops = { | |
| .domain_is_pending = zephyr_domain_is_pending | ||
| }; | ||
|
|
||
| struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk) | ||
| struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk) | ||
| { | ||
| struct ll_schedule_domain *domain; | ||
| struct zephyr_domain *zephyr_domain; | ||
|
|
@@ -179,8 +205,6 @@ struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk) | |
|
|
||
| zephyr_domain->ll_timer = timer; | ||
| zephyr_domain->ll_domain = domain; | ||
| /* 10 is rather random, we'd better not accumulate 10 missed timer interrupts */ | ||
| k_sem_init(&zephyr_domain->sem, 0, 10); | ||
|
|
||
| ll_sch_domain_set_pdata(domain, zephyr_domain); | ||
|
|
||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Could we have this on the implementation side, so the public interface would remain the same for both?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I first implemented both while keeping the "old" names, but then I thought that having proper namespace consistency in the .c file would be better... But we can discuss this.