Merged

Changes from all commits
src/include/sof/schedule/ll_schedule.h (13 additions, 0 deletions)

@@ -34,11 +34,24 @@ struct ll_task_pdata {
 	uint16_t skip_cnt;	/**< how many times the task was skipped for execution */
 };
 
+#ifndef __ZEPHYR__
 int scheduler_init_ll(struct ll_schedule_domain *domain);
 
 int schedule_task_init_ll(struct task *task,
 			  const struct sof_uuid_entry *uid, uint16_t type,
 			  uint16_t priority, enum task_state (*run)(void *data),
 			  void *data, uint16_t core, uint32_t flags);
+#else
+int zephyr_ll_scheduler_init(struct ll_schedule_domain *domain);
+
+int zephyr_ll_task_init(struct task *task,
+			const struct sof_uuid_entry *uid, uint16_t type,
+			uint16_t priority, enum task_state (*run)(void *data),
+			void *data, uint16_t core, uint32_t flags);
+
+#define scheduler_init_ll zephyr_ll_scheduler_init
+#define schedule_task_init_ll zephyr_ll_task_init
+
+#endif
Collaborator: Could we have this on the implementation side, so the public interface would remain the same for both?

Collaborator (Author): I first implemented both while keeping the "old" names, but then I thought that having proper namespace consistency in the .c file is better... But we can discuss this.
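As an illustration of the alternative raised above, the Zephyr implementation file could define the original public names itself, keeping the Zephyr-specific naming internal. A minimal sketch, with a hypothetical helper name that is not from this PR:

	/* ll_schedule.h would then keep only the original prototype: */
	int scheduler_init_ll(struct ll_schedule_domain *domain);

	/* zephyr_ll.c defines the public name directly; zephyr_ll_init_internal()
	 * is an illustrative placeholder for the Zephyr-specific body:
	 */
	static int zephyr_ll_init_internal(struct ll_schedule_domain *domain)
	{
		/* Zephyr-specific initialization would go here */
		return 0;
	}

	int scheduler_init_ll(struct ll_schedule_domain *domain)
	{
		return zephyr_ll_init_internal(domain);
	}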


 #endif /* __SOF_SCHEDULE_LL_SCHEDULE_H__ */
src/include/sof/schedule/ll_schedule_domain.h (27 additions, 9 deletions)

@@ -22,14 +22,16 @@
 #include <stdbool.h>
 #include <stdint.h>
 
+#define LL_TIMER_PERIOD_US 1000ULL /* default period in microseconds */
+
 struct dma;
 struct ll_schedule_domain;
 struct task;
 struct timer;
 
 struct ll_schedule_domain_ops {
 	int (*domain_register)(struct ll_schedule_domain *domain,
-			       uint64_t period, struct task *task,
+			       struct task *task,
 			       void (*handler)(void *arg), void *arg);
 	int (*domain_unregister)(struct ll_schedule_domain *domain,
 				 struct task *task, uint32_t num_tasks);
@@ -116,15 +118,15 @@ static inline void domain_clear(struct ll_schedule_domain *domain)
 }
 
 static inline int domain_register(struct ll_schedule_domain *domain,
-				  uint64_t period, struct task *task,
+				  struct task *task,
 				  void (*handler)(void *arg), void *arg)
 {
 	int core = cpu_get_id();
 	int ret;
 
 	assert(domain->ops->domain_register);
 
-	ret = domain->ops->domain_register(domain, period, task, handler, arg);
+	ret = domain->ops->domain_register(domain, task, handler, arg);
 
 	if (!ret) {
 		/* registered one more task, increase the count */
@@ -142,19 +144,29 @@ static inline void domain_unregister(struct ll_schedule_domain *domain,
 				      struct task *task, uint32_t num_tasks)
 {
 	int core = cpu_get_id();
+	bool registered = domain->registered[core];
 	int ret;
 
 	assert(domain->ops->domain_unregister);
 
-	ret = domain->ops->domain_unregister(domain, task, num_tasks);
+	/* unregistering a task, decrement the count */
+	atomic_sub(&domain->total_num_tasks, 1);
 
-	if (!ret) {
-		/* unregistered the task, decrease the count */
-		atomic_sub(&domain->total_num_tasks, 1);
+	/* the last task of the core, unregister the client/core */
+	if (!num_tasks && registered)
+		domain->registered[core] = false;
+
+	/*
+	 * In some cases .domain_unregister() might not return, terminating the
Member: Can we state these cases in the comment.
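One such case appears later in this PR: the Zephyr implementation's .domain_unregister() calls k_thread_abort() on the core's scheduler thread, which does not return when invoked from that thread's own context. A possible wording for the comment, as a suggestion only:

	/*
	 * .domain_unregister() might not return: e.g. the Zephyr
	 * implementation calls k_thread_abort() on the core's scheduler
	 * thread, which does not return if a task unregisters itself from
	 * that thread's context. That is why the state has to be updated
	 * before calling it.
	 */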

+	 * current thread, that's why we had to update state before calling it.
+	 */
+	ret = domain->ops->domain_unregister(domain, task, num_tasks);
+	if (ret < 0) {
+		/* Failed to unregister the domain, restore state */
+		atomic_add(&domain->total_num_tasks, 1);
 
-		/* the last task of the core, unregister the client/core */
-		if (!num_tasks && domain->registered[core])
-			domain->registered[core] = false;
+		domain->registered[core] = registered;
 	}
 }

@@ -188,7 +200,13 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain,
 	return ret;
 }
 
 
+#ifndef __ZEPHYR__
 struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk);
+#else
+struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk);
+#define timer_domain_init zephyr_domain_init
+#endif
Collaborator: ditto


 struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array,
 						      uint32_t num_dma, int clk,
src/schedule/dma_multi_chan_domain.c (1 addition, 1 deletion)

@@ -97,7 +97,7 @@ static int dma_multi_chan_domain_irq_register(struct dma_domain_data *data,
  * DMA interrupts (different irq number per DMA channel).
  */
 static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain,
-					  uint64_t period, struct task *task,
+					  struct task *task,
 					  void (*handler)(void *arg), void *arg)
 {
 	struct dma_domain *dma_domain = ll_sch_domain_get_pdata(domain);
src/schedule/dma_single_chan_domain.c (1 addition, 1 deletion)

@@ -174,7 +174,7 @@ static void dma_single_chan_domain_irq_unregister(struct dma_domain_data *data)
  * cores need to be notified.
  */
 static int dma_single_chan_domain_register(struct ll_schedule_domain *domain,
-					   uint64_t period, struct task *task,
+					   struct task *task,
 					   void (*handler)(void *arg),
 					   void *arg)
 {
src/schedule/ll_schedule.c (1 addition, 2 deletions)

@@ -253,8 +253,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch,

 	spin_lock(&domain->lock);
 
-	ret = domain_register(domain, period, task, &schedule_ll_tasks_run,
-			      sch);
+	ret = domain_register(domain, task, &schedule_ll_tasks_run, sch);
 	if (ret < 0) {
 		tr_err(&ll_tr, "schedule_ll_domain_set: cannot register domain %d",
 		       ret);
src/schedule/timer_domain.c (3 additions, 3 deletions)

@@ -44,7 +44,7 @@ static void timer_report_delay(int id, uint64_t delay)
 }
 
 static int timer_domain_register(struct ll_schedule_domain *domain,
-				 uint64_t period, struct task *task,
+				 struct task *task,
 				 void (*handler)(void *arg), void *arg)
 {
 	struct timer_domain *timer_domain = ll_sch_domain_get_pdata(domain);

@@ -60,8 +60,8 @@ static int timer_domain_register(struct ll_schedule_domain *domain,
 	timer_domain->arg[core] = arg;
 	ret = timer_register(timer_domain->timer, handler, arg);
 
-	tr_info(&ll_tr, "timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",
-		domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)period);
+	tr_info(&ll_tr, "timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d",
+		domain->type, domain->clk, domain->ticks_per_ms);
 out:
 
 	return ret;
src/schedule/zephyr_domain.c (39 additions, 15 deletions)

@@ -34,19 +34,19 @@

 #define ZEPHYR_LL_STACK_SIZE 8192
 
-#define LL_TIMER_PERIOD_US 1000 /* period in microseconds */
+#define LL_TIMER_PERIOD_TICKS (CONFIG_SYS_CLOCK_HW_CYCLES_PER_SEC * LL_TIMER_PERIOD_US / 1000000ULL)
 
 K_KERNEL_STACK_ARRAY_DEFINE(ll_sched_stack, CONFIG_CORE_COUNT, ZEPHYR_LL_STACK_SIZE);
 
 struct zephyr_domain_thread {
 	struct k_thread ll_thread;
+	struct k_sem sem;
 	void (*handler)(void *arg);
 	void *arg;
 };
 
 struct zephyr_domain {
 	struct k_timer timer;
-	struct k_sem sem;
 	struct timer *ll_timer;
 	struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT];
 	struct ll_schedule_domain *ll_domain;
@@ -60,7 +60,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)

 	for (;;) {
 		/* immediately go to sleep, waiting to be woken up by the timer */
-		k_sem_take(&zephyr_domain->sem, K_FOREVER);
+		k_sem_take(&dt->sem, K_FOREVER);
 
 		dt->handler(dt->arg);
 	}
@@ -69,19 +69,33 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 /* Timer callback: runs in timer IRQ context */
 static void zephyr_domain_timer_fn(struct k_timer *timer)
 {
-	struct zephyr_domain *zephyr_domain = timer->user_data;
+	struct zephyr_domain *zephyr_domain = k_timer_user_data_get(timer);
+	uint64_t now = platform_timer_get(NULL);
 	int core;
 
 	if (!zephyr_domain)
 		return;
 
-	for (core = 0; core < CONFIG_CORE_COUNT; core++)
-		if (zephyr_domain->domain_thread[core].handler)
-			k_sem_give(&zephyr_domain->sem);
+	/*
+	 * This loop should only run once, but for the (nearly) impossible
+	 * case of a missed interrupt, add as many periods as needed. In fact
+	 * we don't need struct ll_schedule_domain::next_tick and
+	 * struct task::start for a strictly periodic Zephyr-based LL scheduler
+	 * implementation, they will be removed after a short grace period.
+	 */
+	while (zephyr_domain->ll_domain->next_tick < now)
+		zephyr_domain->ll_domain->next_tick += LL_TIMER_PERIOD_TICKS;
Collaborator: Not sure if we need to fine-tune it now (if next_tick is removed later), but this looks a bit dangerous. Can next_tick be zero and this end up spinning for a longer time?
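If this needs hardening, the catch-up could be made constant-time instead of iterative; a sketch that is equivalent to the while loop above even when next_tick starts at 0 (not part of this PR):

	if (zephyr_domain->ll_domain->next_tick < now) {
		uint64_t behind = now - zephyr_domain->ll_domain->next_tick;

		/* jump to the next period boundary at or after now, in O(1) */
		zephyr_domain->ll_domain->next_tick +=
			((behind + LL_TIMER_PERIOD_TICKS - 1) /
			 LL_TIMER_PERIOD_TICKS) * LL_TIMER_PERIOD_TICKS;
	}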


+	for (core = 0; core < CONFIG_CORE_COUNT; core++) {
+		struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
+
+		if (dt->handler)
+			k_sem_give(&dt->sem);
+	}
 }

 static int zephyr_domain_register(struct ll_schedule_domain *domain,
-				  uint64_t period, struct task *task,
+				  struct task *task,
 				  void (*handler)(void *arg), void *arg)
 {
 	struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain);
@@ -99,6 +113,9 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,
 	dt->handler = handler;
 	dt->arg = arg;
 
+	/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
Member: Let's make this a macro too.
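Something along these lines, perhaps; the macro name below is illustrative, not from this PR:

/* upper bound on timer wake-ups that may queue up before the scheduler
 * thread runs; 10 is rather arbitrary, see the comment above
 */
#define LL_TIMER_MAX_QUEUED_WAKEUPS	10

	k_sem_init(&dt->sem, 0, LL_TIMER_MAX_QUEUED_WAKEUPS);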

+	k_sem_init(&dt->sem, 0, 10);

 	thread_name[sizeof(thread_name) - 2] = '0' + core;
 
 	thread = k_thread_create(&dt->ll_thread,

@@ -113,13 +130,15 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain,

 	k_thread_start(thread);
 
-	if (!zephyr_domain->timer.user_data) {
+	if (!k_timer_user_data_get(&zephyr_domain->timer)) {
 		k_timeout_t start = {0};
 
 		k_timer_init(&zephyr_domain->timer, zephyr_domain_timer_fn, NULL);
-		zephyr_domain->timer.user_data = zephyr_domain;
+		k_timer_user_data_set(&zephyr_domain->timer, zephyr_domain);
 
 		k_timer_start(&zephyr_domain->timer, start, K_USEC(LL_TIMER_PERIOD_US));
+		domain->next_tick = platform_timer_get_atomic(zephyr_domain->ll_timer) +
+			k_ticks_to_cyc_ceil64(k_timer_remaining_ticks(&zephyr_domain->timer));
Member: not following why we need domain->next_tick when Zephyr will manage the tick wake-ups?

Collaborator (Author): let's keep .next_tick and .start in this PR, I'll add a comment that they shouldn't be needed for a fully native Zephyr LL scheduling stack, and let's try to remove them in a follow-up PR.

 	}
 
 	tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d",

@@ -140,15 +159,22 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain,
 	if (num_tasks)
 		return 0;
 
-	if (!atomic_read(&domain->total_num_tasks))
+	if (!atomic_read(&domain->total_num_tasks)) {
 		k_timer_stop(&zephyr_domain->timer);
+		k_timer_user_data_set(&zephyr_domain->timer, NULL);
+	}
 
-	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);
 	zephyr_domain->domain_thread[core].handler = NULL;
 
 	tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d",
 		domain->type, domain->clk);
 
+	/*
+	 * If running in the context of the domain thread, k_thread_abort() will
+	 * not return
+	 */
+	k_thread_abort(&zephyr_domain->domain_thread[core].ll_thread);
+
 	return 0;
 }

@@ -166,7 +192,7 @@ static const struct ll_schedule_domain_ops zephyr_domain_ops = {
 	.domain_is_pending	= zephyr_domain_is_pending
 };
 
-struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk)
+struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk)
 {
 	struct ll_schedule_domain *domain;
 	struct zephyr_domain *zephyr_domain;

@@ -179,8 +205,6 @@ struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk)

 	zephyr_domain->ll_timer = timer;
 	zephyr_domain->ll_domain = domain;
-	/* 10 is rather random, we better not accumulate 10 missed timer interrupts */
-	k_sem_init(&zephyr_domain->sem, 0, 10);
 
 	ll_sch_domain_set_pdata(domain, zephyr_domain);
