src/schedule/zephyr_dp_schedule.c: 3 changes (1 addition, 2 deletions)
@@ -300,8 +300,7 @@ static int scheduler_dp_task_free(void *data, struct task *task)
 	if (pdata->event != &pdata->event_struct)
 		k_object_free(pdata->event);
 #else
-	if (pdata->sem != &pdata->sem_struct)
-		k_object_free(pdata->sem);
+	k_object_free(pdata->sem);
 #endif
 	if (pdata->thread != &pdata->thread_struct)
 		k_object_free(pdata->thread);
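Note on the hunk above: the guard against the embedded semaphore can go because sem_struct is removed from struct task_dp_pdata (next file) and the semaphore is now always allocated with k_object_alloc() in the userspace-application init path further down in this PR. A minimal standalone sketch of that allocate/free pairing, assuming Zephyr's dynamic kernel objects (CONFIG_DYNAMIC_OBJECTS); the demo function name is hypothetical:

#include <errno.h>
#include <zephyr/kernel.h>

/* Sketch only, not part of this PR: k_object_free() looks the pointer up in the
 * kernel's dynamic-object list, so freeing an always-dynamic semaphore needs no
 * "is it the embedded struct?" check.
 */
static int sem_alloc_free_sketch(void)
{
	struct k_sem *sem = k_object_alloc(K_OBJ_SEM);	/* dynamic kernel object */

	if (!sem)
		return -ENOMEM;

	k_sem_init(sem, 0, 1);
	/* ... grant the semaphore to the worker thread and use it ... */
	k_object_free(sem);	/* safe: sem was allocated by k_object_alloc() */

	return 0;
}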
src/schedule/zephyr_dp_schedule.h: 1 change (0 additions, 1 deletion)
@@ -40,7 +40,6 @@ struct task_dp_pdata {
 	uint32_t ll_cycles_to_start;	/* current number of LL cycles till delayed start */
 #if CONFIG_SOF_USERSPACE_APPLICATION
 	struct k_sem *sem;		/* pointer to semaphore for task scheduling */
-	struct k_sem sem_struct;	/* semaphore for task scheduling for kernel threads */
 	struct ipc4_flat *flat;
 	unsigned char pend_ipc;
 	unsigned char pend_proc;
src/schedule/zephyr_dp_schedule_application.c: 66 changes (24 additions, 42 deletions)
@@ -27,9 +27,7 @@
 LOG_MODULE_DECLARE(dp_schedule, CONFIG_SOF_LOG_LEVEL);
 extern struct tr_ctx dp_tr;
 
-#if CONFIG_USERSPACE
 static struct k_mem_domain dp_mdom[CONFIG_CORE_COUNT];
-#endif
 
 /* Synchronization semaphore for the scheduler thread to wait for DP startup */
 #define DP_SYNC_INIT(i, _) Z_SEM_INITIALIZER(dp_sync[i], 0, 1)
@@ -49,7 +47,8 @@ struct ipc4_flat {
 			enum ipc4_pipeline_state state;
 			int n_sources;
 			int n_sinks;
-			void *source_sink[2 * CONFIG_MODULE_MAX_CONNECTIONS];
+			struct sof_source *source[CONFIG_MODULE_MAX_CONNECTIONS];
+			struct sof_sink *sink[CONFIG_MODULE_MAX_CONNECTIONS];
 		} pipeline_state;
 	};
 };
@@ -84,13 +83,12 @@ static int ipc_thread_flatten(unsigned int cmd, const union scheduler_dp_thread_
 		flat->pipeline_state.n_sources = param->pipeline_state.n_sources;
 		flat->pipeline_state.n_sinks = param->pipeline_state.n_sinks;
-		/* Up to 2 * CONFIG_MODULE_MAX_CONNECTIONS */
-		memcpy(flat->pipeline_state.source_sink, param->pipeline_state.sources,
+		memcpy(flat->pipeline_state.source, param->pipeline_state.sources,
 		       flat->pipeline_state.n_sources *
-		       sizeof(flat->pipeline_state.source_sink[0]));
-		memcpy(flat->pipeline_state.source_sink + flat->pipeline_state.n_sources,
-		       param->pipeline_state.sinks,
+		       sizeof(flat->pipeline_state.source[0]));
+		memcpy(flat->pipeline_state.sink, param->pipeline_state.sinks,
 		       flat->pipeline_state.n_sinks *
-		       sizeof(flat->pipeline_state.source_sink[0]));
+		       sizeof(flat->pipeline_state.sink[0]));
 	}
 }

@@ -140,11 +138,10 @@ static void ipc_thread_unflatten_run(struct processing_module *pmod, struct ipc4
 			break;
 		case COMP_TRIGGER_PREPARE:
 			flat->ret = ops->prepare(pmod,
-						 (struct sof_source **)flat->pipeline_state.source_sink,
-						 flat->pipeline_state.n_sources,
-						 (struct sof_sink **)(flat->pipeline_state.source_sink +
-								      flat->pipeline_state.n_sources),
-						 flat->pipeline_state.n_sinks);
+						 flat->pipeline_state.source,
+						 flat->pipeline_state.n_sources,
+						 flat->pipeline_state.sink,
+						 flat->pipeline_state.n_sinks);
 		}
 	}
 }
@@ -198,7 +195,7 @@ int scheduler_dp_thread_ipc(struct processing_module *pmod, unsigned int cmd,
 }
 
 /* Go through all DP tasks and recalculate their readiness and deadlines
- * NOT REENTRANT, should be called with scheduler_dp_lock()
+ * NOT REENTRANT, called with scheduler_dp_lock() held
  */
 void scheduler_dp_recalculate(struct scheduler_dp_data *dp_sch, bool is_ll_post_run)
 {
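The reworded comment above states the locking contract for scheduler_dp_recalculate(). As a hedged illustration of how a caller is expected to use it, the sketch below assumes scheduler_dp_lock()/scheduler_dp_unlock() form a key-returning lock pair; only the lock name comes from the comment, the prototypes and the helper are assumptions:

/* Illustration only: serialize recalculation with the scheduler lock.
 * The key-returning lock/unlock prototypes are assumed, not taken from this PR.
 */
static void dp_recalculate_locked_sketch(struct scheduler_dp_data *dp_sch)
{
	unsigned int lock_key = scheduler_dp_lock();

	scheduler_dp_recalculate(dp_sch, false);	/* false: not the post-LL-run pass */
	scheduler_dp_unlock(lock_key);
}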
@@ -387,7 +384,6 @@ void dp_thread_fn(void *p1, void *p2, void *p3)
  */
 void scheduler_dp_domain_free(struct processing_module *pmod)
 {
-#if CONFIG_USERSPACE
 	unsigned int core = pmod->dev->task->core;
 
 	llext_manager_rm_domain(pmod->dev->ipc_config.id, dp_mdom + core);
@@ -396,9 +392,9 @@ void scheduler_dp_domain_free(struct processing_module *pmod)
 
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_HEAP);
 	k_mem_domain_remove_partition(dp_mdom + core, pdata->mpart + SOF_DP_PART_CFG);
-#endif
 }
 
+/* Called only in IPC context */
 int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 			   const struct task_ops *ops, struct processing_module *mod,
 			   uint16_t core, size_t stack_size, uint32_t options)
@@ -455,30 +451,22 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 
 	struct task_dp_pdata *pdata = &task_memory->pdata;
 
-	/* Point to event_struct event for kernel threads synchronization */
-	/* It will be overwritten for K_USER threads to dynamic ones. */
-	pdata->sem = &pdata->sem_struct;
-	pdata->thread = &pdata->thread_struct;
 	pdata->flat = &task_memory->flat;
 
-#ifdef CONFIG_USERSPACE
-	if (options & K_USER) {
-		pdata->sem = k_object_alloc(K_OBJ_SEM);
-		if (!pdata->sem) {
-			tr_err(&dp_tr, "Event object allocation failed");
-			ret = -ENOMEM;
-			goto e_stack;
-		}
+	pdata->sem = k_object_alloc(K_OBJ_SEM);
+	if (!pdata->sem) {
+		tr_err(&dp_tr, "Event object allocation failed");
+		ret = -ENOMEM;
+		goto e_stack;
+	}
 
-		pdata->thread = k_object_alloc(K_OBJ_THREAD);
-		if (!pdata->thread) {
-			tr_err(&dp_tr, "Thread object allocation failed");
-			ret = -ENOMEM;
-			goto e_kobj;
-		}
-		memset(&pdata->thread->arch, 0, sizeof(pdata->thread->arch));
-	}
-#endif /* CONFIG_USERSPACE */
+	pdata->thread = k_object_alloc(K_OBJ_THREAD);
+	if (!pdata->thread) {
+		tr_err(&dp_tr, "Thread object allocation failed");
+		ret = -ENOMEM;
+		goto e_kobj;
+	}
+	memset(&pdata->thread->arch, 0, sizeof(pdata->thread->arch));
 
 	/* success, fill the structures */
 	pdata->p_stack = p_stack;
@@ -503,7 +491,6 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		goto e_thread;
 	}
 
-#if CONFIG_USERSPACE
 	k_thread_access_grant(pdata->thread_id, pdata->sem, &dp_sync[core]);
 	scheduler_dp_grant(pdata->thread_id, core);
 
@@ -546,26 +533,21 @@ int scheduler_dp_task_init(struct task **task, const struct sof_uuid_entry *uid,
 		tr_err(&dp_tr, "failed to add thread to domain %d", ret);
 		goto e_dom;
 	}
-#endif /* CONFIG_USERSPACE */
 
 	/* start the thread, it should immediately stop at the semaphore */
 	k_sem_init(pdata->sem, 0, 1);
 	k_thread_start(pdata->thread_id);
 
 	return 0;
 
-#ifdef CONFIG_USERSPACE
 e_dom:
 	scheduler_dp_domain_free(mod);
-#endif
 e_thread:
 	k_thread_abort(pdata->thread_id);
-#ifdef CONFIG_USERSPACE
 e_kobj:
 	/* k_object_free looks for a pointer in the list, any invalid value can be passed */
 	k_object_free(pdata->thread);
 	k_object_free(pdata->sem);
-#endif
 e_stack:
 	user_stack_free(p_stack);
 e_tmem:
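For context on what the error labels above unwind: scheduler_dp_task_init() allocates the semaphore and thread objects, initializes the semaphore to 0 and starts the thread, which is then expected to block on that semaphore until the scheduler gives it. A small sketch of both sides under that assumption; the thread-side loop is not part of this diff and the helper names are hypothetical:

#include <zephyr/kernel.h>

/* Init side, mirroring the hunk above: count 0 makes the new thread block at once. */
static void dp_task_start_sketch(struct k_sem *sem, k_tid_t thread_id)
{
	k_sem_init(sem, 0, 1);
	k_thread_start(thread_id);
}

/* Assumed DP thread side: sleep until the scheduler gives the semaphore, then run. */
static void dp_thread_loop_sketch(struct k_sem *sem)
{
	for (;;) {
		k_sem_take(sem, K_FOREVER);
		/* ... run one DP processing cycle ... */
	}
}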