From 692867920d16dcb8f46430fb7b955b530147e7c4 Mon Sep 17 00:00:00 2001
From: Keyon Jie
Date: Tue, 12 Oct 2021 19:10:37 +0800
Subject: [PATCH 1/4] scheduler: add scheduler free support

To power down a secondary core safely, we need to cleanly unregister
and free the schedulers that run on that core, so that fresh ones can
be created the next time the core is powered up.

Move the scheduler data allocation to the SYS_RUNTIME zone and free it
when the core is powered down.

Signed-off-by: Keyon Jie
---
 src/include/sof/schedule/schedule.h |  8 +++++++-
 src/schedule/edf_schedule.c         |  8 +++++++-
 src/schedule/ll_schedule.c          | 14 ++++++++++----
 src/schedule/schedule.c             | 30 +++++++++++++++++++++++++++--
 4 files changed, 52 insertions(+), 8 deletions(-)

diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h
index 17098cf61b99..1f106625a9d6 100644
--- a/src/include/sof/schedule/schedule.h
+++ b/src/include/sof/schedule/schedule.h
@@ -272,7 +272,7 @@ static inline int schedule_task_free(struct task *task)
 	return -ENODEV;
 }
 
-/** See scheduler_ops::scheduler_free */
+/** See scheduler_ops::scheduler_free, frees all schedulers belonging to the core */
 static inline void schedule_free(void)
 {
 	struct schedulers *schedulers = *arch_schedulers_get();
@@ -311,6 +311,12 @@ int schedule_task_init(struct task *task,
  */
 void scheduler_init(int type, const struct scheduler_ops *ops, void *data);
 
+/**
+ * Frees the generic resources of a scheduler.
+ * @param data Scheduler's private data.
+ */
+void scheduler_free(void *data);
+
 /** @}*/
 
 #endif /* __SOF_SCHEDULE_SCHEDULE_H__ */
diff --git a/src/schedule/edf_schedule.c b/src/schedule/edf_schedule.c
index 14d552e123f4..a0bed91d4375 100644
--- a/src/schedule/edf_schedule.c
+++ b/src/schedule/edf_schedule.c
@@ -259,7 +259,7 @@ int scheduler_init_edf(void)
 
 	tr_info(&edf_tr, "edf_scheduler_init()");
 
-	edf_sch = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM,
+	edf_sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
 			  sizeof(*edf_sch));
 	list_init(&edf_sch->list);
 	edf_sch->clock = PLATFORM_DEFAULT_CLOCK;
@@ -295,6 +295,12 @@ static void scheduler_free_edf(void *data)
 	/* free main task context */
 	task_main_free();
 
+	/* free the generic scheduler resource */
+	scheduler_free(edf_sch);
+
+	/* free edf_schedule_data */
+	rfree(edf_sch);
+
 	irq_local_enable(flags);
 }
 
diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c
index a650cd065cea..c82d5798b340 100644
--- a/src/schedule/ll_schedule.c
+++ b/src/schedule/ll_schedule.c
@@ -573,13 +573,19 @@ static int reschedule_ll_task(void *data, struct task *task, uint64_t start)
 
 static void scheduler_free_ll(void *data)
 {
-	struct ll_schedule_data *sch = data;
+	struct ll_schedule_data *ll_sch = data;
 	uint32_t flags;
 
 	irq_local_disable(flags);
 
-	notifier_unregister(sch, NULL,
-			    NOTIFIER_CLK_CHANGE_ID(sch->domain->clk));
+	notifier_unregister(ll_sch, NULL,
+			    NOTIFIER_CLK_CHANGE_ID(ll_sch->domain->clk));
+
+	/* free the generic scheduler resource */
+	scheduler_free(ll_sch);
+
+	/* free ll_schedule_data */
+	rfree(ll_sch);
 
 	irq_local_enable(flags);
 }
@@ -626,7 +632,7 @@ int scheduler_init_ll(struct ll_schedule_domain *domain)
 	struct ll_schedule_data *sch;
 
 	/* initialize scheduler private data */
-	sch = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*sch));
+	sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*sch));
 	list_init(&sch->tasks);
 	atomic_init(&sch->num_tasks, 0);
 	sch->domain = domain;
diff --git a/src/schedule/schedule.c b/src/schedule/schedule.c
index 64a4d74d00fe..a81470622839 100644
--- a/src/schedule/schedule.c
+++ b/src/schedule/schedule.c
@@ -49,7 +49,7 @@ static void scheduler_register(struct schedule_data *scheduler)
 
 	if (!*sch) {
 		/* init schedulers list */
-		*sch = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM,
+		*sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
 			       sizeof(**sch));
 		list_init(&(*sch)->list);
 	}
@@ -57,11 +57,20 @@ static void scheduler_register(struct schedule_data *scheduler)
 	list_item_append(&scheduler->list, &(*sch)->list);
 }
 
+static void scheduler_unregister(struct schedule_data *scheduler)
+{
+	struct schedulers **sch = arch_schedulers_get();
+
+	list_item_del(&scheduler->list);
+	if (list_is_empty(&(*sch)->list))
+		rfree(*sch);
+}
+
 void scheduler_init(int type, const struct scheduler_ops *ops, void *data)
 {
 	struct schedule_data *sch;
 
-	sch = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*sch));
+	sch = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*sch));
 	list_init(&sch->list);
 	sch->type = type;
 	sch->ops = ops;
@@ -69,3 +78,20 @@ void scheduler_init(int type, const struct scheduler_ops *ops, void *data)
 
 	scheduler_register(sch);
 }
+
+void scheduler_free(void *data)
+{
+	struct schedulers **schedulers = arch_schedulers_get();
+	struct list_item *slist;
+	struct schedule_data *sch;
+
+	list_for_item(slist, &(*schedulers)->list) {
+		sch = container_of(slist, struct schedule_data, list);
+		if (sch->data == data) {
+			/* found the scheduler, free it */
+			scheduler_unregister(sch);
+			rfree(sch);
+			break;
+		}
+	}
+}

From 046992f53b41a9d633ab968c50c196454765b307 Mon Sep 17 00:00:00 2001
From: Keyon Jie
Date: Tue, 19 Oct 2021 17:57:23 +0800
Subject: [PATCH 2/4] testbench: scheduler: add scheduler free support

Add scheduler free support to align with the multi-core cAVS platform
implementation. The explicit freeing in tb_pipeline_free() is no
longer needed.

Signed-off-by: Keyon Jie
---
 src/platform/library/schedule/schedule.c | 27 ++++++++++++++++++++++++
 tools/testbench/common_test.c            |  9 --------
 2 files changed, 27 insertions(+), 9 deletions(-)

diff --git a/src/platform/library/schedule/schedule.c b/src/platform/library/schedule/schedule.c
index 92000b5355cb..e99ec3b6ea6f 100644
--- a/src/platform/library/schedule/schedule.c
+++ b/src/platform/library/schedule/schedule.c
@@ -53,6 +53,16 @@ static void scheduler_register(struct schedule_data *scheduler)
 	list_item_append(&scheduler->list, &(*sch)->list);
 }
 
+static void scheduler_unregister(struct schedule_data *scheduler)
+{
+	struct schedulers **sch = arch_schedulers_get();
+
+	list_item_del(&scheduler->list);
+
+	if (list_is_empty(&(*sch)->list))
+		free(*sch);
+}
+
 void scheduler_init(int type, const struct scheduler_ops *ops, void *data)
 {
 	struct schedule_data *sch;
@@ -65,3 +75,20 @@ void scheduler_init(int type, const struct scheduler_ops *ops, void *data)
 
 	scheduler_register(sch);
 }
+
+void scheduler_free(void *data)
+{
+	struct schedulers **schedulers = arch_schedulers_get();
+	struct list_item *slist;
+	struct schedule_data *sch;
+
+	list_for_item(slist, &(*schedulers)->list) {
+		sch = container_of(slist, struct schedule_data, list);
+		if (sch->data == data) {
+			/* found the scheduler, free it */
+			scheduler_unregister(sch);
+			free(sch);
+			break;
+		}
+	}
+}
diff --git a/tools/testbench/common_test.c b/tools/testbench/common_test.c
index 2e813dd54be3..44358f6be576 100644
--- a/tools/testbench/common_test.c
+++ b/tools/testbench/common_test.c
@@ -62,9 +62,6 @@ struct ipc_data {
 
 void tb_pipeline_free(struct sof *sof)
 {
-	struct schedule_data *sch;
-	struct schedulers **schedulers;
-	struct list_item *slist, *_slist;
 	struct notify **notify = arch_notify_get();
 	struct ipc_data *iipc;
 
@@ -73,12 +70,6 @@ void tb_pipeline_free(struct sof *sof)
 
 	/* free all scheduler data */
 	schedule_free();
-	schedulers = arch_schedulers_get();
-	list_for_item_safe(slist, _slist, &(*schedulers)->list) {
-		sch = container_of(slist, struct schedule_data, list);
-		free(sch);
-	}
-	free(*arch_schedulers_get());
 
 	/* free IPC data */
 	iipc = sof->ipc->private;

From 5fc2b97a3e3ad2b7aac73777bf88bcfa3bcb61fb Mon Sep 17 00:00:00 2001
From: Keyon Jie
Date: Tue, 19 Oct 2021 17:36:47 +0800
Subject: [PATCH 3/4] idc: add idc free support

To power down a secondary core gracefully, we need to cleanly free the
IDC-related memory used on that core, so that fresh data can be
allocated the next time the core is powered up.

Signed-off-by: Keyon Jie
---
 src/drivers/intel/cavs/idc.c                  |  4 +---
 src/idc/idc.c                                 | 19 ++++++++++++++++++-
 src/include/sof/drivers/idc.h                 |  2 +-
 .../intel/cavs/include/cavs/drivers/idc.h     |  4 ++++
 4 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/src/drivers/intel/cavs/idc.c b/src/drivers/intel/cavs/idc.c
index e34535d7b776..725024f5c466 100644
--- a/src/drivers/intel/cavs/idc.c
+++ b/src/drivers/intel/cavs/idc.c
@@ -246,7 +246,7 @@ int platform_idc_init(void)
 /**
  * \brief Frees IDC data and unregisters interrupt.
  */
-void idc_free(void)
+void platform_idc_free(void)
 {
 	struct idc *idc = *idc_get();
 	int core = cpu_get_id();
@@ -265,6 +265,4 @@ void idc_free(void)
 		if (idctfc & IPC_IDCTFC_BUSY)
 			idc_write(IPC_IDCTFC(i), core, idctfc);
 	}
-
-	schedule_task_free(&idc->idc_task);
 }
diff --git a/src/idc/idc.c b/src/idc/idc.c
index 31477f88fa3d..b0bbdad01f2c 100644
--- a/src/idc/idc.c
+++ b/src/idc/idc.c
@@ -321,7 +321,7 @@ int idc_init(void)
 	tr_info(&idc_tr, "idc_init()");
 
 	/* initialize idc data */
-	*idc = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(**idc));
+	*idc = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(**idc));
 	(*idc)->payload = cache_to_uncache((struct idc_payload *)static_payload);
 
 	/* process task */
@@ -336,3 +336,20 @@ int idc_init(void)
 	return 0;
 #endif
 }
+
+/**
+ * \brief Frees IDC data for the current core.
+ */
+void idc_free(void)
+{
+	struct idc **idc = idc_get();
+
+	tr_info(&idc_tr, "idc_free()");
+
+#ifndef __ZEPHYR__
+	platform_idc_free();
+	schedule_task_free(&(*idc)->idc_task);
+#endif
+
+	rfree(*idc);
+}
diff --git a/src/include/sof/drivers/idc.h b/src/include/sof/drivers/idc.h
index 2074a7049808..db0b867e8e19 100644
--- a/src/include/sof/drivers/idc.h
+++ b/src/include/sof/drivers/idc.h
@@ -128,7 +128,7 @@ static inline struct idc_payload *idc_payload_get(struct idc *idc,
 
 void idc_enable_interrupts(int target_core, int source_core);
 
-void idc_free(void);
+void platform_idc_free(void);
 
 int platform_idc_init(void);
 
diff --git a/src/platform/intel/cavs/include/cavs/drivers/idc.h b/src/platform/intel/cavs/include/cavs/drivers/idc.h
index 9242450bdcfc..7d1c262588fc 100644
--- a/src/platform/intel/cavs/include/cavs/drivers/idc.h
+++ b/src/platform/intel/cavs/include/cavs/drivers/idc.h
@@ -20,12 +20,16 @@ int idc_send_msg(struct idc_msg *msg, uint32_t mode);
 
 int idc_init(void);
 
+void idc_free(void);
+
 #else
 
 static inline int idc_send_msg(struct idc_msg *msg, uint32_t mode) { return 0; }
 
 static inline int idc_init(void) { return 0; }
 
+static inline void idc_free(void) { }
+
 #endif
 
 #endif /* __CAVS_DRIVERS_IDC_H__ */

From 485653cfb814887a087fcb5094cceebada8fb31f Mon Sep 17 00:00:00 2001
From: Keyon Jie
Date: Thu, 21 Oct 2021 16:15:30 +0800
Subject: [PATCH 4/4] notifier: add notifier free support

To power down a secondary core gracefully, we need to cleanly free the
notifier-related memory used on that core, so that fresh data can be
allocated the next time the core is powered up.

Signed-off-by: Keyon Jie
---
 src/lib/notifier.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/lib/notifier.c b/src/lib/notifier.c
index e8182115aeef..48580d96963a 100644
--- a/src/lib/notifier.c
+++ b/src/lib/notifier.c
@@ -189,7 +189,7 @@ void init_system_notify(struct sof *sof)
 {
 	struct notify **notify = arch_notify_get();
 	int i;
-	*notify = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM,
+	*notify = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM,
 			  sizeof(**notify));
 
 	spinlock_init(&(*notify)->lock);
@@ -203,4 +203,7 @@ void init_system_notify(struct sof *sof)
 
 void free_system_notify(void)
 {
+	struct notify **notify = arch_notify_get();
+
+	rfree(*notify);
 }
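
The four patches above move each per-core service (schedulers, IDC, notifier)
to SYS_RUNTIME allocations and give each one a matching free routine. As a
minimal sketch of how a secondary-core power-down path might tie these
routines together (the function name secondary_core_power_down(), the header
paths and the call ordering below are illustrative assumptions, not code from
this series):

#include <sof/drivers/idc.h>
#include <sof/lib/notifier.h>
#include <sof/schedule/schedule.h>

/* Illustrative sketch only: release the per-core SYS_RUNTIME allocations
 * made freeable by the patches above, before the core itself is power gated.
 */
static void secondary_core_power_down(void)
{
	/* unregister and free every scheduler registered on this core */
	schedule_free();

	/* free the per-core IDC task and data */
	idc_free();

	/* free the per-core notifier data */
	free_system_notify();

	/* Platform code would gate power to the core at this point; on the
	 * next power-up, scheduler_init_*(), idc_init() and
	 * init_system_notify() can allocate fresh per-core data again.
	 */
}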