From c7a23e208ec854a111b54959cabb86ca3927aec5 Mon Sep 17 00:00:00 2001 From: Adrian Bonislawski Date: Tue, 21 Sep 2021 07:10:14 +0200 Subject: [PATCH 1/8] platform: lps_wait_for_interrupt entry core check LPS functionality is limited to the primary core Signed-off-by: Adrian Bonislawski Signed-off-by: Bartosz Kokoszko --- src/platform/intel/cavs/platform.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/platform/intel/cavs/platform.c b/src/platform/intel/cavs/platform.c index 66093ac2ab43..40f6de51d339 100644 --- a/src/platform/intel/cavs/platform.c +++ b/src/platform/intel/cavs/platform.c @@ -539,7 +539,8 @@ void platform_wait_for_interrupt(int level) platform_clock_on_waiti(); #if (CONFIG_CAVS_LPS) - if (pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID)) + if (pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID) || + cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) arch_wait_for_interrupt(level); else lps_wait_for_interrupt(level); From 0452b85142450598cec0d271355003e5bb5833ce Mon Sep 17 00:00:00 2001 From: Adrian Bonislawski Date: Tue, 21 Sep 2021 08:02:26 +0200 Subject: [PATCH 2/8] lps: restore secondary cores on wake up This will add missing secondary cores restore on wake up, simple pm_runtime_get(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID) in lps is not enough in multicore scenario Signed-off-by: Adrian Bonislawski --- src/arch/host/include/arch/lib/cpu.h | 5 +++++ src/arch/xtensa/include/arch/lib/cpu.h | 4 ++++ src/arch/xtensa/lib/cpu.c | 25 +++++++++++++++++++++++++ src/include/sof/lib/cpu.h | 5 +++++ src/ipc/ipc3/handler.c | 8 ++++++-- zephyr/wrapper.c | 6 ++++++ 6 files changed, 51 insertions(+), 2 deletions(-) diff --git a/src/arch/host/include/arch/lib/cpu.h b/src/arch/host/include/arch/lib/cpu.h index c6e9ee74693b..6e1dedc4b458 100644 --- a/src/arch/host/include/arch/lib/cpu.h +++ b/src/arch/host/include/arch/lib/cpu.h @@ -34,6 +34,11 @@ static inline int arch_cpu_get_id(void) return 0; } +static inline int 
arch_cpu_restore_secondary_cores(void) +{ + return 0; +} + static inline void cpu_write_threadptr(int threadptr) { } diff --git a/src/arch/xtensa/include/arch/lib/cpu.h b/src/arch/xtensa/include/arch/lib/cpu.h index 6e52d3fdc2c8..a87645d35393 100644 --- a/src/arch/xtensa/include/arch/lib/cpu.h +++ b/src/arch/xtensa/include/arch/lib/cpu.h @@ -26,6 +26,8 @@ int arch_cpu_is_core_enabled(int id); int arch_cpu_enabled_cores(void); +int arch_cpu_restore_secondary_cores(void); + #else static inline int arch_cpu_enable_core(int id) { return 0; } @@ -36,6 +38,8 @@ static inline int arch_cpu_is_core_enabled(int id) { return 1; } static inline int arch_cpu_enabled_cores(void) { return 1; } +static inline int arch_cpu_restore_secondary_cores(void) {return 0; } + #endif static inline int arch_cpu_get_id(void) diff --git a/src/arch/xtensa/lib/cpu.c b/src/arch/xtensa/lib/cpu.c index dcbe811f2476..fc2f9458d0ff 100644 --- a/src/arch/xtensa/lib/cpu.c +++ b/src/arch/xtensa/lib/cpu.c @@ -189,3 +189,28 @@ void cpu_power_down_core(void) while (1) arch_wait_for_interrupt(0); } + +int arch_cpu_restore_secondary_cores(void) +{ + struct idc_msg power_up = { IDC_MSG_POWER_UP, IDC_MSG_POWER_UP_EXT }; + int ret, id; + + for (id = 0; id < CONFIG_CORE_COUNT; id++) { + if (arch_cpu_is_core_enabled(id) && id != PLATFORM_PRIMARY_CORE_ID) { + power_up.core = id; + + /* Power up secondary core */ + pm_runtime_get(PM_RUNTIME_DSP, id); + + /* enable IDC interrupt for the secondary core */ + idc_enable_interrupts(id, cpu_get_id()); + + /* send IDC power up message */ + ret = idc_send_msg(&power_up, IDC_POWER_UP); + if (ret < 0) + return ret; + } + } + + return 0; +} diff --git a/src/include/sof/lib/cpu.h b/src/include/sof/lib/cpu.h index 21df5fa06141..7138236e9223 100644 --- a/src/include/sof/lib/cpu.h +++ b/src/include/sof/lib/cpu.h @@ -56,6 +56,11 @@ static inline int cpu_enabled_cores(void) return arch_cpu_enabled_cores(); } +static inline int cpu_restore_secondary_cores(void) +{ + return 
arch_cpu_restore_secondary_cores(); +} + #endif #endif /* __SOF_LIB_CPU_H__ */ diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index a87dd30ef9d7..4020cc306502 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -701,10 +701,14 @@ static int ipc_pm_gate(uint32_t header) if (pm_gate.flags & SOF_PM_NO_TRACE) trace_off(); - if (pm_gate.flags & SOF_PM_PPG) + if (pm_gate.flags & SOF_PM_PPG) { pm_runtime_disable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID); - else +#if (CONFIG_CAVS_LPS) + cpu_restore_secondary_cores(); +#endif + } else { pm_runtime_enable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID); + } /* resume dma trace if needed */ if (!(pm_gate.flags & SOF_PM_NO_TRACE)) diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index 1bbe36fd025a..bbf169d36645 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -758,6 +758,12 @@ int arch_cpu_enable_core(int id) return 0; } +int arch_cpu_restore_secondary_cores(void) +{ + /* TODO: use Zephyr version */ + return 0; +} + void arch_cpu_disable_core(int id) { /* TODO: call Zephyr API */ From b5079871fa4e711c9aca1eebf84cdf379a16308a Mon Sep 17 00:00:00 2001 From: Adrian Bonislawski Date: Tue, 21 Sep 2021 08:37:09 +0200 Subject: [PATCH 3/8] idc: add idc_restore Restores idc interrupt if all is ready Signed-off-by: Adrian Bonislawski --- src/drivers/intel/cavs/idc.c | 33 +++++++++++++++++++ src/idc/idc.c | 20 +++++++++++ src/include/sof/drivers/idc.h | 2 ++ .../intel/cavs/include/cavs/drivers/idc.h | 2 ++ .../library/include/platform/drivers/idc.h | 5 +++ 5 files changed, 62 insertions(+) diff --git a/src/drivers/intel/cavs/idc.c b/src/drivers/intel/cavs/idc.c index e34535d7b776..72f7b0d65827 100644 --- a/src/drivers/intel/cavs/idc.c +++ b/src/drivers/intel/cavs/idc.c @@ -243,6 +243,39 @@ int platform_idc_init(void) return 0; } +/** + * \brief Restores IDC interrupt. 
During D0->D0ix/D0ix->D0 flow primary core + * disables all secondary cores - this is not a cold boot process, because + * memory has not been powered off. In that case, we should only enable + * idc interrupts, because all required structures already exist. + */ +int platform_idc_restore(void) +{ + struct idc *idc = *idc_get(); + int core = cpu_get_id(); + int ret; + + idc->irq = interrupt_get_irq(PLATFORM_IDC_INTERRUPT, + PLATFORM_IDC_INTERRUPT_NAME); + if (idc->irq < 0) { + tr_err(&idc_tr, "platform_idc_restore(): getting irq failed."); + return idc->irq; + } + + ret = interrupt_register(idc->irq, idc_irq_handler, idc); + if (ret < 0) { + tr_err(&idc_tr, "platform_idc_restore(): registering irq failed."); + return ret; + } + + interrupt_enable(idc->irq, idc); + + /* enable BUSY interrupt */ + idc_write(IPC_IDCCTL, core, idc->busy_bit_mask); + + return 0; +} + /** * \brief Frees IDC data and unregisters interrupt. */ diff --git a/src/idc/idc.c b/src/idc/idc.c index 31477f88fa3d..70509417dfd1 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -336,3 +336,23 @@ int idc_init(void) return 0; #endif } + +int idc_restore(void) +{ + struct idc **idc = idc_get(); + + tr_info(&idc_tr, "idc_restore()"); + + /* idc_restore() is invoked during D0->D0ix/D0ix->D0 flow. In that + * case basic core structures e.g. idc struct should be already + * allocated (in D0->D0ix primary core disables all secondary cores, but + * memory has not been powered off. 
+ */ + assert(*idc); + +#ifndef __ZEPHYR__ + return platform_idc_restore(); +#endif + + return 0; +} diff --git a/src/include/sof/drivers/idc.h b/src/include/sof/drivers/idc.h index 2074a7049808..a1481ac610d9 100644 --- a/src/include/sof/drivers/idc.h +++ b/src/include/sof/drivers/idc.h @@ -132,6 +132,8 @@ void idc_free(void); int platform_idc_init(void); +int platform_idc_restore(void); + enum task_state idc_do_cmd(void *data); void idc_cmd(struct idc_msg *msg); diff --git a/src/platform/intel/cavs/include/cavs/drivers/idc.h b/src/platform/intel/cavs/include/cavs/drivers/idc.h index 9242450bdcfc..11d30d7f446b 100644 --- a/src/platform/intel/cavs/include/cavs/drivers/idc.h +++ b/src/platform/intel/cavs/include/cavs/drivers/idc.h @@ -20,6 +20,8 @@ int idc_send_msg(struct idc_msg *msg, uint32_t mode); int idc_init(void); +int idc_restore(void); + #else static inline int idc_send_msg(struct idc_msg *msg, uint32_t mode) { return 0; } diff --git a/src/platform/library/include/platform/drivers/idc.h b/src/platform/library/include/platform/drivers/idc.h index 9732796cbdcb..977b4c008dac 100644 --- a/src/platform/library/include/platform/drivers/idc.h +++ b/src/platform/library/include/platform/drivers/idc.h @@ -28,6 +28,11 @@ static inline int idc_init(void) return 0; } +static inline int idc_restore(void) +{ + return 0; +} + #endif /* __PLATFORM_DRIVERS_IDC_H__ */ #else From 28c23a4e6bddc27b50de0486b8aa7a081ebeacb6 Mon Sep 17 00:00:00 2001 From: Adrian Bonislawski Date: Tue, 21 Sep 2021 08:56:17 +0200 Subject: [PATCH 4/8] schedule: add possibility to restore all schedulers EDF scheduler restores interrupt Signed-off-by: Adrian Bonislawski --- src/include/sof/schedule/schedule.h | 27 +++++++++++++++++++++++++++ src/schedule/edf_schedule.c | 24 ++++++++++++++++++++++++ src/schedule/ll_schedule.c | 1 + 3 files changed, 52 insertions(+) diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index df214043dde4..029aa5288fa2 100644 --- 
a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -117,6 +117,15 @@ struct scheduler_ops { * This operation is optional. */ void (*scheduler_free)(void *data); + + /** + * Restores scheduler's resources. + * @param data Private data of selected scheduler. + * @return 0 if succeeded, error code otherwise. + * + * This operation is optional. + */ + int (*scheduler_restore)(void *data); }; /** \brief Holds information about scheduler. */ @@ -290,6 +299,24 @@ static inline void schedule_free(void) } } +/** See scheduler_ops::scheduler_restore */ +static inline int schedulers_restore(void) +{ + struct schedulers *schedulers = *arch_schedulers_get(); + struct schedule_data *sch; + struct list_item *slist; + + assert(schedulers); + + list_for_item(slist, &schedulers->list) { + sch = container_of(slist, struct schedule_data, list); + if (sch->ops->scheduler_restore) + return sch->ops->scheduler_restore(sch->data); + } + + return 0; +} + /** * Initializes scheduling task. * @param task Task to be initialized. 
diff --git a/src/schedule/edf_schedule.c b/src/schedule/edf_schedule.c index a0a3b46ebedf..d0ecfd8e93d5 100644 --- a/src/schedule/edf_schedule.c +++ b/src/schedule/edf_schedule.c @@ -298,6 +298,29 @@ static void scheduler_free_edf(void *data) irq_local_enable(flags); } +static int scheduler_restore_edf(void *data) +{ + struct edf_schedule_data *edf_sch = data; + uint32_t flags; + + irq_local_disable(flags); + + edf_sch->irq = interrupt_get_irq(PLATFORM_SCHEDULE_IRQ, + PLATFORM_SCHEDULE_IRQ_NAME); + + if (edf_sch->irq < 0) { + tr_err(&edf_tr, "scheduler_restore_edf(): getting irq failed."); + return edf_sch->irq; + } + + interrupt_register(edf_sch->irq, edf_scheduler_run, edf_sch); + interrupt_enable(edf_sch->irq, edf_sch); + + irq_local_enable(flags); + + return 0; +} + static void schedule_edf(struct edf_schedule_data *edf_sch) { interrupt_set(edf_sch->irq); @@ -311,4 +334,5 @@ static const struct scheduler_ops schedule_edf_ops = { .schedule_task_cancel = schedule_edf_task_cancel, .schedule_task_free = schedule_edf_task_free, .scheduler_free = scheduler_free_edf, + .scheduler_restore = scheduler_restore_edf, }; diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index 0a19d3d7e32b..ed7c4f7e4a75 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -646,6 +646,7 @@ static const struct scheduler_ops schedule_ll_ops = { .schedule_task_cancel = schedule_ll_task_cancel, .reschedule_task = reschedule_ll_task, .scheduler_free = scheduler_free_ll, + .scheduler_restore = NULL, .schedule_task_running = NULL, .schedule_task_complete = NULL, }; From 50b8d5af084f62bd086764f01a7c49395f93a9cb Mon Sep 17 00:00:00 2001 From: Adrian Bonislawski Date: Tue, 21 Sep 2021 09:48:57 +0200 Subject: [PATCH 5/8] init: secondary_core_restore possibility on boot In some multicore scenarios dsp can try to restore secondary core state if all memory is still powered up and there was no external request for a core shutdown before For example multicore case 
with d0ix sleep where secondary core will be put to sleep along with primary core and after wakeup there will be a possibility to restore it Signed-off-by: Adrian Bonislawski Signed-off-by: Bartosz Kokoszko --- src/init/init.c | 70 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 70 insertions(+) diff --git a/src/init/init.c b/src/init/init.c index eb379746129f..591920ebebf9 100644 --- a/src/init/init.c +++ b/src/init/init.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -70,6 +71,66 @@ static inline void lp_sram_unpack(void) #if CONFIG_MULTICORE +#ifndef __ZEPHYR__ + +static int check_restore(void) +{ + struct idc *idc = *idc_get(); + struct task *task = *task_main_get(); + struct notify *notifier = *arch_notify_get(); + struct schedulers *schedulers = *arch_schedulers_get(); + + /* check whether basic core structures have been already allocated. If they + * are available in memory, it means that this is not a cold boot and memory + * has not been powered off. + */ + if (!idc || !task || !notifier || !schedulers) + return 0; + + return 1; +} + +static int secondary_core_restore(void) +{ + int err; + + trace_point(TRACE_BOOT_PLATFORM_IRQ); + + /* initialize interrupts */ + platform_interrupt_init(); + + /* As the memory was not turned off in D0->D0ix and basic structures are + * already allocated, in restore process (D0ix->D0) we have only to + * register and enable required interrupts (it is done in + * schedulers_restore() and idc_restore()). + */ + + /* restore schedulers i.e. register and enable scheduler interrupts */ + trace_point(TRACE_BOOT_PLATFORM_SCHED); + err = schedulers_restore(); + if (err < 0) + return err; + + /* restore idc i.e. 
register and enable idc interrupts */ + trace_point(TRACE_BOOT_PLATFORM_IDC); + err = idc_restore(); + if (err < 0) + return err; + + trace_point(TRACE_BOOT_PLATFORM); + + /* In restore case (D0ix->D0 flow) we do not have to invoke here + * schedule_task(*task_main_get(), 0, UINT64_MAX) as it is done in + * cold boot process (see end of secondary_core_init() function), + * because in restore case memory has not been powered off and task_main + * is already added into scheduler list. + */ + while (1) + wait_for_interrupt(0); +} + +#endif + int secondary_core_init(struct sof *sof) { int err; @@ -80,6 +141,15 @@ int secondary_core_init(struct sof *sof) err = arch_init(); if (err < 0) panic(SOF_IPC_PANIC_ARCH); + + /* check whether we are in a cold boot process or not (e.g. D0->D0ix + * flow when primary core disables all secondary cores). If not, we do + * not have to allocate basic structures like e.g. schedulers, notifier, + * because they have been already allocated. In that case we have to + * register and enable required interrupts. + */ + if (check_restore()) + return secondary_core_restore(); #endif trace_point(TRACE_BOOT_SYS_NOTIFIER); From cd59a289160ebb0ffd3aaf1996308c10dcb71c5f Mon Sep 17 00:00:00 2001 From: Bartosz Kokoszko Date: Wed, 20 Oct 2021 16:25:10 +0200 Subject: [PATCH 6/8] idc: allow idc_free() to disable only irqs This commit: - adds flags argument to idc_free() function; - adds IDC_FREE_IRQ_ONLY flag, which allows idc_free() to disable only interrupts. 
Signed-off-by: Bartosz Kokoszko --- src/arch/xtensa/lib/cpu.c | 2 +- src/drivers/intel/cavs/idc.c | 5 ++++- src/include/sof/drivers/idc.h | 5 ++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/src/arch/xtensa/lib/cpu.c b/src/arch/xtensa/lib/cpu.c index fc2f9458d0ff..b465dd5d0043 100644 --- a/src/arch/xtensa/lib/cpu.c +++ b/src/arch/xtensa/lib/cpu.c @@ -164,7 +164,7 @@ void cpu_power_down_core(void) { arch_interrupt_global_disable(); - idc_free(); + idc_free(0); schedule_free(); diff --git a/src/drivers/intel/cavs/idc.c b/src/drivers/intel/cavs/idc.c index 72f7b0d65827..cea98740f3f5 100644 --- a/src/drivers/intel/cavs/idc.c +++ b/src/drivers/intel/cavs/idc.c @@ -279,7 +279,7 @@ int platform_idc_restore(void) /** * \brief Frees IDC data and unregisters interrupt. */ -void idc_free(void) +void idc_free(uint32_t flags) { struct idc *idc = *idc_get(); int core = cpu_get_id(); @@ -299,5 +299,8 @@ void idc_free(void) idc_write(IPC_IDCTFC(i), core, idctfc); } + if (flags & IDC_FREE_IRQ_ONLY) + return; + schedule_task_free(&idc->idc_task); } diff --git a/src/include/sof/drivers/idc.h b/src/include/sof/drivers/idc.h index a1481ac610d9..800710c26e3e 100644 --- a/src/include/sof/drivers/idc.h +++ b/src/include/sof/drivers/idc.h @@ -94,6 +94,9 @@ /** \brief Max IDC message payload size in bytes. */ #define IDC_MAX_PAYLOAD_SIZE 96 +/** \brief IDC free function flags */ +#define IDC_FREE_IRQ_ONLY BIT(0) /**< disable only irqs */ + /** \brief IDC message payload. 
*/ struct idc_payload { uint8_t data[IDC_MAX_PAYLOAD_SIZE]; @@ -128,7 +131,7 @@ static inline struct idc_payload *idc_payload_get(struct idc *idc, void idc_enable_interrupts(int target_core, int source_core); -void idc_free(void); +void idc_free(uint32_t flags); int platform_idc_init(void); From 7d559a3ad4a234cb2d7e0addc73afa3ed2e3f905 Mon Sep 17 00:00:00 2001 From: Bartosz Kokoszko Date: Wed, 20 Oct 2021 22:14:31 +0200 Subject: [PATCH 7/8] scheduler: allow scheduler_free functions to disable only irqs This commit: - adds flags argument into scheduler_free function pointer in scheduler_ops struct; - adds SOF_SCHEDULER_FREE_IRQ_ONLY flag, which indicates to disable only interrupts in scheduler_free() functions. Signed-off-by: Bartosz Kokoszko --- src/arch/xtensa/lib/cpu.c | 2 +- src/include/sof/schedule/schedule.h | 12 +++++++++--- src/platform/library/schedule/edf_schedule.c | 2 +- src/schedule/edf_schedule.c | 13 +++++++------ src/schedule/ll_schedule.c | 11 +++++++---- src/schedule/zephyr_ll.c | 2 +- tools/testbench/common_test.c | 2 +- 7 files changed, 27 insertions(+), 17 deletions(-) diff --git a/src/arch/xtensa/lib/cpu.c b/src/arch/xtensa/lib/cpu.c index b465dd5d0043..0072bfb46cae 100644 --- a/src/arch/xtensa/lib/cpu.c +++ b/src/arch/xtensa/lib/cpu.c @@ -166,7 +166,7 @@ void cpu_power_down_core(void) idc_free(0); - schedule_free(); + schedule_free(0); free_system_notify(); diff --git a/src/include/sof/schedule/schedule.h b/src/include/sof/schedule/schedule.h index 029aa5288fa2..96c8fec2ff24 100644 --- a/src/include/sof/schedule/schedule.h +++ b/src/include/sof/schedule/schedule.h @@ -35,6 +35,11 @@ enum { SOF_SCHEDULE_COUNT /**< indicates number of scheduler types */ }; +/** \brief Scheduler free available flags */ +#define SOF_SCHEDULER_FREE_IRQ_ONLY BIT(0) /**< Free function disables only + * interrupts + */ + /** * Scheduler operations. * @@ -112,11 +117,12 @@ struct scheduler_ops { /** * Frees scheduler's resources. 
* @param data Private data of selected scheduler. + * @param flags Function specific flags. * @return 0 if succeeded, error code otherwise. * * This operation is optional. */ - void (*scheduler_free)(void *data); + void (*scheduler_free)(void *data, uint32_t flags); /** * Restores scheduler's resources. @@ -286,7 +292,7 @@ static inline int schedule_task_free(struct task *task) } /** See scheduler_ops::scheduler_free */ -static inline void schedule_free(void) +static inline void schedule_free(uint32_t flags) { struct schedulers *schedulers = *arch_schedulers_get(); struct schedule_data *sch; @@ -295,7 +301,7 @@ static inline void schedule_free(void) list_for_item(slist, &schedulers->list) { sch = container_of(slist, struct schedule_data, list); if (sch->ops->scheduler_free) - sch->ops->scheduler_free(sch->data); + sch->ops->scheduler_free(sch->data, flags); } } diff --git a/src/platform/library/schedule/edf_schedule.c b/src/platform/library/schedule/edf_schedule.c index 07e0b1d3a5d2..fd6335673b7c 100644 --- a/src/platform/library/schedule/edf_schedule.c +++ b/src/platform/library/schedule/edf_schedule.c @@ -85,7 +85,7 @@ int scheduler_init_edf(void) return 0; } -static void edf_scheduler_free(void *data) +static void edf_scheduler_free(void *data, uint32_t flags) { free(data); } diff --git a/src/schedule/edf_schedule.c b/src/schedule/edf_schedule.c index d0ecfd8e93d5..9c83804fe217 100644 --- a/src/schedule/edf_schedule.c +++ b/src/schedule/edf_schedule.c @@ -281,21 +281,22 @@ int scheduler_init_edf(void) return 0; } -static void scheduler_free_edf(void *data) +static void scheduler_free_edf(void *data, uint32_t flags) { struct edf_schedule_data *edf_sch = data; - uint32_t flags; + uint32_t irq_flags; - irq_local_disable(flags); + irq_local_disable(irq_flags); /* disable and unregister EDF scheduler interrupt */ interrupt_disable(edf_sch->irq, edf_sch); interrupt_unregister(edf_sch->irq, edf_sch); - /* free main task context */ - task_main_free(); + if (!(flags & 
SOF_SCHEDULER_FREE_IRQ_ONLY)) + /* free main task context */ + task_main_free(); - irq_local_enable(flags); + irq_local_enable(irq_flags); } static int scheduler_restore_edf(void *data) diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index ed7c4f7e4a75..7af8c23f63e7 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -570,17 +570,20 @@ static int reschedule_ll_task(void *data, struct task *task, uint64_t start) return 0; } -static void scheduler_free_ll(void *data) +static void scheduler_free_ll(void *data, uint32_t flags) { struct ll_schedule_data *sch = data; - uint32_t flags; + uint32_t irq_flags; - irq_local_disable(flags); + if (flags & SOF_SCHEDULER_FREE_IRQ_ONLY) + return; + + irq_local_disable(irq_flags); notifier_unregister(sch, NULL, NOTIFIER_CLK_CHANGE_ID(sch->domain->clk)); - irq_local_enable(flags); + irq_local_enable(irq_flags); } static void ll_scheduler_recalculate_tasks(struct ll_schedule_data *sch, diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 59258d397cd1..f8335386bd7d 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -434,7 +434,7 @@ static int zephyr_ll_task_cancel(void *data, struct task *task) * be active, but other schedulers ignore them too... And we don't need to free * the scheduler data - it's allocated in the SYS zone. 
*/ -static void zephyr_ll_scheduler_free(void *data) +static void zephyr_ll_scheduler_free(void *data, uint32_t flags) { struct zephyr_ll *sch = data; diff --git a/tools/testbench/common_test.c b/tools/testbench/common_test.c index 2e813dd54be3..6fc421b550fc 100644 --- a/tools/testbench/common_test.c +++ b/tools/testbench/common_test.c @@ -72,7 +72,7 @@ void tb_pipeline_free(struct sof *sof) free(*notify); /* free all scheduler data */ - schedule_free(); + schedule_free(0); schedulers = arch_schedulers_get(); list_for_item_safe(slist, _slist, &(*schedulers)->list) { sch = container_of(slist, struct schedule_data, list); From 5314d67a0c51092ae8058b6dc3c44bc80efa14aa Mon Sep 17 00:00:00 2001 From: Bartosz Kokoszko Date: Mon, 27 Sep 2021 11:18:57 +0200 Subject: [PATCH 8/8] pm_runtime: prepare secondary cores for power down in d0->d0ix flow Primary core during going to D0i3 state disables all other cores in platform_pg_int_handler() function. Before that, secondary cores should be prepared (disable interrupts, perform writeback) in order to make proper restore flow after D0i3->D0 transition. This commit adds cpu_secondary_cores_prepare_d0ix() function in ipc_pm_gate() handler, which sends idc to secondary cores with information that they should perform preparation for power down in the next platform_wait_for_interrupt() invocation. In platform_wait_for_interrupt() there is cpu_power_down_core() invocation with CPU_POWER_DOWN_MEMORY_ON (performs writeback/invalidate, disables interrupts), when proper flag is set. 
Signed-off-by: Bartosz Kokoszko --- src/arch/host/include/arch/lib/cpu.h | 5 ++ src/arch/xtensa/include/arch/lib/cpu.h | 13 +++- src/arch/xtensa/lib/cpu.c | 61 ++++++++++++++++--- src/idc/idc.c | 16 ++++- src/include/sof/drivers/idc.h | 4 ++ src/include/sof/lib/cpu.h | 5 ++ src/ipc/ipc3/handler.c | 9 +++ .../include/platform/lib/pm_runtime.h | 6 ++ .../include/platform/lib/pm_runtime.h | 6 ++ .../icelake/include/platform/lib/pm_runtime.h | 6 ++ .../intel/cavs/include/cavs/lib/pm_runtime.h | 3 + src/platform/intel/cavs/lib/pm_runtime.c | 39 +++++++++++- src/platform/intel/cavs/platform.c | 12 ++++ .../include/platform/lib/pm_runtime.h | 6 ++ .../include/platform/lib/pm_runtime.h | 6 ++ zephyr/wrapper.c | 8 ++- 16 files changed, 191 insertions(+), 14 deletions(-) diff --git a/src/arch/host/include/arch/lib/cpu.h b/src/arch/host/include/arch/lib/cpu.h index 6e1dedc4b458..e2cfb76260b7 100644 --- a/src/arch/host/include/arch/lib/cpu.h +++ b/src/arch/host/include/arch/lib/cpu.h @@ -39,6 +39,11 @@ static inline int arch_cpu_restore_secondary_cores(void) return 0; } +static inline int arch_cpu_secondary_cores_prepare_d0ix(void) +{ + return 0; +} + static inline void cpu_write_threadptr(int threadptr) { } diff --git a/src/arch/xtensa/include/arch/lib/cpu.h b/src/arch/xtensa/include/arch/lib/cpu.h index a87645d35393..acd89a114f08 100644 --- a/src/arch/xtensa/include/arch/lib/cpu.h +++ b/src/arch/xtensa/include/arch/lib/cpu.h @@ -11,10 +11,17 @@ #define __ARCH_LIB_CPU_H__ #include +#include #if CONFIG_MULTICORE -void cpu_power_down_core(void); +/** \brief CPU power down available flags */ +#define CPU_POWER_DOWN_MEMORY_ON BIT(0) /**< Power down core with memory + * enabled (required in d0ix + * flow) + */ + +void cpu_power_down_core(uint32_t flags); void cpu_alloc_core_context(int id); @@ -28,6 +35,8 @@ int arch_cpu_enabled_cores(void); int arch_cpu_restore_secondary_cores(void); +int arch_cpu_secondary_cores_prepare_d0ix(void); + #else static inline int 
arch_cpu_enable_core(int id) { return 0; } @@ -40,6 +49,8 @@ static inline int arch_cpu_enabled_cores(void) { return 1; } static inline int arch_cpu_restore_secondary_cores(void) {return 0; } +static inline int arch_cpu_secondary_cores_prepare_d0ix(void) {return 0; } + #endif static inline int arch_cpu_get_id(void) diff --git a/src/arch/xtensa/lib/cpu.c b/src/arch/xtensa/lib/cpu.c index 0072bfb46cae..1802384fbedb 100644 --- a/src/arch/xtensa/lib/cpu.c +++ b/src/arch/xtensa/lib/cpu.c @@ -160,25 +160,44 @@ void cpu_alloc_core_context(int core) dcache_writeback_region(sof_get(), sizeof(*sof_get())); } -void cpu_power_down_core(void) +void cpu_power_down_core(uint32_t flags) { arch_interrupt_global_disable(); - idc_free(0); + /* Power down with memory on is performed by secondary cores during + * d0 -> d0ix before they are disabled by primary core. + */ + if (flags & CPU_POWER_DOWN_MEMORY_ON) { + /* disable idc interrupts */ + idc_free(IDC_FREE_IRQ_ONLY); + + /* disable scheduler interrupts */ + schedule_free(SOF_SCHEDULER_FREE_IRQ_ONLY); + + /* data writeback/invalidate */ + dcache_writeback_invalidate_all(); - schedule_free(0); + /* after writeback/invalidate secondary core is prepared for + * power off - prepare_d0ix_core_mask flag can be disabled + */ + platform_pm_runtime_prepare_d0ix_dis(cpu_get_id()); + } else { + idc_free(0); - free_system_notify(); + schedule_free(0); - /* free entire sys heap, an instance dedicated for this core */ - free_heap(SOF_MEM_ZONE_SYS); + free_system_notify(); - dcache_writeback_invalidate_all(); + /* free entire sys heap, an instance dedicated for this core */ + free_heap(SOF_MEM_ZONE_SYS); - /* Turn off stack memory for core */ - pm_runtime_put(CORE_MEMORY_POW, cpu_get_id()); + dcache_writeback_invalidate_all(); - pm_runtime_put(PM_RUNTIME_DSP, PWRD_BY_TPLG | cpu_get_id()); + /* Turn off stack memory for core */ + pm_runtime_put(CORE_MEMORY_POW, cpu_get_id()); + + pm_runtime_put(PM_RUNTIME_DSP, PWRD_BY_TPLG | cpu_get_id()); + } 
trace_point(0); @@ -214,3 +233,25 @@ int arch_cpu_restore_secondary_cores(void) return 0; } + +int arch_cpu_secondary_cores_prepare_d0ix(void) +{ + struct idc_msg prepare_msg = { IDC_MSG_PREPARE_D0ix, + IDC_MSG_PREPARE_D0ix_EXT }; + int ret, id; + + for (id = 0; id < CONFIG_CORE_COUNT; id++) { + if (arch_cpu_is_core_enabled(id) && id != PLATFORM_PRIMARY_CORE_ID) { + prepare_msg.core = id; + + /* send IDC prepare message to all enabled secondary + * cores. + */ + ret = idc_send_msg(&prepare_msg, IDC_BLOCKING); + if (ret < 0) + return ret; + } + } + + return 0; +} diff --git a/src/idc/idc.c b/src/idc/idc.c index 70509417dfd1..1c512e2541c9 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -268,6 +269,16 @@ static int idc_reset(uint32_t comp_id) return ret; } +static void idc_prepare_d0ix(void) +{ + /* set prepare_d0ix flag, which indicates that in the next + * platform_wait_for_interrupt invocation(), core should get ready for + * d0ix power down - it is required by D0->D0ix flow, when primary + * core disables all secondary cores. + */ + platform_pm_runtime_prepare_d0ix_en(cpu_get_id()); +} + /** * \brief Executes IDC message based on type. * \param[in,out] msg Pointer to IDC message. 
@@ -279,7 +290,7 @@ void idc_cmd(struct idc_msg *msg) switch (type) { case iTS(IDC_MSG_POWER_DOWN): - cpu_power_down_core(); + cpu_power_down_core(0); break; case iTS(IDC_MSG_NOTIFY): notifier_notify_remote(); @@ -299,6 +310,9 @@ void idc_cmd(struct idc_msg *msg) case iTS(IDC_MSG_RESET): ret = idc_reset(msg->extension); break; + case iTS(IDC_MSG_PREPARE_D0ix): + idc_prepare_d0ix(); + break; default: tr_err(&idc_tr, "idc_cmd(): invalid msg->header = %u", msg->header); diff --git a/src/include/sof/drivers/idc.h b/src/include/sof/drivers/idc.h index 800710c26e3e..1c078b826519 100644 --- a/src/include/sof/drivers/idc.h +++ b/src/include/sof/drivers/idc.h @@ -88,6 +88,10 @@ #define IDC_MSG_RESET IDC_TYPE(0x8) #define IDC_MSG_RESET_EXT(x) IDC_EXTENSION(x) +/** \brief IDC prepare D0ix message. */ +#define IDC_MSG_PREPARE_D0ix IDC_TYPE(0x9) +#define IDC_MSG_PREPARE_D0ix_EXT IDC_EXTENSION(0x0) + /** \brief Decodes IDC message type. */ #define iTS(x) (((x) >> IDC_TYPE_SHIFT) & IDC_TYPE_MASK) diff --git a/src/include/sof/lib/cpu.h b/src/include/sof/lib/cpu.h index 7138236e9223..1b9417f595ca 100644 --- a/src/include/sof/lib/cpu.h +++ b/src/include/sof/lib/cpu.h @@ -61,6 +61,11 @@ static inline int cpu_restore_secondary_cores(void) return arch_cpu_restore_secondary_cores(); } +static inline int cpu_secondary_cores_prepare_d0ix(void) +{ + return arch_cpu_secondary_cores_prepare_d0ix(); +} + #endif #endif /* __SOF_LIB_CPU_H__ */ diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 4020cc306502..f3a4c47f948b 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -707,6 +707,15 @@ static int ipc_pm_gate(uint32_t header) cpu_restore_secondary_cores(); #endif } else { + /* before we enable pm runtime and perform D0->D0ix flow + * (primary core powers off secondary cores in + * platform_pg_int_handler) we have to prepare all secondary + * cores data for powering off (disable interrupt, perform + * cache writeback). 
+ */ +#if (CONFIG_CAVS_LPS) + cpu_secondary_cores_prepare_d0ix(); +#endif pm_runtime_enable(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID); } diff --git a/src/platform/apollolake/include/platform/lib/pm_runtime.h b/src/platform/apollolake/include/platform/lib/pm_runtime.h index 74670e099b18..ff15b499dc35 100644 --- a/src/platform/apollolake/include/platform/lib/pm_runtime.h +++ b/src/platform/apollolake/include/platform/lib/pm_runtime.h @@ -48,6 +48,12 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index); void platform_pm_runtime_disable(uint32_t context, uint32_t index); +void platform_pm_runtime_prepare_d0ix_en(uint32_t index); + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index); + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index); + bool platform_pm_runtime_is_active(uint32_t context, uint32_t index); /** diff --git a/src/platform/cannonlake/include/platform/lib/pm_runtime.h b/src/platform/cannonlake/include/platform/lib/pm_runtime.h index edda6e691ff1..08d28f1b49b6 100644 --- a/src/platform/cannonlake/include/platform/lib/pm_runtime.h +++ b/src/platform/cannonlake/include/platform/lib/pm_runtime.h @@ -47,6 +47,12 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index); void platform_pm_runtime_disable(uint32_t context, uint32_t index); +void platform_pm_runtime_prepare_d0ix_en(uint32_t index); + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index); + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index); + bool platform_pm_runtime_is_active(uint32_t context, uint32_t index); /** diff --git a/src/platform/icelake/include/platform/lib/pm_runtime.h b/src/platform/icelake/include/platform/lib/pm_runtime.h index 00ea63026438..4b3194cd2c1a 100644 --- a/src/platform/icelake/include/platform/lib/pm_runtime.h +++ b/src/platform/icelake/include/platform/lib/pm_runtime.h @@ -47,6 +47,12 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index); void platform_pm_runtime_disable(uint32_t context, uint32_t 
index); +void platform_pm_runtime_prepare_d0ix_en(uint32_t index); + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index); + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index); + bool platform_pm_runtime_is_active(uint32_t context, uint32_t index); /** diff --git a/src/platform/intel/cavs/include/cavs/lib/pm_runtime.h b/src/platform/intel/cavs/include/cavs/lib/pm_runtime.h index 6dbae65a09e3..1b38e18c632d 100644 --- a/src/platform/intel/cavs/include/cavs/lib/pm_runtime.h +++ b/src/platform/intel/cavs/include/cavs/lib/pm_runtime.h @@ -31,6 +31,9 @@ struct cavs_pm_runtime_data { bool dsp_d0; /**< dsp target D0(true) or D0ix(false) */ int host_dma_l1_sref; /**< ref counter for Host DMA accesses */ uint32_t sleep_core_mask; /**< represents cores in waiti state */ + uint32_t prepare_d0ix_core_mask; /**< indicates whether core needs */ + /**< to prepare to d0ix power down */ + /**< before next waiti */ int dsp_client_bitmap[CONFIG_CORE_COUNT]; /**< simple pwr override */ }; diff --git a/src/platform/intel/cavs/lib/pm_runtime.c b/src/platform/intel/cavs/lib/pm_runtime.c index 957e484ec2f9..e2e3b8c7f6fa 100644 --- a/src/platform/intel/cavs/lib/pm_runtime.c +++ b/src/platform/intel/cavs/lib/pm_runtime.c @@ -128,7 +128,10 @@ static inline bool cavs_pm_runtime_is_active_dsp(void) struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - return pprd->dsp_d0; + /* even if dsp_d0 is false (dsp in D0ix state) function will return true + * until secondary cores be prepared of d0ix power down. 
+ */ + return pprd->dsp_d0 || pprd->prepare_d0ix_core_mask; } #if CONFIG_INTEL_SSP @@ -567,6 +570,40 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index) } } +void platform_pm_runtime_prepare_d0ix_en(uint32_t index) +{ + struct pm_runtime_data *prd = pm_runtime_data_get(); + struct cavs_pm_runtime_data *pprd = prd->platform_data; + uint32_t flags; + + spin_lock_irq(&prd->lock, flags); + + pprd->prepare_d0ix_core_mask |= BIT(index); + + spin_unlock_irq(&prd->lock, flags); +} + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index) +{ + struct pm_runtime_data *prd = pm_runtime_data_get(); + struct cavs_pm_runtime_data *pprd = prd->platform_data; + uint32_t flags; + + spin_lock_irq(&prd->lock, flags); + + pprd->prepare_d0ix_core_mask &= ~BIT(index); + + spin_unlock_irq(&prd->lock, flags); +} + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index) +{ + struct pm_runtime_data *prd = pm_runtime_data_get(); + struct cavs_pm_runtime_data *pprd = prd->platform_data; + + return pprd->prepare_d0ix_core_mask & BIT(index); +} + void platform_pm_runtime_disable(uint32_t context, uint32_t index) { switch (context) { diff --git a/src/platform/intel/cavs/platform.c b/src/platform/intel/cavs/platform.c index 40f6de51d339..5452d4564234 100644 --- a/src/platform/intel/cavs/platform.c +++ b/src/platform/intel/cavs/platform.c @@ -538,6 +538,18 @@ void platform_wait_for_interrupt(int level) { platform_clock_on_waiti(); +#ifdef CONFIG_MULTICORE + int cpu_id = cpu_get_id(); + + /* for secondary cores, if prepare_d0ix_core_mask flag is set for + * specific core, we should prepare for power down before going to wait + * - it is required by D0->D0ix flow. 
+ */ + if (cpu_id != PLATFORM_PRIMARY_CORE_ID && + platform_pm_runtime_prepare_d0ix_is_req(cpu_id)) + cpu_power_down_core(CPU_POWER_DOWN_MEMORY_ON); +#endif + #if (CONFIG_CAVS_LPS) if (pm_runtime_is_active(PM_RUNTIME_DSP, PLATFORM_PRIMARY_CORE_ID) || cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) diff --git a/src/platform/suecreek/include/platform/lib/pm_runtime.h b/src/platform/suecreek/include/platform/lib/pm_runtime.h index ba2c6f85c2cb..11003c2eb2bd 100644 --- a/src/platform/suecreek/include/platform/lib/pm_runtime.h +++ b/src/platform/suecreek/include/platform/lib/pm_runtime.h @@ -49,6 +49,12 @@ void platform_pm_runtime_disable(uint32_t context, uint32_t index); bool platform_pm_runtime_is_active(uint32_t context, uint32_t index); +void platform_pm_runtime_prepare_d0ix_en(uint32_t index); + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index); + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index); + #endif /* __PLATFORM_LIB_PM_RUNTIME_H__ */ #else diff --git a/src/platform/tigerlake/include/platform/lib/pm_runtime.h b/src/platform/tigerlake/include/platform/lib/pm_runtime.h index 42c027cdc70e..7bea94898146 100644 --- a/src/platform/tigerlake/include/platform/lib/pm_runtime.h +++ b/src/platform/tigerlake/include/platform/lib/pm_runtime.h @@ -47,6 +47,12 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index); void platform_pm_runtime_disable(uint32_t context, uint32_t index); +void platform_pm_runtime_prepare_d0ix_en(uint32_t index); + +void platform_pm_runtime_prepare_d0ix_dis(uint32_t index); + +int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index); + bool platform_pm_runtime_is_active(uint32_t context, uint32_t index); /** diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index bbf169d36645..e8875badf7d5 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -764,6 +764,12 @@ int arch_cpu_restore_secondary_cores(void) return 0; } +int arch_cpu_secondary_cores_prepare_d0ix(void) +{ + /* TODO: use Zephyr version */ + return 0; +} + 
void arch_cpu_disable_core(int id) { /* TODO: call Zephyr API */ @@ -774,7 +780,7 @@ int arch_cpu_is_core_enabled(int id) return arch_cpu_active(id); } -void cpu_power_down_core(void) +void cpu_power_down_core(uint32_t flags) { /* TODO: use Zephyr version */ }