From 9c18e81494fa94411fc9dc1be2fdad941ee64912 Mon Sep 17 00:00:00 2001 From: Liam Girdwood Date: Wed, 19 Jan 2022 13:15:03 +0000 Subject: [PATCH 1/2] zephyr: spinlock: Align spinlock API to use Zephyr API Align the SOF spinlock API with the Zephyr spinlock API. Currently the SOF spinlock has two locking variants: with and without disabling local IRQs. Since taking a spinlock potentially puts the CPU into busy-looping, periods of time for holding spinlocks should be kept as short as possible. This also means that usually the CPU holding a spinlock shouldn't be interrupted. So usually spinlocks should be taken in atomic contexts. Therefore using the version not locking local IRQs should only be done when IRQs are already disabled. This then saves several CPU cycles, avoiding reading and writing CPU status again. However, this should only be done with extreme care and it introduces potential for current and future bugs, including dead-locks. The Zephyr spinlock API always disables local IRQs. This makes it simpler and less error-prone. Switching to it potentially wastes several CPU cycles in some locations, but makes the code more robust. This is the first part of the work for spinlock Zephyr alignment; subsequent updates will align headers and associated spinlock dependencies. 
Signed-off-by: Liam Girdwood Signed-off-by: Guennadi Liakhovetski --- src/arch/host/include/arch/spinlock.h | 16 ++--- src/arch/xtensa/include/arch/spinlock.h | 64 +++---------------- src/audio/component.c | 12 ++-- src/audio/kpb.c | 14 ++-- src/audio/pipeline/pipeline-graph.c | 15 +++-- src/drivers/amd/renoir/acp_bt_dma.c | 16 ++--- src/drivers/amd/renoir/acp_dma.c | 16 ++--- src/drivers/amd/renoir/acp_dmic_dma.c | 16 ++--- src/drivers/amd/renoir/acp_sp_dma.c | 17 +++-- src/drivers/amd/renoir/interrupt.c | 5 +- src/drivers/dw/dma.c | 26 ++++---- src/drivers/dw/ssi-spi.c | 18 +++--- src/drivers/generic/dummy-dma.c | 20 +++--- src/drivers/imx/edma.c | 16 ++--- src/drivers/imx/interrupt-irqsteer.c | 9 +-- src/drivers/intel/baytrail/ssp.c | 15 +++-- src/drivers/intel/cavs/interrupt.c | 10 +-- src/drivers/intel/cavs/ipc.c | 6 +- src/drivers/intel/dmic/dmic.c | 25 ++++---- src/drivers/intel/haswell/ssp.c | 15 +++-- src/drivers/intel/hda/hda-dma.c | 14 ++-- src/drivers/intel/ssp/mn.c | 30 +++++---- src/drivers/intel/ssp/ssp.c | 20 +++--- src/drivers/interrupt.c | 48 +++++++------- src/drivers/mediatek/mt8195/afe-memif.c | 16 ++--- src/drivers/mediatek/mt8195/interrupt.c | 5 +- src/idc/idc.c | 6 +- src/include/sof/audio/component_ext.h | 2 +- src/include/sof/coherent.h | 33 +++++----- src/include/sof/drivers/interrupt.h | 4 +- src/include/sof/ipc/common.h | 2 +- src/include/sof/ipc/msg.h | 6 +- src/include/sof/lib/clk.h | 2 +- src/include/sof/lib/dai.h | 2 +- src/include/sof/lib/dma.h | 2 +- src/include/sof/lib/mm_heap.h | 2 +- src/include/sof/lib/notifier.h | 2 +- src/include/sof/lib/pm_runtime.h | 2 +- src/include/sof/schedule/ll_schedule_domain.h | 4 +- src/include/sof/spinlock.h | 59 ++++++----------- src/include/sof/trace/dma-trace.h | 2 +- src/ipc/ipc-common.c | 26 ++++---- src/ipc/ipc3/handler.c | 10 +-- src/ipc/ipc3/helper.c | 14 ++-- src/lib/alloc.c | 38 +++++------ src/lib/clk.c | 6 +- src/lib/dai.c | 12 ++-- src/lib/dma.c | 12 ++-- src/lib/notifier.c | 12 ++-- 
src/lib/pm_runtime.c | 3 +- src/platform/amd/renoir/lib/clk.c | 2 +- src/platform/amd/renoir/lib/dai.c | 6 +- src/platform/amd/renoir/lib/dma.c | 2 +- src/platform/baytrail/lib/clk.c | 3 +- src/platform/baytrail/lib/dai.c | 3 +- src/platform/baytrail/lib/dma.c | 2 +- src/platform/haswell/lib/clk.c | 3 +- src/platform/haswell/lib/dai.c | 2 +- src/platform/haswell/lib/dma.c | 2 +- src/platform/imx8/lib/clk.c | 3 +- src/platform/imx8/lib/dai.c | 4 +- src/platform/imx8/lib/dma.c | 2 +- src/platform/imx8m/lib/clk.c | 3 +- src/platform/imx8m/lib/dai.c | 2 +- src/platform/imx8m/lib/dma.c | 2 +- src/platform/imx8ulp/lib/clk.c | 2 +- src/platform/imx8ulp/lib/dai.c | 2 +- src/platform/imx8ulp/lib/dma.c | 2 +- src/platform/intel/cavs/lib/clk.c | 29 ++++----- src/platform/intel/cavs/lib/dai.c | 9 ++- src/platform/intel/cavs/lib/dma.c | 2 +- src/platform/intel/cavs/lib/pm_runtime.c | 36 +++++------ src/platform/mt8186/lib/clk.c | 2 +- src/platform/mt8186/lib/dma.c | 2 +- src/platform/mt8195/lib/clk.c | 2 +- src/platform/mt8195/lib/dai.c | 2 +- src/platform/mt8195/lib/dma.c | 2 +- src/schedule/ll_schedule.c | 29 +++++---- src/schedule/zephyr.c | 6 +- src/schedule/zephyr_domain.c | 12 ++-- src/spinlock.c | 18 +++--- src/trace/dma-trace.c | 28 ++++---- src/trace/trace.c | 28 ++++---- test/cmocka/src/common_mocks.c | 7 +- 84 files changed, 478 insertions(+), 530 deletions(-) diff --git a/src/arch/host/include/arch/spinlock.h b/src/arch/host/include/arch/spinlock.h index 50fcd8f42d8a..fa6298925efc 100644 --- a/src/arch/host/include/arch/spinlock.h +++ b/src/arch/host/include/arch/spinlock.h @@ -10,16 +10,12 @@ #ifndef __ARCH_SPINLOCK_H__ #define __ARCH_SPINLOCK_H__ -typedef struct { -} spinlock_t; - -static inline void arch_spinlock_init(spinlock_t *lock) { } -static inline void arch_spin_lock(spinlock_t *lock) {} -static inline int arch_try_lock(spinlock_t *lock) -{ - return 1; -} -static inline void arch_spin_unlock(spinlock_t *lock) {} +struct k_spinlock { +}; + +static inline 
void arch_spinlock_init(struct k_spinlock *lock) {} +static inline void arch_spin_lock(struct k_spinlock *lock) {} +static inline void arch_spin_unlock(struct k_spinlock *lock) {} #endif /* __ARCH_SPINLOCK_H__ */ diff --git a/src/arch/xtensa/include/arch/spinlock.h b/src/arch/xtensa/include/arch/spinlock.h index afb23479f864..44874cd946df 100644 --- a/src/arch/xtensa/include/arch/spinlock.h +++ b/src/arch/xtensa/include/arch/spinlock.h @@ -13,21 +13,21 @@ #include #include -typedef struct { +struct k_spinlock { volatile uint32_t lock; #if CONFIG_DEBUG_LOCKS uint32_t user; #endif -} spinlock_t; +}; -static inline void arch_spinlock_init(spinlock_t *lock) +static inline void arch_spinlock_init(struct k_spinlock *lock) { lock->lock = 0; } #if XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ -static inline void arch_spin_lock(spinlock_t *lock) +static inline void arch_spin_lock(struct k_spinlock *lock) { uint32_t result; @@ -43,27 +43,9 @@ static inline void arch_spin_lock(spinlock_t *lock) : "memory"); } -static inline int arch_try_lock(spinlock_t *lock) -{ - uint32_t result; - - __asm__ __volatile__( - " movi %0, 0\n" - " l32ex %0, %1\n" - " movi %0, 1\n" - " s32ex %0, %1\n" - " getex %0\n" - : "=&a" (result) - : "a" (&lock->lock) - : "memory"); - - /* return 0 for failed lock, 1 otherwise */ - return result ? 0 : 1; -} - #elif XCHAL_HAVE_S32C1I -static inline void arch_spin_lock(spinlock_t *lock) +static inline void arch_spin_lock(struct k_spinlock *lock) { uint32_t result; @@ -82,23 +64,6 @@ static inline void arch_spin_lock(spinlock_t *lock) : "memory"); } -static inline int arch_try_lock(spinlock_t *lock) -{ - uint32_t result; - - __asm__ __volatile__( - " movi %0, 0\n" - " wsr %0, scompare1\n" - " movi %0, 1\n" - " s32c1i %0, %1, 0\n" - : "=&a" (result) - : "a" (&lock->lock) - : "memory"); - - /* return 0 for failed lock, 1 otherwise */ - return result ? 
0 : 1; -} - #else #if CONFIG_CORE_COUNT > 1 @@ -111,7 +76,7 @@ static inline int arch_try_lock(spinlock_t *lock) * The ISA has no atomic operations so use integer arithmetic on uniprocessor systems. * This helps support GCC and qemu emulation of certain targets. */ -static inline void arch_spin_lock(spinlock_t *lock) +static inline void arch_spin_lock(struct k_spinlock *lock) { uint32_t result; @@ -123,24 +88,11 @@ static inline void arch_spin_lock(spinlock_t *lock) } while (!result); } -static inline int arch_try_lock(spinlock_t *lock) -{ - uint32_t result; - - if (lock->lock == 0) { - lock->lock = 1; - result = 1; - } - - /* return 0 for failed lock, 1 otherwise */ - return result ? 0 : 1; -} - #endif /* XCHAL_HAVE_EXCLUSIVE && CONFIG_XTENSA_EXCLUSIVE && __XCC__ */ #if XCHAL_HAVE_EXCLUSIVE || XCHAL_HAVE_S32C1I -static inline void arch_spin_unlock(spinlock_t *lock) +static inline void arch_spin_unlock(struct k_spinlock *lock) { uint32_t result; @@ -164,7 +116,7 @@ static inline void arch_spin_unlock(spinlock_t *lock) * The ISA has no atomic operations so use integer arithmetic on uniprocessor systems. * This helps support GCC and qemu emulation of certain targets. 
*/ -static inline void arch_spin_unlock(spinlock_t *lock) +static inline void arch_spin_unlock(struct k_spinlock *lock) { uint32_t result; diff --git a/src/audio/component.c b/src/audio/component.c index f6d0daf8b9f1..20f6a9e00862 100644 --- a/src/audio/component.c +++ b/src/audio/component.c @@ -32,10 +32,11 @@ DECLARE_TR_CTX(comp_tr, SOF_UUID(comp_uuid), LOG_LEVEL_INFO); int comp_register(struct comp_driver_info *drv) { struct comp_driver_list *drivers = comp_drivers_get(); + k_spinlock_key_t key; - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); list_item_prepend(&drv->list, &drivers->list); - spin_unlock(&drivers->lock); + k_spin_unlock(&drivers->lock, key); return 0; } @@ -43,10 +44,11 @@ int comp_register(struct comp_driver_info *drv) void comp_unregister(struct comp_driver_info *drv) { struct comp_driver_list *drivers = comp_drivers_get(); + k_spinlock_key_t key; - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); list_item_del(&drv->list); - spin_unlock(&drivers->lock); + k_spin_unlock(&drivers->lock, key); } /* NOTE: Keep the component state diagram up to date: @@ -141,7 +143,7 @@ void sys_comp_init(struct sof *sof) sof->comp_drivers = platform_shared_get(&cd, sizeof(cd)); list_init(&sof->comp_drivers->list); - spinlock_init(&sof->comp_drivers->lock); + k_spinlock_init(&sof->comp_drivers->lock); } void comp_get_copy_limits(struct comp_buffer *source, struct comp_buffer *sink, diff --git a/src/audio/kpb.c b/src/audio/kpb.c index 51621e1439b7..de5dae05624f 100644 --- a/src/audio/kpb.c +++ b/src/audio/kpb.c @@ -62,7 +62,7 @@ DECLARE_SOF_UUID("kpb-task", kpb_task_uuid, 0xe50057a5, 0x8b27, 0x4db4, struct comp_data { enum kpb_state state; /**< current state of KPB component */ uint32_t state_log; /**< keeps record of KPB recent states */ - spinlock_t lock; /**< locking mechanism for read pointer calculations */ + struct k_spinlock lock; /**< locking mechanism for read pointer calculations */ struct sof_kpb_config config; /**< 
component configuration data */ struct history_data hd; /** data related to history buffer */ struct task draining_task; @@ -1024,7 +1024,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) * kpb->config.channels; size_t period_bytes_limit; - uint32_t flags; + k_spinlock_key_t key; comp_info(dev, "kpb_init_draining(): requested draining of %d [ms] from history buffer", cli->drain_req); @@ -1045,7 +1045,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) * in the history buffer. All we have to do now is to calculate * read pointer from which we will start draining. */ - spin_lock_irq(&kpb->lock, flags); + key = k_spin_lock(&kpb->lock); kpb_change_state(kpb, KPB_STATE_INIT_DRAINING); @@ -1103,7 +1103,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) } while (buff != first_buff); - spin_unlock_irq(&kpb->lock, flags); + k_spin_unlock(&kpb->lock, key); /* Should we drain in synchronized mode (sync_draining_mode)? * Note! 
We have already verified host params during @@ -1190,7 +1190,7 @@ static enum task_state kpb_draining_task(void *arg) struct comp_data *kpb = comp_get_drvdata(draining_data->dev); bool sync_mode_on = draining_data->sync_mode_on; bool pm_is_active; - uint32_t flags; + k_spinlock_key_t key; comp_cl_info(&comp_kpb, "kpb_draining_task(), start."); @@ -1280,7 +1280,7 @@ static enum task_state kpb_draining_task(void *arg) */ comp_cl_info(&comp_kpb, "kpb: update drain_req by %d", *rt_stream_update); - spin_lock_irq(&kpb->lock, flags); + key = k_spin_lock(&kpb->lock); drain_req += *rt_stream_update; *rt_stream_update = 0; if (!drain_req && kpb->state == KPB_STATE_DRAINING) { @@ -1292,7 +1292,7 @@ static enum task_state kpb_draining_task(void *arg) */ kpb_change_state(kpb, KPB_STATE_HOST_COPY); } - spin_unlock_irq(&kpb->lock, flags); + k_spin_unlock(&kpb->lock, key); } } diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 5dbcd9f47505..126aba9e0663 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -36,7 +36,7 @@ DECLARE_TR_CTX(pipe_tr, SOF_UUID(pipe_uuid), LOG_LEVEL_INFO); /* lookup table to determine busy/free pipeline metadata objects */ struct pipeline_posn { bool posn_offset[PPL_POSN_OFFSETS]; /**< available offsets */ - spinlock_t lock; /**< lock mechanism */ + struct k_spinlock lock; /**< lock mechanism */ }; /* the pipeline position lookup table */ static SHARED_DATA struct pipeline_posn pipeline_posn; @@ -60,8 +60,9 @@ static inline int pipeline_posn_offset_get(uint32_t *posn_offset) struct pipeline_posn *pipeline_posn = pipeline_posn_get(); int ret = -EINVAL; uint32_t i; + k_spinlock_key_t key; - spin_lock(&pipeline_posn->lock); + key = k_spin_lock(&pipeline_posn->lock); for (i = 0; i < PPL_POSN_OFFSETS; ++i) { if (!pipeline_posn->posn_offset[i]) { @@ -73,7 +74,7 @@ static inline int pipeline_posn_offset_get(uint32_t *posn_offset) } - spin_unlock(&pipeline_posn->lock); + 
k_spin_unlock(&pipeline_posn->lock, key); return ret; } @@ -86,20 +87,20 @@ static inline void pipeline_posn_offset_put(uint32_t posn_offset) { struct pipeline_posn *pipeline_posn = pipeline_posn_get(); int i = posn_offset / sizeof(struct sof_ipc_stream_posn); + k_spinlock_key_t key; - spin_lock(&pipeline_posn->lock); + key = k_spin_lock(&pipeline_posn->lock); pipeline_posn->posn_offset[i] = false; - - spin_unlock(&pipeline_posn->lock); + k_spin_unlock(&pipeline_posn->lock, key); } void pipeline_posn_init(struct sof *sof) { sof->pipeline_posn = platform_shared_get(&pipeline_posn, sizeof(pipeline_posn)); - spinlock_init(&sof->pipeline_posn->lock); + k_spinlock_init(&sof->pipeline_posn->lock); } /* create new pipeline - returns pipeline id or negative error */ diff --git a/src/drivers/amd/renoir/acp_bt_dma.c b/src/drivers/amd/renoir/acp_bt_dma.c index 5bfcb8c291c1..9ffd8fd137fc 100644 --- a/src/drivers/amd/renoir/acp_bt_dma.c +++ b/src/drivers/amd/renoir/acp_bt_dma.c @@ -52,24 +52,24 @@ static uint64_t prev_rx_pos; static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_bt_dma_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_bt_dma_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return channel; } @@ -77,13 +77,13 @@ static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, /* channel must not be running when this is 
called */ static void acp_dai_bt_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); } static int acp_dai_bt_dma_start(struct dma_chan_data *channel) diff --git a/src/drivers/amd/renoir/acp_dma.c b/src/drivers/amd/renoir/acp_dma.c index 75ce182e8297..b736033fca81 100644 --- a/src/drivers/amd/renoir/acp_dma.c +++ b/src/drivers/amd/renoir/acp_dma.c @@ -174,24 +174,24 @@ static void dma_reconfig(struct dma_chan_data *channel, uint32_t bytes) static struct dma_chan_data *acp_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acpdma_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acpdma_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); /* reset read and write pointers */ struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel); @@ -202,12 +202,12 @@ static struct dma_chan_data *acp_dma_channel_get(struct dma *dma, static void acp_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = 
COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); /* reset read and write pointer */ struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel); diff --git a/src/drivers/amd/renoir/acp_dmic_dma.c b/src/drivers/amd/renoir/acp_dmic_dma.c index 90152353d921..71faeb36090f 100644 --- a/src/drivers/amd/renoir/acp_dmic_dma.c +++ b/src/drivers/amd/renoir/acp_dmic_dma.c @@ -42,38 +42,38 @@ DECLARE_TR_CTX(acp_dmic_dma_tr, SOF_UUID(acp_dmic_dma_uuid), LOG_LEVEL_INFO); static struct dma_chan_data *acp_dmic_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_dmic_dma_tr, "ACP_DMIC: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_dmic_dma_tr, "ACP_DMIC: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return channel; } static void acp_dmic_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); } static int acp_dmic_dma_start(struct dma_chan_data *channel) diff --git a/src/drivers/amd/renoir/acp_sp_dma.c b/src/drivers/amd/renoir/acp_sp_dma.c index 
e291af144a4b..7048cfbeab32 100644 --- a/src/drivers/amd/renoir/acp_sp_dma.c +++ b/src/drivers/amd/renoir/acp_sp_dma.c @@ -48,38 +48,37 @@ static uint32_t sp_buff_size; static struct dma_chan_data *acp_dai_sp_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_sp_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&acp_sp_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return channel; } /* channel must not be running when this is called */ static void acp_dai_sp_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); - + k_spin_unlock(&channel->dma->lock, key); } static int acp_dai_sp_dma_start(struct dma_chan_data *channel) diff --git a/src/drivers/amd/renoir/interrupt.c b/src/drivers/amd/renoir/interrupt.c index f08649ef2460..80a9f02fc097 100644 --- a/src/drivers/amd/renoir/interrupt.c +++ b/src/drivers/amd/renoir/interrupt.c @@ -98,13 +98,14 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, struct irq_desc *child = NULL; int bit; bool handled; + k_spinlock_key_t key; while (status) { bit = get_first_irq(status); handled = false; status &= ~(1ull << bit); - 
spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); list_for_item(clist, &cascade->child[bit].list) { child = container_of(clist, struct irq_desc, irq_list); @@ -115,7 +116,7 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&acp_irq_tr, "irq_handler(): not handled, bit %d", diff --git a/src/drivers/dw/dma.c b/src/drivers/dw/dma.c index d09c297cf2b3..14246bde91bb 100644 --- a/src/drivers/dw/dma.c +++ b/src/drivers/dw/dma.c @@ -185,13 +185,13 @@ static inline void dw_dma_chan_reload_lli_cb(void *arg, enum notify_id type, static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; int i; tr_info(&dwdma_tr, "dw_dma_channel_get(): dma %d request channel %d", dma->plat_data.id, req_chan); - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); /* find first free non draining channel */ for (i = 0; i < dma->plat_data.channels; i++) { @@ -209,12 +209,12 @@ static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, #endif /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return &dma->chan[i]; } /* DMA controller has no free channels */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&dwdma_tr, "dw_dma_channel_get(): dma %d no free channels", dma->plat_data.id); @@ -249,14 +249,14 @@ static void dw_dma_channel_put_unlocked(struct dma_chan_data *channel) /* channel must not be running when this is called */ static void dw_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; tr_info(&dwdma_tr, "dw_dma_channel_put(): dma %d channel %d put", channel->dma->plat_data.id, channel->index); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); dw_dma_channel_put_unlocked(channel); - spin_unlock_irq(&channel->dma->lock, 
flags); + k_spin_unlock(&channel->dma->lock, key); } static int dw_dma_start(struct dma_chan_data *channel) @@ -860,7 +860,7 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes, .elem = { .size = bytes }, .status = DMA_CB_STATUS_END, }; - uint32_t irq_flags; + k_spinlock_key_t key; tr_dbg(&dwdma_tr, "dw_dma_copy(): dma %d channel %d copy", channel->dma->plat_data.id, channel->index); @@ -890,9 +890,9 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes, dw_dma_verify_transfer(channel, &next); /* increment current pointer */ - spin_lock_irq(&channel->dma->lock, irq_flags); + key = k_spin_lock(&channel->dma->lock); dw_dma_increment_pointer(dw_chan, bytes); - spin_unlock_irq(&channel->dma->lock, irq_flags); + k_spin_unlock(&channel->dma->lock, key); return ret; } @@ -1103,13 +1103,13 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, uint32_t *avail, uint32_t *free) { struct dw_dma_chan_data *dw_chan = dma_chan_get_data(channel); - uint32_t flags; + k_spinlock_key_t key; int ret = 0; tr_dbg(&dwdma_tr, "dw_dma_get_data_size(): dma %d channel %d get data size", channel->dma->plat_data.id, channel->index); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); if (channel->direction == DMA_DIR_HMEM_TO_LMEM || channel->direction == DMA_DIR_DEV_TO_MEM) { @@ -1120,7 +1120,7 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, *avail = dw_chan->ptr_data.buffer_bytes - *free; } - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); #if CONFIG_DMA_HW_LLI if (!(dma_reg_read(channel->dma, DW_DMA_CHAN_EN) & diff --git a/src/drivers/dw/ssi-spi.c b/src/drivers/dw/ssi-spi.c index 4a02ed3bbfed..acad4053d6ae 100644 --- a/src/drivers/dw/ssi-spi.c +++ b/src/drivers/dw/ssi-spi.c @@ -477,22 +477,23 @@ int spi_probe(struct spi *spi) } /* lock */ -spinlock_t spi_lock; +struct k_spinlock spi_lock; static struct spi *spi_devices; static unsigned int n_spi_devices; struct 
spi *spi_get(enum spi_type type) { struct spi *spi; - unsigned int i, flags; + unsigned int i; + k_spinlock_key_t key; - spin_lock_irq(&spi_lock, flags); + key = k_spin_lock(&spi_lock); for (i = 0, spi = spi_devices; i < n_spi_devices; i++, spi++) if (spi->plat_data->type == type) break; - spin_unlock_irq(&spi_lock, flags); + k_spin_unlock(&spi_lock, key); return i < n_spi_devices ? spi : NULL; } @@ -500,10 +501,11 @@ struct spi *spi_get(enum spi_type type) int spi_install(const struct spi_platform_data *plat, size_t n) { struct spi *spi; - unsigned int i, flags; + unsigned int i; + k_spinlock_key_t key; int ret; - spin_lock_irq(&spi_lock, flags); + key = k_spin_lock(&spi_lock); if (spi_devices) { ret = -EBUSY; @@ -526,12 +528,12 @@ int spi_install(const struct spi_platform_data *plat, size_t n) } unlock: - spin_unlock_irq(&spi_lock, flags); + k_spin_unlock(&spi_lock, key); return ret; } void spi_init(void) { - spinlock_init(&spi_lock); + k_spinlock_init(&spi_lock); } diff --git a/src/drivers/generic/dummy-dma.c b/src/drivers/generic/dummy-dma.c index 111e9621c6a3..a4654d95d1b8 100644 --- a/src/drivers/generic/dummy-dma.c +++ b/src/drivers/generic/dummy-dma.c @@ -223,10 +223,10 @@ static ssize_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes) static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; int i; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); for (i = 0; i < dma->plat_data.channels; i++) { /* use channel if it's free */ if (dma->chan[i].status == COMP_STATE_INIT) { @@ -235,11 +235,11 @@ static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma, atomic_add(&dma->num_channels_busy, 1); /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return &dma->chan[i]; } } - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&ddma_tr, "dummy-dmac: %d no free channel", 
dma->plat_data.id); return NULL; @@ -272,11 +272,11 @@ static void dummy_dma_channel_put_unlocked(struct dma_chan_data *channel) */ static void dummy_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); dummy_dma_channel_put_unlocked(channel); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); } /* Since copies are synchronous, the triggers do nothing */ @@ -332,10 +332,10 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, struct dma_sg_config *config) { struct dma_chan_pdata *ch = dma_chan_get_data(channel); - uint32_t flags; + k_spinlock_key_t key; int ret = 0; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); if (!config->elem_array.count) { tr_err(&ddma_tr, "dummy-dmac: %d channel %d no DMA descriptors", @@ -364,7 +364,7 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, channel->status = COMP_STATE_PREPARE; out: - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); return ret; } diff --git a/src/drivers/imx/edma.c b/src/drivers/imx/edma.c index 04e19e58843c..d43cd92bd7f5 100644 --- a/src/drivers/imx/edma.c +++ b/src/drivers/imx/edma.c @@ -96,28 +96,28 @@ static int edma_encode_tcd_attr(int src_width, int dest_width) static struct dma_chan_data *edma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; tr_dbg(&edma_tr, "EDMA: channel_get(%d)", req_chan); - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&edma_tr, "EDMA: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + 
k_spin_unlock(&dma->lock, key); tr_err(&edma_tr, "EDMA: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return channel; } @@ -125,7 +125,7 @@ static struct dma_chan_data *edma_channel_get(struct dma *dma, /* channel must not be running when this is called */ static void edma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; /* Assuming channel is stopped, we thus don't need hardware to * do anything right now @@ -134,10 +134,10 @@ static void edma_channel_put(struct dma_chan_data *channel) notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); } static int edma_start(struct dma_chan_data *channel) diff --git a/src/drivers/imx/interrupt-irqsteer.c b/src/drivers/imx/interrupt-irqsteer.c index 51adefea3952..c2d0e78f6882 100644 --- a/src/drivers/imx/interrupt-irqsteer.c +++ b/src/drivers/imx/interrupt-irqsteer.c @@ -287,13 +287,14 @@ static inline void handle_irq_batch(struct irq_cascade_desc *cascade, struct irq_desc *child = NULL; int bit; bool handled; + k_spinlock_key_t key; while (status) { bit = get_first_irq(status); handled = false; status &= ~(1ull << bit); /* Release interrupt */ - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); /* Get child if any and run handler */ list_for_item(clist, &cascade->child[bit].list) { @@ -301,16 +302,16 @@ static inline void handle_irq_batch(struct irq_cascade_desc *cascade, if (child->handler && (child->cpu_mask & 1 << core)) { /* run handler in non atomic context */ - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); child->handler(child->handler_arg); - 
spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); handled = true; } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&irq_i_tr, "irq_handler(): nobody cared, bit %d", diff --git a/src/drivers/intel/baytrail/ssp.c b/src/drivers/intel/baytrail/ssp.c index 21dd6aa687b7..efc0a7a38a81 100644 --- a/src/drivers/intel/baytrail/ssp.c +++ b/src/drivers/intel/baytrail/ssp.c @@ -68,8 +68,9 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, bool cfs = false; bool cbs = false; int ret = 0; + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* is playback/capture already running */ if (ssp->state[DAI_DIR_PLAYBACK] == COMP_STATE_ACTIVE || @@ -444,7 +445,7 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, dai_info(dai, "ssp_set_config(), done"); out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -485,8 +486,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* enable port */ ssp_update_bits(dai, SSCR0, SSCR0_SSE, SSCR0_SSE); @@ -500,15 +502,16 @@ static void ssp_start(struct dai *dai, int direction) else ssp_update_bits(dai, SSCR1, SSCR1_RSRE, SSCR1_RSRE); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* stop Rx if neeed */ if (direction == DAI_DIR_CAPTURE && @@ -534,7 +537,7 @@ static void ssp_stop(struct dai *dai, int direction) dai_info(dai, "ssp_stop(), SSP port disabled"); } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai 
*dai, int direction) diff --git a/src/drivers/intel/cavs/interrupt.c b/src/drivers/intel/cavs/interrupt.c index 07216344e5ce..0a7e1909723f 100644 --- a/src/drivers/intel/cavs/interrupt.c +++ b/src/drivers/intel/cavs/interrupt.c @@ -62,6 +62,7 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, struct list_item *clist; uint32_t status; uint32_t tries = LVL2_MAX_TRIES; + k_spinlock_key_t key; /* read active interrupt status */ status = irq_read(ilxsd); @@ -75,7 +76,7 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, status &= ~(1 << bit); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); /* get child if any and run handler */ list_for_item(clist, &cascade->child[bit].list) { @@ -83,16 +84,15 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, if (child->handler && (child->cpu_mask & 1 << core)) { /* run handler in non atomic context */ - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); child->handler(child->handler_arg); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); handled = true; } - } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { /* nobody cared ?
*/ diff --git a/src/drivers/intel/cavs/ipc.c b/src/drivers/intel/cavs/ipc.c index 001e7d13cc01..8176fbdef743 100644 --- a/src/drivers/intel/cavs/ipc.c +++ b/src/drivers/intel/cavs/ipc.c @@ -58,9 +58,9 @@ static void ipc_irq_handler(void *arg) { struct ipc *ipc = arg; uint32_t dipcctl; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); #if CAVS_VERSION == CAVS_VERSION_1_5 uint32_t dipct; @@ -128,7 +128,7 @@ static void ipc_irq_handler(void *arg) ipc_read(IPC_DIPCCTL) | IPC_DIPCCTL_IPCIDIE); } - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } #if CAVS_VERSION >= CAVS_VERSION_1_8 diff --git a/src/drivers/intel/dmic/dmic.c b/src/drivers/intel/dmic/dmic.c index 5ab56a6f3d1e..9a796a102432 100644 --- a/src/drivers/intel/dmic/dmic.c +++ b/src/drivers/intel/dmic/dmic.c @@ -61,18 +61,14 @@ static enum task_state dmic_work(void *data) { struct dai *dai = (struct dai *)data; struct dmic_pdata *dmic = dai_get_drvdata(dai); + k_spinlock_key_t key; int32_t gval; uint32_t val; int i; - int ret; dai_dbg(dai, "dmic_work()"); - ret = spin_try_lock(&dai->lock); - if (!ret) { - dai_dbg(dai, "dmic_work(): spin_try_lock(dai->lock, ret) failed: RESCHEDULE"); - return SOF_TASK_STATE_RESCHEDULE; - } + key = k_spin_lock(&dai->lock); /* Increment gain with logarithmic step. * Gain is Q2.30 and gain modifier is Q12.20. @@ -128,7 +124,7 @@ static enum task_state dmic_work(void *data) } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return gval ? 
SOF_TASK_STATE_RESCHEDULE : SOF_TASK_STATE_COMPLETED; } @@ -155,6 +151,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config int32_t step_db; int ret = 0; int di = dai->index; + k_spinlock_key_t key; #if CONFIG_INTEL_DMIC_TPLG_PARAMS struct sof_ipc_dai_config *config = spec_config; int i; @@ -174,7 +171,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config } assert(dmic); - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); #if CONFIG_INTEL_DMIC_TPLG_PARAMS /* @@ -255,7 +252,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config dmic->state = COMP_STATE_PREPARE; out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -263,6 +260,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config static void dmic_start(struct dai *dai) { struct dmic_pdata *dmic = dai_get_drvdata(dai); + k_spinlock_key_t key; int i; int mic_a; int mic_b; @@ -270,7 +268,7 @@ static void dmic_start(struct dai *dai) int fir_b; /* enable port */ - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_dbg(dai, "dmic_start()"); dmic->startcount = 0; @@ -367,7 +365,7 @@ static void dmic_start(struct dai *dai) dmic->global->pause_mask &= ~BIT(dai->index); dmic->state = COMP_STATE_ACTIVE; - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); /* Currently there's no DMIC HW internal mutings and wait times * applied into this start sequence. 
It can be implemented here if @@ -404,10 +402,11 @@ static void dmic_stop_fifo_packers(struct dai *dai, int fifo_index) static void dmic_stop(struct dai *dai, bool stop_is_pause) { struct dmic_pdata *dmic = dai_get_drvdata(dai); + k_spinlock_key_t key; int i; dai_dbg(dai, "dmic_stop()"); - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dmic_stop_fifo_packers(dai, dai->index); @@ -449,7 +448,7 @@ static void dmic_stop(struct dai *dai, bool stop_is_pause) } schedule_task_cancel(&dmic->dmicwork); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static int dmic_trigger(struct dai *dai, int cmd, int direction) diff --git a/src/drivers/intel/haswell/ssp.c b/src/drivers/intel/haswell/ssp.c index 8ba5d7b50c69..d15896962620 100644 --- a/src/drivers/intel/haswell/ssp.c +++ b/src/drivers/intel/haswell/ssp.c @@ -44,8 +44,9 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, uint32_t format; bool inverted_frame = false; int ret = 0; + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* is playback/capture already running */ if (ssp->state[DAI_DIR_PLAYBACK] == COMP_STATE_ACTIVE || @@ -356,7 +357,7 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, dai_info(dai, "ssp_set_config(), done"); out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -397,8 +398,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_info(dai, "ssp_start()"); @@ -420,15 +422,16 @@ static void ssp_start(struct dai *dai, int direction) /* enable port */ ssp->state[direction] = COMP_STATE_ACTIVE; - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp 
= dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* stop Rx if neeed */ if (direction == DAI_DIR_CAPTURE && @@ -457,7 +460,7 @@ static void ssp_stop(struct dai *dai, int direction) dai_info(dai, "ssp_stop(), SSP port disabled"); } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai *dai, int direction) diff --git a/src/drivers/intel/hda/hda-dma.c b/src/drivers/intel/hda/hda-dma.c index 9a3223baee0c..7edb93279c98 100644 --- a/src/drivers/intel/hda/hda-dma.c +++ b/src/drivers/intel/hda/hda-dma.c @@ -534,7 +534,7 @@ static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes, static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, unsigned int channel) { - uint32_t flags; + k_spinlock_key_t key; if (channel >= dma->plat_data.channels) { tr_err(&hdma_tr, "hda-dmac: %d invalid channel %d", @@ -542,7 +542,7 @@ static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, return NULL; } - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); tr_dbg(&hdma_tr, "hda-dmac: %d channel %d -> get", dma->plat_data.id, channel); @@ -553,12 +553,12 @@ static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, atomic_add(&dma->num_channels_busy, 1); /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return &dma->chan[channel]; } /* DMAC has no free channels */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&hdma_tr, "hda-dmac: %d no free channel %d", dma->plat_data.id, channel); return NULL; @@ -583,11 +583,11 @@ static void hda_dma_channel_put_unlocked(struct dma_chan_data *channel) static void hda_dma_channel_put(struct dma_chan_data *channel) { struct dma *dma = channel->dma; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); hda_dma_channel_put_unlocked(channel); - spin_unlock_irq(&dma->lock, flags); + 
k_spin_unlock(&dma->lock, key); atomic_sub(&dma->num_channels_busy, 1); } diff --git a/src/drivers/intel/ssp/mn.c b/src/drivers/intel/ssp/mn.c index d230993b0f26..90ffabaf3a4c 100644 --- a/src/drivers/intel/ssp/mn.c +++ b/src/drivers/intel/ssp/mn.c @@ -59,7 +59,7 @@ struct mn { int bclk_source_mn_clock; #endif - spinlock_t lock; /**< lock mechanism */ + struct k_spinlock lock; /**< lock mechanism */ }; static SHARED_DATA struct mn mn; @@ -83,8 +83,7 @@ void mn_init(struct sof *sof) sof->mn->bclk_sources[i] = MN_BCLK_SOURCE_NONE; #endif - spinlock_init(&sof->mn->lock); - + k_spinlock_init(&sof->mn->lock); } /** @@ -237,6 +236,7 @@ static inline int set_mclk_divider(uint16_t mclk_id, uint32_t mdivr_val) int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) { struct mn *mn = mn_get(); + k_spinlock_key_t key; int ret = 0; if (mclk_id >= DAI_NUM_SSP_MCLK) { @@ -244,7 +244,7 @@ int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) return -EINVAL; } - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); if (is_mclk_source_in_use()) ret = check_current_mclk_source(mclk_id, mclk_rate); @@ -265,7 +265,7 @@ int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) out: - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); return ret; } @@ -282,8 +282,9 @@ void mn_release_mclk(uint32_t mclk_id) { struct mn *mn = mn_get(); uint32_t mdivc; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->mclk_sources_ref[mclk_id]--; @@ -306,7 +307,7 @@ void mn_release_mclk(uint32_t mclk_id) mn->mclk_source_clock = 0; } - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } #if CONFIG_INTEL_MN @@ -596,8 +597,9 @@ int mn_set_bclk(uint32_t dai_index, uint32_t bclk_rate, uint32_t n = 1; int ret = 0; bool mn_in_use; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->bclk_sources[dai_index] = MN_BCLK_SOURCE_NONE; @@ -630,7 +632,7 @@ int mn_set_bclk(uint32_t dai_index, uint32_t bclk_rate, out: - spin_unlock(&mn->lock); + 
k_spin_unlock(&mn->lock, key); return ret; } @@ -639,25 +641,27 @@ void mn_release_bclk(uint32_t dai_index) { struct mn *mn = mn_get(); bool mn_in_use; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->bclk_sources[dai_index] = MN_BCLK_SOURCE_NONE; mn_in_use = is_bclk_source_in_use(MN_BCLK_SOURCE_MN); /* release the M/N clock source if not used */ if (!mn_in_use) reset_bclk_mn_source(); - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } void mn_reset_bclk_divider(uint32_t dai_index) { struct mn *mn = mn_get(); + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn_reg_write(MN_MDIV_M_VAL(dai_index), dai_index, 1); mn_reg_write(MN_MDIV_N_VAL(dai_index), dai_index, 1); - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } #endif diff --git a/src/drivers/intel/ssp/ssp.c b/src/drivers/intel/ssp/ssp.c index 90dd9d505a0b..3066611f22f3 100644 --- a/src/drivers/intel/ssp/ssp.c +++ b/src/drivers/intel/ssp/ssp.c @@ -232,6 +232,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co uint32_t active_tx_slots = 2; uint32_t active_rx_slots = 2; uint32_t sample_width = 2; + k_spinlock_key_t key; bool inverted_bclk = false; bool inverted_frame = false; @@ -240,7 +241,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co int ret = 0; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* ignore config if SSP is already configured */ if (ssp->state[DAI_DIR_PLAYBACK] > COMP_STATE_READY || @@ -765,7 +766,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co } out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -934,8 +935,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_early_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* request mclk/bclk 
*/ ssp_pre_start(dai); @@ -952,15 +954,16 @@ static void ssp_early_start(struct dai *dai, int direction) } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* start the SSP for either playback or capture */ static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_info(dai, "ssp_start()"); @@ -986,15 +989,16 @@ static void ssp_start(struct dai *dai, int direction) break; } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* * Wait to get valid fifo status in clock consumer mode. TODO it's @@ -1044,7 +1048,7 @@ static void ssp_stop(struct dai *dai, int direction) ssp_post_stop(dai); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai *dai, int direction) diff --git a/src/drivers/interrupt.c b/src/drivers/interrupt.c index 60c7d7b103df..b9b0779906da 100644 --- a/src/drivers/interrupt.c +++ b/src/drivers/interrupt.c @@ -53,14 +53,14 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc **cascade; - unsigned long flags; + k_spinlock_key_t key; unsigned int i; int ret; if (!tmpl->name || !tmpl->ops) return -EINVAL; - spin_lock_irq(&root->lock, flags); + key = k_spin_lock(&root->lock); for (cascade = &root->list; *cascade; cascade = &(*cascade)->next) { @@ -74,7 +74,7 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) *cascade = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(**cascade)); - spinlock_init(&(*cascade)->lock); + k_spinlock_init(&(*cascade)->lock); for (i = 0; i < PLATFORM_IRQ_CHILDREN; i++) 
list_init(&(*cascade)->child[i].list); @@ -95,7 +95,7 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) unlock: - spin_unlock_irq(&root->lock, flags); + k_spin_unlock(&root->lock, key); return ret; } @@ -104,8 +104,8 @@ int interrupt_get_irq(unsigned int irq, const char *name) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc *cascade; - unsigned long flags; int ret = -ENODEV; + k_spinlock_key_t key; if (!name || name[0] == '\0') return irq; @@ -117,7 +117,7 @@ int interrupt_get_irq(unsigned int irq, const char *name) return -EINVAL; } - spin_lock_irq(&root->lock, flags); + key = k_spin_lock(&root->lock); for (cascade = root->list; cascade; cascade = cascade->next) { /* .name is non-volatile */ @@ -129,7 +129,7 @@ int interrupt_get_irq(unsigned int irq, const char *name) } - spin_unlock_irq(&root->lock, flags); + k_spin_unlock(&root->lock, key); return ret; } @@ -138,12 +138,12 @@ struct irq_cascade_desc *interrupt_get_parent(uint32_t irq) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc *cascade, *c = NULL; - unsigned long flags; + k_spinlock_key_t key; if (irq < PLATFORM_IRQ_HW_NUM) return NULL; - spin_lock_irq(&root->lock, flags); + key = k_spin_lock(&root->lock); for (cascade = root->list; cascade; cascade = cascade->next) { if (irq >= cascade->irq_base && @@ -155,7 +155,7 @@ struct irq_cascade_desc *interrupt_get_parent(uint32_t irq) } - spin_unlock_irq(&root->lock, flags); + k_spin_unlock(&root->lock, key); return c; } @@ -166,7 +166,7 @@ void interrupt_init(struct sof *sof) sizeof(cascade_root)); sof->cascade_root->last_irq = PLATFORM_IRQ_FIRST_CHILD - 1; - spinlock_init(&sof->cascade_root->lock); + k_spinlock_init(&sof->cascade_root->lock); } static int irq_register_child(struct irq_cascade_desc *cascade, int irq, @@ -277,14 +277,14 @@ static uint32_t irq_enable_child(struct irq_cascade_desc *cascade, int irq, struct irq_child *child; unsigned int child_idx; struct list_item *list; - 
unsigned long flags; + k_spinlock_key_t key; /* * Locking is child to parent: when called recursively we are already * holding the child's lock and then also taking the parent's lock. The * same holds for the interrupt_(un)register() paths. */ - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock(&cascade->lock); child = cascade->child + hw_irq; child_idx = cascade->global_mask ? 0 : core; @@ -311,7 +311,7 @@ static uint32_t irq_enable_child(struct irq_cascade_desc *cascade, int irq, } - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock(&cascade->lock, key); return 0; } @@ -324,9 +324,9 @@ static uint32_t irq_disable_child(struct irq_cascade_desc *cascade, int irq, struct irq_child *child; unsigned int child_idx; struct list_item *list; - unsigned long flags; + k_spinlock_key_t key; - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock(&cascade->lock); child = cascade->child + hw_irq; child_idx = cascade->global_mask ? 0 : core; @@ -356,7 +356,7 @@ static uint32_t irq_disable_child(struct irq_cascade_desc *cascade, int irq, } - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock(&cascade->lock, key); return 0; } @@ -370,8 +370,7 @@ static int interrupt_register_internal(uint32_t irq, void (*handler)(void *arg), void *arg, struct irq_desc *desc) { struct irq_cascade_desc *cascade; - /* Avoid a bogus compiler warning */ - unsigned long flags = 0; + k_spinlock_key_t key; int ret; /* no parent means we are registering DSP internal IRQ */ @@ -389,9 +388,9 @@ static int interrupt_register_internal(uint32_t irq, void (*handler)(void *arg), #endif } - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock(&cascade->lock); ret = irq_register_child(cascade, irq, handler, arg, desc); - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock(&cascade->lock, key); return ret; } @@ -405,8 +404,7 @@ static void interrupt_unregister_internal(uint32_t irq, const void *arg, struct irq_desc *desc) { struct irq_cascade_desc *cascade; - /* Avoid a bogus 
compiler warning */ - unsigned long flags = 0; + k_spinlock_key_t key; /* no parent means we are unregistering DSP internal IRQ */ cascade = interrupt_get_parent(irq); @@ -424,9 +422,9 @@ static void interrupt_unregister_internal(uint32_t irq, const void *arg, return; } - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock(&cascade->lock); irq_unregister_child(cascade, irq, arg, desc); - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock(&cascade->lock, key); } uint32_t interrupt_enable(uint32_t irq, void *arg) diff --git a/src/drivers/mediatek/mt8195/afe-memif.c b/src/drivers/mediatek/mt8195/afe-memif.c index 3058d52987df..9ce192d9a537 100644 --- a/src/drivers/mediatek/mt8195/afe-memif.c +++ b/src/drivers/mediatek/mt8195/afe-memif.c @@ -144,28 +144,28 @@ struct afe_memif_dma { /* acquire the specific DMA channel */ static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; tr_dbg(&memif_tr, "MEMIF: channel_get(%d)", req_chan); - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&memif_tr, "MEMIF: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); tr_err(&memif_tr, "MEMIF: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock(&dma->lock, key); return channel; } @@ -173,7 +173,7 @@ static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req /* channel must not be running when this is called */ static void memif_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; /* Assuming channel is stopped, 
we thus don't need hardware to * do anything right now @@ -182,10 +182,10 @@ static void memif_channel_put(struct dma_chan_data *channel) notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock(&channel->dma->lock, key); } #if TEST_SGEN diff --git a/src/drivers/mediatek/mt8195/interrupt.c b/src/drivers/mediatek/mt8195/interrupt.c index 6b49387fa6cc..02ce1684a1ff 100644 --- a/src/drivers/mediatek/mt8195/interrupt.c +++ b/src/drivers/mediatek/mt8195/interrupt.c @@ -121,6 +121,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, int core = cpu_get_id(); struct list_item *clist; struct irq_desc *child = NULL; + k_spinlock_key_t key; int bit; bool handled; @@ -129,7 +130,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, handled = false; status &= ~(1ull << bit); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); list_for_item(clist, &cascade->child[bit].list) { child = container_of(clist, struct irq_desc, irq_list); @@ -140,7 +141,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&int_tr, "irq_handler(): not handled, bit %d", bit); diff --git a/src/idc/idc.c b/src/idc/idc.c index 141db1aeede7..3eaa643537aa 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -323,15 +323,15 @@ static void idc_complete(void *data) struct ipc *ipc = ipc_get(); struct idc *idc = data; uint32_t type = iTS(idc->received_msg.header); - uint32_t flags; + k_spinlock_key_t key; switch (type) { case iTS(IDC_MSG_IPC): /* Signal the host */ - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask &= ~IPC_TASK_SECONDARY_CORE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + 
k_spin_unlock(&ipc->lock, key); } } #endif diff --git a/src/include/sof/audio/component_ext.h b/src/include/sof/audio/component_ext.h index 837fc90d355a..290e124be92e 100644 --- a/src/include/sof/audio/component_ext.h +++ b/src/include/sof/audio/component_ext.h @@ -30,7 +30,7 @@ /** \brief Holds list of registered components' drivers */ struct comp_driver_list { struct list_item list; /**< list of component drivers */ - spinlock_t lock; /**< list lock */ + struct k_spinlock lock; /**< list lock */ }; /** \brief Retrieves the component device buffer list. */ diff --git a/src/include/sof/coherent.h b/src/include/sof/coherent.h index 981060669779..7b18927928de 100644 --- a/src/include/sof/coherent.h +++ b/src/include/sof/coherent.h @@ -54,8 +54,8 @@ * The shared flag is only set at coherent init and thereafter it's RO. */ struct coherent { - spinlock_t lock; /* locking mechanism */ - uint32_t flags; /* lock flags */ + struct k_spinlock lock; /* locking mechanism */ + k_spinlock_key_t key; /* lock flags */ uint16_t shared; /* shared on other non coherent cores */ uint16_t core; /* owner core if not shared */ struct list_item list; /* coherent list iteration */ @@ -99,7 +99,7 @@ __must_check static inline struct coherent *coherent_acquire(struct coherent *c, if (c->shared) { CHECK_COHERENT_CORE(c); - spin_lock(&c->lock); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -125,7 +125,7 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t dcache_writeback_invalidate_region(c, size); /* unlock on uncache alias */ - spin_unlock(&(cache_to_uncache(c))->lock); + k_spin_unlock(&cache_to_uncache(c)->lock, cache_to_uncache(c)->key); } return cache_to_uncache(c); @@ -141,7 +141,7 @@ __must_check static inline struct coherent *coherent_acquire_irq(struct coherent if (c->shared) { CHECK_COHERENT_CORE(c); - spin_lock_irq(&c->lock, c->flags); + c->key = k_spin_lock(&c->lock); /* 
invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -164,8 +164,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si dcache_writeback_invalidate_region(c, size); /* unlock on uncache alias */ - spin_unlock_irq(&(cache_to_uncache(c))->lock, - (cache_to_uncache(c))->flags); + k_spin_unlock(&cache_to_uncache(c)->lock, cache_to_uncache(c)->key); } return cache_to_uncache(c); @@ -176,7 +175,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si /* assert if someone passes a cache/local address in here. */ \ ADDR_IS_COHERENT(object); \ /* TODO static assert if we are not cache aligned */ \ - spinlock_init(&object->member.lock); \ + k_spinlock_init(&object->member.lock); \ object->member.shared = false; \ object->member.core = cpu_get_id(); \ list_init(&object->member.list); \ @@ -197,10 +196,10 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si do { \ /* assert if someone passes a cache/local address in here. */ \ ADDR_IS_COHERENT(object); \ - spin_lock(&(object)->member.lock); \ + (object)->member.key = k_spin_lock(&(object)->member.lock); \ (object)->member.shared = true; \ dcache_invalidate_region(object, sizeof(*object)); \ - spin_unlock(&(object)->member.lock); \ + k_spin_unlock(&(object)->member.lock, (object)->member.key); \ } while (0) /* set the object to shared mode with coherency managed by SW */ @@ -208,10 +207,10 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si do { \ /* assert if someone passes a cache/local address in here. 
*/ \ ADDR_IS_COHERENT(object); \ - spin_lock_irq(&(object)->member.lock, &(object)->member.flags); \ + (object)->member.key = k_spin_lock(&(object)->member.lock); \ (object)->member.shared = true; \ dcache_invalidate_region(object, sizeof(*object)); \ - spin_unlock_irq(&(object)->member.lock, &(object)->member.flags); \ + k_spin_unlock(&(object)->member.lock, (object)->member.key); \ } while (0) #else @@ -221,7 +220,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si __must_check static inline struct coherent *coherent_acquire(struct coherent *c, const size_t size) { if (c->shared) { - spin_lock(&c->lock); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -236,7 +235,7 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t /* wtb and inv local data to coherent object */ dcache_writeback_invalidate_region(uncache_to_cache(c), size); - spin_unlock(&c->lock); + k_spin_unlock(&c->lock, c->key); } return c; @@ -246,7 +245,7 @@ __must_check static inline struct coherent *coherent_acquire_irq(struct coherent const size_t size) { if (c->shared) { - spin_lock_irq(&c->lock, c->flags); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -261,7 +260,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si /* wtb and inv local data to coherent object */ dcache_writeback_invalidate_region(uncache_to_cache(c), size); - spin_unlock_irq(&c->lock, c->flags); + k_spin_unlock(&c->lock, c->key); } return c; @@ -270,7 +269,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si #define coherent_init(object, member) \ do { \ /* TODO static assert if we are not cache aligned */ \ - spinlock_init(&object->member.lock); \ + k_spinlock_init(&object->member.lock); \ object->member.shared = 0; \ object->member.core = cpu_get_id(); \
list_init(&object->member.list); \ diff --git a/src/include/sof/drivers/interrupt.h b/src/include/sof/drivers/interrupt.h index cdefd2849681..d5da3af20470 100644 --- a/src/include/sof/drivers/interrupt.h +++ b/src/include/sof/drivers/interrupt.h @@ -81,7 +81,7 @@ struct irq_cascade_desc { * cannot mask input * interrupts per core */ - spinlock_t lock; /**< protect child + struct k_spinlock lock; /**< protect child * lists, enable and * child counters */ @@ -109,7 +109,7 @@ struct irq_cascade_tmpl { * \brief Cascading interrupt controller root. */ struct cascade_root { - spinlock_t lock; /**< locking mechanism */ + struct k_spinlock lock; /**< locking mechanism */ struct irq_cascade_desc *list; /**< list of child cascade irqs */ int last_irq; /**< last registered cascade irq */ }; diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index 8be355f426c3..d798681d6d04 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -66,7 +66,7 @@ extern struct tr_ctx ipc_tr; #define IPC_TASK_SECONDARY_CORE BIT(2) struct ipc { - spinlock_t lock; /* locking mechanism */ + struct k_spinlock lock; /* locking mechanism */ void *comp_data; /* PM */ diff --git a/src/include/sof/ipc/msg.h b/src/include/sof/ipc/msg.h index 4c5cea20e586..4a536b03b2f2 100644 --- a/src/include/sof/ipc/msg.h +++ b/src/include/sof/ipc/msg.h @@ -72,15 +72,15 @@ static inline void ipc_msg_free(struct ipc_msg *msg) return; struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); list_item_del(&msg->list); rfree(msg->tx_data); rfree(msg); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } /** diff --git a/src/include/sof/lib/clk.h b/src/include/sof/lib/clk.h index b16c18d6313f..5b6902ceb8b5 100644 --- a/src/include/sof/lib/clk.h +++ b/src/include/sof/lib/clk.h @@ -41,7 +41,7 @@ struct clock_info { uint32_t lowest_freq_idx; /* lowest possible clock */ uint32_t 
notification_id; uint32_t notification_mask; - spinlock_t lock; + struct k_spinlock lock; /* persistent change clock value in active state */ int (*set_freq)(int clock, int freq_idx); diff --git a/src/include/sof/lib/dai.h b/src/include/sof/lib/dai.h index 5d7d224139de..1838312c4aa2 100644 --- a/src/include/sof/lib/dai.h +++ b/src/include/sof/lib/dai.h @@ -190,7 +190,7 @@ struct dai_data { struct dai { uint32_t index; /**< index */ - spinlock_t lock; /**< locking mechanism */ + struct k_spinlock lock; /**< locking mechanism */ int sref; /**< simple ref counter, guarded by lock */ struct dai_plat_data plat_data; const struct dai_driver *drv; diff --git a/src/include/sof/lib/dma.h b/src/include/sof/lib/dma.h index cd2e3469d7a1..1edc72a2bc4e 100644 --- a/src/include/sof/lib/dma.h +++ b/src/include/sof/lib/dma.h @@ -200,7 +200,7 @@ struct dma_plat_data { struct dma { struct dma_plat_data plat_data; - spinlock_t lock; /**< locking mechanism */ + struct k_spinlock lock; /**< locking mechanism */ int sref; /**< simple ref counter, guarded by lock */ const struct dma_ops *ops; atomic_t num_channels_busy; /* number of busy channels */ diff --git a/src/include/sof/lib/mm_heap.h b/src/include/sof/lib/mm_heap.h index aa56f288242a..29c0ba57f993 100644 --- a/src/include/sof/lib/mm_heap.h +++ b/src/include/sof/lib/mm_heap.h @@ -78,7 +78,7 @@ struct mm { struct mm_info total; uint32_t heap_trace_updated; /* updates that can be presented */ - spinlock_t lock; /* all allocs and frees are atomic */ + struct k_spinlock lock; /* all allocs and frees are atomic */ }; /* Heap save/restore contents and context for PM D0/D3 events */ diff --git a/src/include/sof/lib/notifier.h b/src/include/sof/lib/notifier.h index 2f10b2d97aee..9bdc7bce0449 100644 --- a/src/include/sof/lib/notifier.h +++ b/src/include/sof/lib/notifier.h @@ -39,7 +39,7 @@ enum notify_id { struct notify { struct list_item list[NOTIFIER_ID_COUNT]; /* list of callback handles */ - spinlock_t lock; /* list lock */ + struct 
k_spinlock lock; /* list lock */ }; struct notify_data { diff --git a/src/include/sof/lib/pm_runtime.h b/src/include/sof/lib/pm_runtime.h index 1644cbd0a998..064b7584a1a7 100644 --- a/src/include/sof/lib/pm_runtime.h +++ b/src/include/sof/lib/pm_runtime.h @@ -46,7 +46,7 @@ enum pm_runtime_context { /** \brief Runtime power management data. */ struct pm_runtime_data { - spinlock_t lock; /**< lock mechanism */ + struct k_spinlock lock; /**< lock mechanism */ void *platform_data; /**< platform specific data */ #if CONFIG_DSP_RESIDENCY_COUNTERS struct r_counters_data *r_counters; /**< diagnostic DSP residency counters */ diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index 6f91061cd681..c2424338be03 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -46,7 +46,7 @@ struct ll_schedule_domain_ops { struct ll_schedule_domain { uint64_t next_tick; /**< ticks just set for next run */ uint64_t new_target_tick; /**< for the next set, used during the reschedule stage */ - spinlock_t lock; /**< standard lock */ + struct k_spinlock lock; /**< standard lock */ atomic_t total_num_tasks; /**< total number of registered tasks */ atomic_t enabled_cores; /**< number of enabled cores */ uint32_t ticks_per_ms; /**< number of clock ticks per ms */ @@ -90,7 +90,7 @@ static inline struct ll_schedule_domain *domain_init domain->next_tick = UINT64_MAX; domain->new_target_tick = UINT64_MAX; - spinlock_init(&domain->lock); + k_spinlock_init(&domain->lock); atomic_init(&domain->total_num_tasks, 0); atomic_init(&domain->enabled_cores, 0); diff --git a/src/include/sof/spinlock.h b/src/include/sof/spinlock.h index 83549be342fa..0d68459aa2c2 100644 --- a/src/include/sof/spinlock.h +++ b/src/include/sof/spinlock.h @@ -13,7 +13,10 @@ #ifndef __SOF_SPINLOCK_H__ #define __SOF_SPINLOCK_H__ +#ifndef __ZEPHYR__ #include +typedef uint32_t k_spinlock_key_t; +#endif #include #include @@ 
-38,7 +41,7 @@ * src/drivers/dw-dma.c:840: spinlock_init(&dma->lock); * * grep -rn lock --include *.c | grep 439 - * src/lib/alloc.c:439: spin_lock_irq(&memmap.lock, flags); + * src/lib/alloc.c:439: k_spin_lock_irq(&memmap.lock, flags); * * Every lock entry and exit shows LcE and LcX in trace alongside the lock * line numbers in hex. e.g. @@ -140,16 +143,10 @@ extern struct tr_ctx sl_tr; #endif /* CONFIG_DEBUG_LOCKS */ -static inline int _spin_try_lock(spinlock_t *lock, int line) -{ - spin_lock_dbg(line); - return arch_try_lock(lock); -} - -#define spin_try_lock(lock) _spin_try_lock(lock, __LINE__) +#ifndef __ZEPHYR__ /* all SMP spinlocks need init, nothing todo on UP */ -static inline void _spinlock_init(spinlock_t *lock, int line) +static inline void _spinlock_init(struct k_spinlock *lock, int line) { arch_spinlock_init(lock); #if CONFIG_DEBUG_LOCKS @@ -157,44 +154,26 @@ static inline void _spinlock_init(spinlock_t *lock, int line) #endif } -#define spinlock_init(lock) _spinlock_init(lock, __LINE__) - -/* does nothing on UP systems */ -static inline void _spin_lock(spinlock_t *lock, int line) -{ - spin_lock_dbg(line); -#if CONFIG_DEBUG_LOCKS - spin_lock_log(lock, line); - spin_try_lock_dbg(lock, line); -#else - arch_spin_lock(lock); -#endif - - /* spinlock has to be in a shared memory */ -} - -#define spin_lock(lock) _spin_lock(lock, __LINE__) +#define k_spinlock_init(lock) _spinlock_init(lock, __LINE__) /* disables all IRQ sources and takes lock - enter atomic context */ -uint32_t _spin_lock_irq(spinlock_t *lock); +k_spinlock_key_t _k_spin_lock_irq(struct k_spinlock *lock); +#define k_spin_lock(lock) _k_spin_lock_irq(lock) -#define spin_lock_irq(lock, flags) (flags = _spin_lock_irq(lock)) +/* re-enables current IRQ sources and releases lock - leave atomic context */ +void _k_spin_unlock_irq(struct k_spinlock *lock, k_spinlock_key_t key, int line); +#define k_spin_unlock(lock, key) _k_spin_unlock_irq(lock, key, __LINE__) + +#else -static inline void 
_spin_unlock(spinlock_t *lock, int line) +/* This has to be moved to Zephyr */ +static inline void k_spinlock_init(struct k_spinlock *lock) { - arch_spin_unlock(lock); -#if CONFIG_DEBUG_LOCKS - spin_unlock_dbg(line); +#ifdef CONFIG_SMP + atomic_set(&lock->locked, 0); #endif - - /* spinlock has to be in a shared memory */ } -#define spin_unlock(lock) _spin_unlock(lock, __LINE__) - -/* re-enables current IRQ sources and releases lock - leave atomic context */ -void _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line); - -#define spin_unlock_irq(lock, flags) _spin_unlock_irq(lock, flags, __LINE__) +#endif /* __ZEPHYR__ */ #endif /* __SOF_SPINLOCK_H__ */ diff --git a/src/include/sof/trace/dma-trace.h b/src/include/sof/trace/dma-trace.h index a9f49dfecdc4..e56634c538a4 100644 --- a/src/include/sof/trace/dma-trace.h +++ b/src/include/sof/trace/dma-trace.h @@ -44,7 +44,7 @@ struct dma_trace_data { * copied by dma connected to host */ uint32_t dropped_entries; /* amount of dropped entries */ - spinlock_t lock; /* dma trace lock */ + struct k_spinlock lock; /* dma trace lock */ }; int dma_trace_init_early(struct sof *sof); diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index 247c78ff8718..72ffe5cb8274 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -59,12 +59,12 @@ int ipc_process_on_core(uint32_t core, bool blocking) * will also reply to the host */ if (!blocking) { - uint32_t flags; + k_spinlock_key_t key; ipc->core = core; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask |= IPC_TASK_SECONDARY_CORE; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } /* send IDC message */ @@ -176,9 +176,9 @@ void ipc_send_queued_msg(void) { struct ipc *ipc = ipc_get(); struct ipc_msg *msg; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); /* any messages to send ? 
*/ if (list_is_empty(&ipc->msg_list)) @@ -190,16 +190,16 @@ void ipc_send_queued_msg(void) ipc_platform_send_msg(msg); out: - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) { struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; int ret; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); /* copy mailbox data to message */ if (msg->tx_size > 0 && msg->tx_size < SOF_IPC_MSG_MAX_SIZE) { @@ -223,7 +223,7 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) } out: - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } void ipc_schedule_process(struct ipc *ipc) @@ -240,7 +240,7 @@ int ipc_init(struct sof *sof) sof->ipc->comp_data = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, SOF_IPC_MSG_MAX_SIZE); - spinlock_init(&sof->ipc->lock); + k_spinlock_init(&sof->ipc->lock); list_init(&sof->ipc->msg_list); list_init(&sof->ipc->comp_list); @@ -271,12 +271,12 @@ void ipc_complete_cmd(struct ipc *ipc) static void ipc_complete_task(void *data) { struct ipc *ipc = data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask &= ~IPC_TASK_INLINE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } static enum task_state ipc_do_cmd(void *data) diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 36c9266f0885..896a00850840 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -456,17 +456,17 @@ static int ipc_stream_trigger(uint32_t header) * synchronously. 
*/ if (pipeline_is_timer_driven(pcm_dev->cd->pipeline)) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask |= IPC_TASK_IN_THREAD; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); ret = pipeline_trigger(pcm_dev->cd->pipeline, pcm_dev->cd, cmd); if (ret <= 0) { - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask &= ~IPC_TASK_IN_THREAD; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } } else { ret = pipeline_trigger_run(pcm_dev->cd->pipeline, pcm_dev->cd, cmd); diff --git a/src/ipc/ipc3/helper.c b/src/ipc/ipc3/helper.c index 51a2b449805b..4b2a6fde5b9a 100644 --- a/src/ipc/ipc3/helper.c +++ b/src/ipc/ipc3/helper.c @@ -73,6 +73,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) const struct comp_driver *drv = NULL; struct comp_driver_info *info; struct sof_ipc_comp_ext *comp_ext; + k_spinlock_key_t key; /* do we have extended data ? 
*/ if (!comp->ext_data_length) { @@ -114,7 +115,8 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) } /* search driver list with UUID */ - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); + list_for_item(clist, &drivers->list) { info = container_of(clist, struct comp_driver_info, list); @@ -133,13 +135,13 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) *(uint32_t *)(&comp_ext->uuid[8]), *(uint32_t *)(&comp_ext->uuid[12])); + k_spin_unlock(&drivers->lock, key); + out: if (drv) tr_dbg(&comp_tr, "get_drv(), found driver type %d, uuid %pU", drv->type, drv->tctx->uuid_p); - spin_unlock(&drivers->lock); - return drv; } @@ -647,12 +649,12 @@ int ipc_comp_new(struct ipc *ipc, ipc_comp *_comp) void ipc_msg_reply(struct sof_ipc_reply *reply) { struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; mailbox_hostbox_write(0, reply, reply->hdr.size); - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask &= ~IPC_TASK_IN_THREAD; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } diff --git a/src/lib/alloc.c b/src/lib/alloc.c index ccaff0a227a7..8d3aef5b75aa 100644 --- a/src/lib/alloc.c +++ b/src/lib/alloc.c @@ -751,14 +751,14 @@ static void *_malloc_unlocked(enum mem_zone zone, uint32_t flags, uint32_t caps, void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) { struct mm *memmap = memmap_get(); - uint32_t lock_flags; + k_spinlock_key_t key; void *ptr = NULL; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock(&memmap->lock); ptr = _malloc_unlocked(zone, flags, caps, bytes); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags); return ptr; @@ -779,16 +779,16 @@ void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) void *rzalloc_core_sys(int core, size_t bytes) { struct mm *memmap = memmap_get(); 
- uint32_t flags; + k_spinlock_key_t key; void *ptr = NULL; - spin_lock_irq(&memmap->lock, flags); + key = k_spin_lock(&memmap->lock); ptr = rmalloc_sys(memmap->system + core, 0, 0, bytes); if (ptr) bzero(ptr, bytes); - spin_unlock_irq(&memmap->lock, flags); + k_spin_unlock(&memmap->lock, key); return ptr; } @@ -944,13 +944,13 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, { struct mm *memmap = memmap_get(); void *ptr = NULL; - uint32_t lock_flags; + k_spinlock_key_t key; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock(&memmap->lock); ptr = _balloc_unlocked(flags, caps, bytes, alignment); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags); return ptr; @@ -995,11 +995,11 @@ static void _rfree_unlocked(void *ptr) void rfree(void *ptr) { struct mm *memmap = memmap_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&memmap->lock, flags); + key = k_spin_lock(&memmap->lock); _rfree_unlocked(ptr); - spin_unlock_irq(&memmap->lock, flags); + k_spin_unlock(&memmap->lock, key); } void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, @@ -1007,13 +1007,13 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, { struct mm *memmap = memmap_get(); void *new_ptr = NULL; - uint32_t lock_flags; + k_spinlock_key_t key; size_t copy_bytes = MIN(bytes, old_bytes); if (!bytes) return new_ptr; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock(&memmap->lock); new_ptr = _balloc_unlocked(flags, caps, bytes, alignment); @@ -1023,7 +1023,7 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, if (new_ptr) _rfree_unlocked(ptr); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags); return new_ptr; @@ -1085,8 +1085,7 @@ void init_heap(struct sof *sof) 
DEBUG_BLOCK_FREE_VALUE_8BIT); #endif - spinlock_init(&memmap->lock); - + k_spinlock_init(&memmap->lock); } #if CONFIG_DEBUG_MEMORY_USAGE_SCAN @@ -1094,6 +1093,7 @@ int heap_info(enum mem_zone zone, int index, struct mm_info *out) { struct mm *memmap = memmap_get(); struct mm_heap *heap; + k_spinlock_key_t key; if (!out) goto error; @@ -1135,9 +1135,9 @@ int heap_info(enum mem_zone zone, int index, struct mm_info *out) goto error; } - spin_lock(&memmap->lock); + key = k_spin_lock(&memmap->lock); *out = heap->info; - spin_unlock(&memmap->lock); + k_spin_unlock(&memmap->lock, key); return 0; error: tr_err(&mem_tr, "heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index, diff --git a/src/lib/clk.c b/src/lib/clk.c index e13e10e8541c..7eb9199c9016 100644 --- a/src/lib/clk.c +++ b/src/lib/clk.c @@ -53,7 +53,7 @@ void clock_set_freq(int clock, uint32_t hz) { struct clock_info *clk_info = clocks_get() + clock; uint32_t idx; - uint32_t flags; + k_spinlock_key_t key; clk_notify_data.old_freq = clk_info->freqs[clk_info->current_freq_idx].freq; @@ -61,7 +61,7 @@ void clock_set_freq(int clock, uint32_t hz) clk_info->freqs[clk_info->current_freq_idx].ticks_per_msec; /* atomic context for changing clocks */ - spin_lock_irq(&clk_info->lock, flags); + key = k_spin_lock(&clk_info->lock); /* get nearest frequency that is >= requested Hz */ idx = clock_get_nearest_freq_idx(clk_info->freqs, clk_info->freqs_num, @@ -88,7 +88,7 @@ void clock_set_freq(int clock, uint32_t hz) clk_info->notification_mask, &clk_notify_data, sizeof(clk_notify_data)); - spin_unlock_irq(&clk_info->lock, flags); + k_spin_unlock(&clk_info->lock, key); } void clock_low_power_mode(int clock, bool enable) diff --git a/src/lib/dai.c b/src/lib/dai.c index c98e3fd012f3..f64c60be21cd 100644 --- a/src/lib/dai.c +++ b/src/lib/dai.c @@ -139,7 +139,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) int ret = 0; const struct dai_type_info *dti; struct dai *d; - uint32_t flags_irq; + 
k_spinlock_key_t key; dti = dai_find_type(type); if (!dti) @@ -150,7 +150,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) continue; } /* device created? */ - spin_lock_irq(&d->lock, flags_irq); + key = k_spin_lock(&d->lock); if (d->sref == 0) { if (flags & DAI_CREAT) ret = dai_probe(d); @@ -163,7 +163,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) tr_info(&dai_tr, "dai_get type %d index %d new sref %d", type, index, d->sref); - spin_unlock_irq(&d->lock, flags_irq); + k_spin_unlock(&d->lock, key); return !ret ? d : NULL; } @@ -174,9 +174,9 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) void dai_put(struct dai *dai) { int ret; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&dai->lock, flags); + key = k_spin_lock(&dai->lock); if (--dai->sref == 0) { ret = dai_remove(dai); if (ret < 0) { @@ -186,5 +186,5 @@ void dai_put(struct dai *dai) } tr_info(&dai_tr, "dai_put type %d index %d new sref %d", dai->drv->type, dai->index, dai->sref); - spin_unlock_irq(&dai->lock, flags); + k_spin_unlock(&dai->lock, key); } diff --git a/src/lib/dma.c b/src/lib/dma.c index f2fd898fedb5..1d6ddd25ba2f 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -32,7 +32,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) int users, ret; int min_users = INT32_MAX; struct dma *d = NULL, *dmin = NULL; - unsigned int flags_irq; + k_spinlock_key_t key; if (!info->num_dmas) { tr_err(&dma_tr, "dma_get(): No DMACs installed"); @@ -103,7 +103,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) * may be requested many times, let the probe() * do on-first-use initialization. 
*/ - spin_lock_irq(&dmin->lock, flags_irq); + key = k_spin_lock(&dmin->lock); ret = 0; if (!dmin->sref) { @@ -120,16 +120,16 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) dmin->plat_data.id, dmin->sref, atomic_read(&dmin->num_channels_busy)); - spin_unlock_irq(&dmin->lock, flags_irq); + k_spin_unlock(&dmin->lock, key); return !ret ? dmin : NULL; } void dma_put(struct dma *dma) { - unsigned int flags_irq; + k_spinlock_key_t key; int ret; - spin_lock_irq(&dma->lock, flags_irq); + key = k_spin_lock(&dma->lock); if (--dma->sref == 0) { ret = dma_remove(dma); if (ret < 0) { @@ -139,7 +139,7 @@ void dma_put(struct dma *dma) } tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d", dma, dma->sref); - spin_unlock_irq(&dma->lock, flags_irq); + k_spin_unlock(&dma->lock, key); } int dma_sg_alloc(struct dma_sg_elem_array *elem_array, diff --git a/src/lib/notifier.c b/src/lib/notifier.c index e8182115aeef..cad4225c2847 100644 --- a/src/lib/notifier.c +++ b/src/lib/notifier.c @@ -41,11 +41,12 @@ int notifier_register(void *receiver, void *caller, enum notify_id type, { struct notify *notify = *arch_notify_get(); struct callback_handle *handle; + k_spinlock_key_t key; int ret = 0; assert(type >= NOTIFIER_ID_CPU_FREQ && type < NOTIFIER_ID_COUNT); - spin_lock(¬ify->lock); + key = k_spin_lock(¬ify->lock); /* Find already registered event of this type */ if (flags & NOTIFIER_FLAG_AGGREGATE && @@ -74,7 +75,7 @@ int notifier_register(void *receiver, void *caller, enum notify_id type, list_item_prepend(&handle->list, ¬ify->list[type]); out: - spin_unlock(¬ify->lock); + k_spin_unlock(¬ify->lock, key); return ret; } @@ -84,10 +85,11 @@ void notifier_unregister(void *receiver, void *caller, enum notify_id type) struct list_item *wlist; struct list_item *tlist; struct callback_handle *handle; + k_spinlock_key_t key; assert(type >= NOTIFIER_ID_CPU_FREQ && type < NOTIFIER_ID_COUNT); - spin_lock(¬ify->lock); + key = k_spin_lock(¬ify->lock); /* * Unregister all 
matching callbacks @@ -110,7 +112,7 @@ void notifier_unregister(void *receiver, void *caller, enum notify_id type) } } - spin_unlock(¬ify->lock); + k_spin_unlock(¬ify->lock, key); } void notifier_unregister_all(void *receiver, void *caller) @@ -192,7 +194,7 @@ void init_system_notify(struct sof *sof) *notify = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(**notify)); - spinlock_init(&(*notify)->lock); + k_spinlock_init(&(*notify)->lock); for (i = NOTIFIER_ID_CPU_FREQ; i < NOTIFIER_ID_COUNT; i++) list_init(&(*notify)->list[i]); diff --git a/src/lib/pm_runtime.c b/src/lib/pm_runtime.c index d5111b80c88d..0489bbdc44d6 100644 --- a/src/lib/pm_runtime.c +++ b/src/lib/pm_runtime.c @@ -29,10 +29,9 @@ DECLARE_TR_CTX(pm_tr, SOF_UUID(pm_runtime_uuid), LOG_LEVEL_INFO); void pm_runtime_init(struct sof *sof) { sof->prd = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->prd)); - spinlock_init(&sof->prd->lock); + k_spinlock_init(&sof->prd->lock); platform_pm_runtime_init(sof->prd); - } /* Warning: the terms in this API (enable, active,... 
) apply sometimes diff --git a/src/platform/amd/renoir/lib/clk.c b/src/platform/amd/renoir/lib/clk.c index 75e203084011..34ba91a97087 100644 --- a/src/platform/amd/renoir/lib/clk.c +++ b/src/platform/amd/renoir/lib/clk.c @@ -134,7 +134,7 @@ void platform_clock_init(struct sof *sof) .notification_mask = NOTIFIER_TARGET_CORE_MASK(i), .set_freq = NULL, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } acp_change_clock_notify(600000000); } diff --git a/src/platform/amd/renoir/lib/dai.c b/src/platform/amd/renoir/lib/dai.c index 0966a648959d..b34edafa78c0 100644 --- a/src/platform/amd/renoir/lib/dai.c +++ b/src/platform/amd/renoir/lib/dai.c @@ -107,14 +107,14 @@ int dai_init(struct sof *sof) int i; /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(acp_dmic_dai); i++) - spinlock_init(&acp_dmic_dai[i].lock); + k_spinlock_init(&acp_dmic_dai[i].lock); /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(spdai); i++) - spinlock_init(&spdai[i].lock); + k_spinlock_init(&spdai[i].lock); #ifdef ACP_BT_ENABLE /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(btdai); i++) - spinlock_init(&btdai[i].lock); + k_spinlock_init(&btdai[i].lock); #endif sof->dai_info = &lib_dai; return 0; diff --git a/src/platform/amd/renoir/lib/dma.c b/src/platform/amd/renoir/lib/dma.c index 3352ee6b3aa9..b0affebf6e45 100644 --- a/src/platform/amd/renoir/lib/dma.c +++ b/src/platform/amd/renoir/lib/dma.c @@ -89,7 +89,7 @@ int acp_dma_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; return 0; } diff --git a/src/platform/baytrail/lib/clk.c b/src/platform/baytrail/lib/clk.c index d3a5e5e945c1..c2e3d267a2ab 100644 --- a/src/platform/baytrail/lib/clk.c +++ b/src/platform/baytrail/lib/clk.c @@ -118,6 +118,5 @@ void 
platform_clock_init(struct sof *sof) sof->clocks = platform_clocks_info; for (i = 0; i < NUM_CLOCKS; i++) - spinlock_init(&sof->clocks[i].lock); - + k_spinlock_init(&sof->clocks[i].lock); } diff --git a/src/platform/baytrail/lib/dai.c b/src/platform/baytrail/lib/dai.c index 5c177c1f7611..e89cad4b9455 100644 --- a/src/platform/baytrail/lib/dai.c +++ b/src/platform/baytrail/lib/dai.c @@ -135,8 +135,7 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(ssp); i++) - spinlock_init(&ssp[i].lock); - + k_spinlock_init(&ssp[i].lock); sof->dai_info = &lib_dai; diff --git a/src/platform/baytrail/lib/dma.c b/src/platform/baytrail/lib/dma.c index 24b950af9479..cca99fe54c38 100644 --- a/src/platform/baytrail/lib/dma.c +++ b/src/platform/baytrail/lib/dma.c @@ -200,7 +200,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; diff --git a/src/platform/haswell/lib/clk.c b/src/platform/haswell/lib/clk.c index 79be3843f256..9f811dd1b710 100644 --- a/src/platform/haswell/lib/clk.c +++ b/src/platform/haswell/lib/clk.c @@ -90,6 +90,5 @@ void platform_clock_init(struct sof *sof) sof->clocks = platform_clocks_info; for (i = 0; i < NUM_CLOCKS; i++) - spinlock_init(&sof->clocks[i].lock); - + k_spinlock_init(&sof->clocks[i].lock); } diff --git a/src/platform/haswell/lib/dai.c b/src/platform/haswell/lib/dai.c index af7529afc5b5..5ac16e69e302 100644 --- a/src/platform/haswell/lib/dai.c +++ b/src/platform/haswell/lib/dai.c @@ -68,7 +68,7 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(ssp); i++) - spinlock_init(&ssp[i].lock); + k_spinlock_init(&ssp[i].lock); sof->dai_info = &lib_dai; diff --git a/src/platform/haswell/lib/dma.c b/src/platform/haswell/lib/dma.c index 15364f6ac84f..9072b4593080 100644 --- 
a/src/platform/haswell/lib/dma.c +++ b/src/platform/haswell/lib/dma.c @@ -129,7 +129,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); /* clear the masks for dsp of the dmacs */ io_reg_update_bits(SHIM_BASE + SHIM_IMRD, diff --git a/src/platform/imx8/lib/clk.c b/src/platform/imx8/lib/clk.c index 3414fd97c569..002c82b3b308 100644 --- a/src/platform/imx8/lib/clk.c +++ b/src/platform/imx8/lib/clk.c @@ -46,7 +46,6 @@ void platform_clock_init(struct sof *sof) .set_freq = NULL, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } - } diff --git a/src/platform/imx8/lib/dai.c b/src/platform/imx8/lib/dai.c index 7ef400c3b14b..dae28a21ba54 100644 --- a/src/platform/imx8/lib/dai.c +++ b/src/platform/imx8/lib/dai.c @@ -94,10 +94,10 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(esai); i++) - spinlock_init(&esai[i].lock); + k_spinlock_init(&esai[i].lock); for (i = 0; i < ARRAY_SIZE(sai); i++) - spinlock_init(&sai[i].lock); + k_spinlock_init(&sai[i].lock); sof->dai_info = &lib_dai; diff --git a/src/platform/imx8/lib/dma.c b/src/platform/imx8/lib/dma.c index f999cf550660..83baf370cbba 100644 --- a/src/platform/imx8/lib/dma.c +++ b/src/platform/imx8/lib/dma.c @@ -57,7 +57,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; diff --git a/src/platform/imx8m/lib/clk.c b/src/platform/imx8m/lib/clk.c index 998aede61100..52d103218e73 100644 --- a/src/platform/imx8m/lib/clk.c +++ b/src/platform/imx8m/lib/clk.c @@ -41,7 +41,6 @@ void platform_clock_init(struct sof *sof) .set_freq = NULL, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } - } diff --git 
a/src/platform/imx8m/lib/dai.c b/src/platform/imx8m/lib/dai.c index cd572ddbc767..b9c9ce4912b5 100644 --- a/src/platform/imx8m/lib/dai.c +++ b/src/platform/imx8m/lib/dai.c @@ -75,7 +75,7 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(sai); i++) - spinlock_init(&sai[i].lock); + k_spinlock_init(&sai[i].lock); sof->dai_info = &lib_dai; diff --git a/src/platform/imx8m/lib/dma.c b/src/platform/imx8m/lib/dma.c index 58e63548fb48..2731fa2b7271 100644 --- a/src/platform/imx8m/lib/dma.c +++ b/src/platform/imx8m/lib/dma.c @@ -52,7 +52,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; diff --git a/src/platform/imx8ulp/lib/clk.c b/src/platform/imx8ulp/lib/clk.c index 576fa577047c..6fb91aecb07c 100644 --- a/src/platform/imx8ulp/lib/clk.c +++ b/src/platform/imx8ulp/lib/clk.c @@ -38,7 +38,7 @@ void platform_clock_init(struct sof *sof) .set_freq = NULL, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } platform_shared_commit(sof->clocks, sizeof(*sof->clocks) * NUM_CLOCKS); diff --git a/src/platform/imx8ulp/lib/dai.c b/src/platform/imx8ulp/lib/dai.c index 519940fdf043..892973942469 100644 --- a/src/platform/imx8ulp/lib/dai.c +++ b/src/platform/imx8ulp/lib/dai.c @@ -87,7 +87,7 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(sai); i++) - spinlock_init(&sai[i].lock); + k_spinlock_init(&sai[i].lock); platform_shared_commit(sai, sizeof(*sai)); diff --git a/src/platform/imx8ulp/lib/dma.c b/src/platform/imx8ulp/lib/dma.c index 6881f29287da..ddd1b0cd2f00 100644 --- a/src/platform/imx8ulp/lib/dma.c +++ b/src/platform/imx8ulp/lib/dma.c @@ -55,7 +55,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < 
ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); platform_shared_commit(dma, sizeof(*dma)); diff --git a/src/platform/intel/cavs/lib/clk.c b/src/platform/intel/cavs/lib/clk.c index 4a2b4888af1d..6b33db07ed93 100644 --- a/src/platform/intel/cavs/lib/clk.c +++ b/src/platform/intel/cavs/lib/clk.c @@ -79,12 +79,12 @@ static inline void select_cpu_clock_hw(int freq_idx, bool release_unused) static inline void select_cpu_clock(int freq_idx, bool release_unused) { struct clock_info *clk_info = clocks_get(); - int flags[CONFIG_CORE_COUNT]; + k_spinlock_key_t key[CONFIG_CORE_COUNT]; int i; /* lock clock for all cores */ for (i = 0; i < CONFIG_CORE_COUNT; i++) - spin_lock_irq(&clk_info[CLK_CPU(i)].lock, flags[i]); + key[i] = k_spin_lock(&clk_info[CLK_CPU(i)].lock); /* change clock */ select_cpu_clock_hw(freq_idx, release_unused); @@ -93,7 +93,7 @@ static inline void select_cpu_clock(int freq_idx, bool release_unused) /* unlock clock for all cores */ for (i = CONFIG_CORE_COUNT - 1; i >= 0; i--) - spin_unlock_irq(&clk_info[CLK_CPU(i)].lock, flags[i]); + k_spin_unlock(&clk_info[CLK_CPU(i)].lock, key[i]); } /* LPRO_ONLY mode */ @@ -178,13 +178,13 @@ static void platform_clock_low_power_mode(int clock, bool enable) void platform_clock_on_waiti(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int freq_idx; int lowest_freq_idx; bool pm_is_active; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); freq_idx = *cache_to_uncache(&active_freq_idx); lowest_freq_idx = get_lowest_freq_idx(CLK_CPU(cpu_get_id())); @@ -200,7 +200,7 @@ void platform_clock_on_waiti(void) set_cpu_current_freq_idx(lowest_freq_idx, true); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); /* check if waiti HPRO->LPRO switching is needed */ pm_runtime_put(CORE_HP_CLK, cpu_get_id()); @@ -253,13 +253,13 @@ static void 
platform_clock_low_power_mode(int clock, bool enable) void platform_clock_on_waiti(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int freq_idx; int lowest_freq_idx; bool pm_is_active; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); freq_idx = *cache_to_uncache(&active_freq_idx); lowest_freq_idx = get_lowest_freq_idx(CLK_CPU(cpu_get_id())); @@ -275,18 +275,18 @@ void platform_clock_on_waiti(void) set_cpu_current_freq_idx(lowest_freq_idx, true); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } void platform_clock_on_wakeup(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int current_idx; int target_idx; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); current_idx = get_current_freq_idx(CLK_CPU(cpu_get_id())); target_idx = *cache_to_uncache(&active_freq_idx); @@ -295,7 +295,7 @@ void platform_clock_on_wakeup(void) if (current_idx != target_idx) set_cpu_current_freq_idx(target_idx, true); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } #endif @@ -340,7 +340,7 @@ void platform_clock_init(struct sof *sof) .low_power_mode = platform_clock_low_power_mode, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } sof->clocks[CLK_SSP] = (struct clock_info) { @@ -353,6 +353,5 @@ void platform_clock_init(struct sof *sof) .set_freq = NULL, }; - spinlock_init(&sof->clocks[CLK_SSP].lock); - + k_spinlock_init(&sof->clocks[CLK_SSP].lock); } diff --git a/src/platform/intel/cavs/lib/dai.c b/src/platform/intel/cavs/lib/dai.c index f4c56193bb5d..cf9fcc084224 100644 --- a/src/platform/intel/cavs/lib/dai.c +++ b/src/platform/intel/cavs/lib/dai.c @@ -147,7 +147,7 @@ int dai_init(struct sof *sof) 
dai[i].plat_data.fifo[SOF_IPC_STREAM_CAPTURE].handshake = DMA_HANDSHAKE_SSP0_RX + 2 * i; /* initialize spin locks early to enable ref counting */ - spinlock_init(&dai[i].lock); + k_spinlock_init(&dai[i].lock); } #endif @@ -162,7 +162,7 @@ int dai_init(struct sof *sof) for (i = 0; i < ARRAY_SIZE(hda); i++) { dai[i].index = i; dai[i].drv = &hda_driver; - spinlock_init(&dai[i].lock); + k_spinlock_init(&dai[i].lock); } #if (CONFIG_INTEL_DMIC) @@ -170,8 +170,7 @@ int dai_init(struct sof *sof) /* init dmic */ for (i = 0; i < ARRAY_SIZE(dmic); i++) - spinlock_init(&dai[i].lock); - + k_spinlock_init(&dai[i].lock); #endif #if CONFIG_INTEL_ALH @@ -189,7 +188,7 @@ int dai_init(struct sof *sof) ALH_GPDMA_BURST_LENGTH; dai[i].plat_data.fifo[SOF_IPC_STREAM_CAPTURE].depth = ALH_GPDMA_BURST_LENGTH; - spinlock_init(&dai[i].lock); + k_spinlock_init(&dai[i].lock); } #endif diff --git a/src/platform/intel/cavs/lib/dma.c b/src/platform/intel/cavs/lib/dma.c index 83678240cfda..0ec854083ad9 100644 --- a/src/platform/intel/cavs/lib/dma.c +++ b/src/platform/intel/cavs/lib/dma.c @@ -255,7 +255,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < sof->dma_info->num_dmas; i++) - spinlock_init(&sof->dma_info->dma_array[i].lock); + k_spinlock_init(&sof->dma_info->dma_array[i].lock); return 0; } diff --git a/src/platform/intel/cavs/lib/pm_runtime.c b/src/platform/intel/cavs/lib/pm_runtime.c index 69ec66c6977e..085e148fd6c7 100644 --- a/src/platform/intel/cavs/lib/pm_runtime.c +++ b/src/platform/intel/cavs/lib/pm_runtime.c @@ -61,13 +61,13 @@ static void cavs_pm_runtime_host_dma_l1_get(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); pprd->host_dma_l1_sref++; - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } /** @@ -78,9 +78,9 @@ static inline void 
cavs_pm_runtime_host_dma_l1_put(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); if (!--pprd->host_dma_l1_sref) { shim_write(SHIM_SVCFG, @@ -92,7 +92,7 @@ static inline void cavs_pm_runtime_host_dma_l1_put(void) shim_read(SHIM_SVCFG) & ~(SHIM_SVCFG_FORCE_L1_EXIT)); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } static inline void cavs_pm_runtime_enable_dsp(bool enable) @@ -376,9 +376,9 @@ static inline void cavs_pm_runtime_core_dis_hp_clk(uint32_t index) int enabled_cores = cpu_enabled_cores(); struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); pprd->sleep_core_mask |= BIT(index); @@ -388,21 +388,21 @@ static inline void cavs_pm_runtime_core_dis_hp_clk(uint32_t index) if (all_active_cores_sleep) clock_low_power_mode(CLK_CPU(index), true); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } static inline void cavs_pm_runtime_core_en_hp_clk(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); pprd->sleep_core_mask &= ~BIT(index); clock_low_power_mode(CLK_CPU(index), false); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } static inline void cavs_pm_runtime_dis_dsp_pg(uint32_t index) @@ -589,26 +589,26 @@ void platform_pm_runtime_prepare_d0ix_en(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = 
k_spin_lock(&prd->lock); pprd->prepare_d0ix_core_mask |= BIT(index); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } void platform_pm_runtime_prepare_d0ix_dis(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock(&prd->lock); pprd->prepare_d0ix_core_mask &= ~BIT(index); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock(&prd->lock, key); } int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index) diff --git a/src/platform/mt8186/lib/clk.c b/src/platform/mt8186/lib/clk.c index b3841abee501..f9169e6e7186 100644 --- a/src/platform/mt8186/lib/clk.c +++ b/src/platform/mt8186/lib/clk.c @@ -99,7 +99,7 @@ void platform_clock_init(struct sof *sof) .set_freq = clock_platform_set_dsp_freq, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } /* DSP bus clock */ diff --git a/src/platform/mt8186/lib/dma.c b/src/platform/mt8186/lib/dma.c index 970d788906bd..e8568f88724d 100644 --- a/src/platform/mt8186/lib/dma.c +++ b/src/platform/mt8186/lib/dma.c @@ -34,7 +34,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; diff --git a/src/platform/mt8195/lib/clk.c b/src/platform/mt8195/lib/clk.c index 27d2377c74ae..3a548417491e 100644 --- a/src/platform/mt8195/lib/clk.c +++ b/src/platform/mt8195/lib/clk.c @@ -191,7 +191,7 @@ void platform_clock_init(struct sof *sof) .set_freq = clock_platform_set_cpu_freq, }; - spinlock_init(&sof->clocks[i].lock); + k_spinlock_init(&sof->clocks[i].lock); } adsp_clock = 0; diff --git a/src/platform/mt8195/lib/dai.c b/src/platform/mt8195/lib/dai.c index 978146d2d83e..2a0472704469 100644 --- a/src/platform/mt8195/lib/dai.c +++ b/src/platform/mt8195/lib/dai.c @@ -43,7 
+43,7 @@ int dai_init(struct sof *sof) /* initialize spin locks early to enable ref counting */ for (i = 0; i < ARRAY_SIZE(afe_dai); i++) { - spinlock_init(&afe_dai[i].lock); + k_spinlock_init(&afe_dai[i].lock); afe_dai[i].index = AFE_HS_GET_DAI(afe_dai_handshake[i]); afe_dai[i].drv = &afe_dai_driver; /* TODO, fifo[0] change to target playback or capture */ diff --git a/src/platform/mt8195/lib/dma.c b/src/platform/mt8195/lib/dma.c index 3841a3badbbc..115fe2d891f3 100644 --- a/src/platform/mt8195/lib/dma.c +++ b/src/platform/mt8195/lib/dma.c @@ -47,7 +47,7 @@ int dmac_init(struct sof *sof) /* early lock initialization for ref counting */ for (i = 0; i < ARRAY_SIZE(dma); i++) - spinlock_init(&dma[i].lock); + k_spinlock_init(&dma[i].lock); sof->dma_info = &lib_dma; diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index 61b8077a3cbe..c6cf0ccfcd0a 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -92,8 +92,9 @@ static bool schedule_ll_is_pending(struct ll_schedule_data *sch) struct task *task; uint32_t pending_count = 0; struct comp_dev *sched_comp; + k_spinlock_key_t key; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); do { sched_comp = NULL; @@ -117,7 +118,7 @@ static bool schedule_ll_is_pending(struct ll_schedule_data *sch) } } while (sched_comp); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); return pending_count > 0; } @@ -154,6 +155,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) struct ll_schedule_domain *domain = sch->domain; struct list_item *wlist; struct task *task; + k_spinlock_key_t key; /* check each task in the list for pending */ wlist = sch->tasks.next; @@ -183,7 +185,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) wlist = task->list.next; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* do we need to reschedule this task */ if (task->state == SOF_TASK_STATE_COMPLETED) { @@ -196,7 +198,7 @@ static void 
schedule_ll_tasks_execute(struct ll_schedule_data *sch) (uint32_t)domain->next_tick); } - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); } } @@ -239,6 +241,7 @@ static void schedule_ll_tasks_run(void *data) { struct ll_schedule_data *sch = data; struct ll_schedule_domain *domain = sch->domain; + k_spinlock_key_t key; uint32_t flags; uint32_t core = cpu_get_id(); @@ -248,7 +251,7 @@ static void schedule_ll_tasks_run(void *data) (unsigned int)domain->next_tick); irq_local_disable(flags); - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* disable domain on current core until tasks are finished */ domain_disable(domain, core); @@ -258,7 +261,7 @@ static void schedule_ll_tasks_run(void *data) domain_clear(domain); } - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); perf_cnt_init(&sch->pcd); @@ -271,7 +274,7 @@ static void schedule_ll_tasks_run(void *data) perf_cnt_stamp(&sch->pcd, perf_ll_sched_trace, 0 /* ignored */); - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* reset the new_target_tick for the first core */ if (domain->new_target_tick < platform_timer_get_atomic(timer_get())) @@ -291,7 +294,7 @@ static void schedule_ll_tasks_run(void *data) if (atomic_read(&sch->num_tasks)) domain_enable(domain, core); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); irq_local_enable(flags); } @@ -306,9 +309,10 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, uint64_t task_start_ticks; uint64_t task_start; uint64_t offset; + k_spinlock_key_t key; int ret; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); ret = domain_register(domain, task, &schedule_ll_tasks_run, sch); if (ret < 0) { @@ -364,7 +368,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, atomic_read(&domain->total_num_tasks)); done: - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); return ret; } @@ -373,8 +377,9 @@ static void schedule_ll_domain_clear(struct 
ll_schedule_data *sch, struct task *task) { struct ll_schedule_domain *domain = sch->domain; + k_spinlock_key_t key; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* * Decrement the number of tasks on the core. @@ -390,7 +395,7 @@ static void schedule_ll_domain_clear(struct ll_schedule_data *sch, atomic_read(&sch->num_tasks), atomic_read(&domain->total_num_tasks)); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); } static void schedule_ll_task_insert(struct task *task, struct list_item *tasks) diff --git a/src/schedule/zephyr.c b/src/schedule/zephyr.c index 6d1530fecf1b..5db1a660291d 100644 --- a/src/schedule/zephyr.c +++ b/src/schedule/zephyr.c @@ -59,7 +59,7 @@ static void idc_handler(struct k_p4wq_work *work) struct ipc *ipc = ipc_get(); struct idc_msg *msg = &zmsg->msg; int payload = -1; - uint32_t flags; + k_spinlock_key_t key; SOC_DCACHE_INVALIDATE(msg, sizeof(*msg)); @@ -81,10 +81,10 @@ static void idc_handler(struct k_p4wq_work *work) case IDC_MSG_IPC: idc_cmd(&idc->received_msg); /* Signal the host */ - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock(&ipc->lock); ipc->task_mask &= ~IPC_TASK_SECONDARY_CORE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock(&ipc->lock, key); } } diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index f085ac5f0a4b..6b9960268f3e 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -132,7 +132,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; char thread_name[] = "ll_thread0"; k_tid_t thread; - uint32_t flags; + k_spinlock_key_t key; tr_dbg(&ll_tr, "zephyr_domain_register()"); @@ -160,7 +160,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_thread_start(thread); - spin_lock_irq(&domain->lock, flags); + key = k_spin_lock(&domain->lock); if (!k_timer_user_data_get(&zephyr_domain->timer)) { 
k_timeout_t start = {0}; @@ -173,7 +173,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_timer_remaining_ticks(&zephyr_domain->timer); } - spin_unlock_irq(&domain->lock, flags); + k_spin_unlock(&domain->lock, key); tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d", domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)LL_TIMER_PERIOD_US); @@ -186,7 +186,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); int core = cpu_get_id(); - uint32_t flags; + k_spinlock_key_t key; tr_dbg(&ll_tr, "zephyr_domain_unregister()"); @@ -194,7 +194,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, if (num_tasks) return 0; - spin_lock_irq(&domain->lock, flags); + key = k_spin_lock(&domain->lock); if (!atomic_read(&domain->total_num_tasks)) { k_timer_stop(&zephyr_domain->timer); @@ -203,7 +203,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, zephyr_domain->domain_thread[core].handler = NULL; - spin_unlock_irq(&domain->lock, flags); + k_spin_unlock(&domain->lock, key); tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d", domain->type, domain->clk); diff --git a/src/spinlock.c b/src/spinlock.c index 2895f42103b5..34cc5e533968 100644 --- a/src/spinlock.c +++ b/src/spinlock.c @@ -22,27 +22,27 @@ DECLARE_TR_CTX(sl_tr, SOF_UUID(spinlock_uuid), LOG_LEVEL_INFO); #endif -uint32_t _spin_lock_irq(spinlock_t *lock) +#ifndef __ZEPHYR__ +k_spinlock_key_t _k_spin_lock_irq(struct k_spinlock *lock) { - uint32_t flags; - - flags = interrupt_global_disable(); + k_spinlock_key_t key = interrupt_global_disable(); #if CONFIG_DEBUG_LOCKS lock_dbg_atomic++; #endif - spin_lock(lock); + arch_spin_lock(lock); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic < DBG_LOCK_USERS) lock_dbg_user[lock_dbg_atomic - 1] = (lock)->user; #endif - return flags; + return key; } 
-void _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line) +void _k_spin_unlock_irq(struct k_spinlock *lock, k_spinlock_key_t key, int line) { - _spin_unlock(lock, line); + arch_spin_unlock(lock); #if CONFIG_DEBUG_LOCKS lock_dbg_atomic--; #endif - interrupt_global_enable(flags); + interrupt_global_enable(key); } +#endif diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 35d5f338277b..e3bd79207f5c 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -54,7 +54,7 @@ static enum task_state trace_work(void *data) struct dma_trace_data *d = data; struct dma_trace_buf *buffer = &d->dmatb; struct dma_sg_config *config = &d->config; - unsigned long flags; + k_spinlock_key_t key; uint32_t avail = buffer->avail; int32_t size; uint32_t overflow; @@ -103,7 +103,7 @@ static enum task_state trace_work(void *data) ipc_msg_send(d->msg, &d->posn, false); out: - spin_lock_irq(&d->lock, flags); + key = k_spin_lock(&d->lock); /* disregard any old messages and don't resend them if we overflow */ if (size > 0) { @@ -116,7 +116,7 @@ static enum task_state trace_work(void *data) /* DMA trace copying is done, allow reschedule */ d->copy_in_progress = 0; - spin_unlock_irq(&d->lock, flags); + k_spin_unlock(&d->lock, key); /* reschedule the trace copying work */ return SOF_TASK_STATE_RESCHEDULE; @@ -142,7 +142,7 @@ int dma_trace_init_early(struct sof *sof) sof->dmat = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->dmat)); dma_sg_init(&sof->dmat->config.elem_array); - spinlock_init(&sof->dmat->lock); + k_spinlock_init(&sof->dmat->lock); ipc_build_trace_posn(&sof->dmat->posn); sof->dmat->msg = ipc_msg_init(sof->dmat->posn.rhdr.hdr.cmd, @@ -222,7 +222,7 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) { struct dma_trace_buf *buffer = &d->dmatb; void *buf; - unsigned int flags; + k_spinlock_key_t key; /* allocate new buffer */ buf = rballoc(0, SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA, @@ -236,7 +236,7 @@ static int 
dma_trace_buffer_init(struct dma_trace_data *d) dcache_writeback_region(buf, DMA_TRACE_LOCAL_SIZE); /* initialise the DMA buffer, whole sequence in section */ - spin_lock_irq(&d->lock, flags); + key = k_spin_lock(&d->lock); buffer->addr = buf; buffer->size = DMA_TRACE_LOCAL_SIZE; @@ -245,7 +245,7 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) buffer->end_addr = (char *)buffer->addr + buffer->size; buffer->avail = 0; - spin_unlock_irq(&d->lock, flags); + k_spin_unlock(&d->lock, key); return 0; } @@ -253,14 +253,14 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) static void dma_trace_buffer_free(struct dma_trace_data *d) { struct dma_trace_buf *buffer = &d->dmatb; - unsigned int flags; + k_spinlock_key_t key; - spin_lock_irq(&d->lock, flags); + key = k_spin_lock(&d->lock); rfree(buffer->addr); memset(buffer, 0, sizeof(*buffer)); - spin_unlock_irq(&d->lock, flags); + k_spin_unlock(&d->lock, key); } #if CONFIG_DMA_GW @@ -663,7 +663,7 @@ void dtrace_event(const char *e, uint32_t length) { struct dma_trace_data *trace_data = dma_trace_data_get(); struct dma_trace_buf *buffer = NULL; - unsigned long flags; + k_spinlock_key_t key; if (!dma_trace_initialized(trace_data) || length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) { @@ -672,7 +672,7 @@ void dtrace_event(const char *e, uint32_t length) buffer = &trace_data->dmatb; - spin_lock_irq(&trace_data->lock, flags); + key = k_spin_lock(&trace_data->lock); dtrace_add_event(e, length); /* if DMA trace copying is working or secondary core @@ -680,11 +680,11 @@ void dtrace_event(const char *e, uint32_t length) */ if (trace_data->copy_in_progress || cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) { - spin_unlock_irq(&trace_data->lock, flags); + k_spin_unlock(&trace_data->lock, key); return; } - spin_unlock_irq(&trace_data->lock, flags); + k_spin_unlock(&trace_data->lock, key); /* schedule copy now if buffer > 50% full */ if (trace_data->enabled && diff --git a/src/trace/trace.c b/src/trace/trace.c index 
0e0eaadd89d9..bd948d30e734 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -49,7 +49,7 @@ struct trace { #if CONFIG_TRACE_FILTERING_ADAPTIVE bool user_filter_override; /* whether filtering was overridden by user or not */ #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */ - spinlock_t lock; /* locking mechanism */ + struct k_spinlock lock; /* locking mechanism */ #if CONFIG_TRACE_FILTERING_ADAPTIVE struct recent_trace_context trace_core_context[CONFIG_CORE_COUNT]; @@ -456,9 +456,9 @@ void trace_flush_dma_to_mbox(void) { struct trace *trace = trace_get(); volatile uint64_t *t; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock(&trace->lock); /* get mailbox position */ t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos); @@ -466,33 +466,33 @@ void trace_flush_dma_to_mbox(void) /* flush dma trace messages */ dma_trace_flush((void *)t); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock(&trace->lock, key); } void trace_on(void) { struct trace *trace = trace_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock(&trace->lock); trace->enable = 1; dma_trace_on(); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock(&trace->lock, key); } void trace_off(void) { struct trace *trace = trace_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock(&trace->lock); trace->enable = 0; dma_trace_off(); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock(&trace->lock, key); } void trace_init(struct sof *sof) @@ -503,7 +503,7 @@ void trace_init(struct sof *sof) #if CONFIG_TRACE_FILTERING_ADAPTIVE sof->trace->user_filter_override = false; #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */ - spinlock_init(&sof->trace->lock); + k_spinlock_init(&sof->trace->lock); #ifndef __ZEPHYR__ /* Zephyr owns and has already initialized this buffer (and @@ -536,11 +536,11 @@ static void mtrace_dict_entry_vl(bool atomic_context, 
uint32_t dict_entry_addres mtrace_event(packet, MESSAGE_SIZE(n_args)); } else { struct trace * const trace = trace_get(); - uint32_t saved_flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, saved_flags); + key = k_spin_lock(&trace->lock); mtrace_event(packet, MESSAGE_SIZE(n_args)); - spin_unlock_irq(&trace->lock, saved_flags); + k_spin_unlock(&trace->lock, key); } } diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c index 62bbb4396059..b69af01804cf 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -146,19 +147,21 @@ volatile void * WEAK task_context_get(void) return NULL; } -uint32_t WEAK _spin_lock_irq(spinlock_t *lock) +#ifndef __ZEPHYR__ +uint32_t WEAK _k_spin_lock_irq(struct k_spinlock *lock) { (void)lock; return 0; } -void WEAK _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line) +void WEAK _k_spin_unlock_irq(struct k_spinlock *lock, uint32_t flags, int line) { (void)lock; (void)flags; (void)line; } +#endif uint64_t WEAK platform_timer_get(struct timer *timer) { From 70ceabca9ed01bd2c4065468b2cc28e13e575c1c Mon Sep 17 00:00:00 2001 From: Guennadi Liakhovetski Date: Thu, 3 Feb 2022 09:10:41 +0100 Subject: [PATCH 2/2] coherent: remove now redundant _irq version of functions Now coherent_acquire_irq() has become equivalent to coherent_acquire(), similar for coherent_release() and coherent_shared(). Remove the _irq version and its only user - the buffer locking API. 
Signed-off-by: Guennadi Liakhovetski --- src/audio/asrc/asrc.c | 8 +- src/audio/buffer.c | 8 +- src/audio/crossover/crossover.c | 10 +-- src/audio/dai.c | 4 +- src/audio/google_hotword_detect.c | 4 +- src/audio/google_rtc_audio_processing.c | 8 +- src/audio/host.c | 12 +-- src/audio/kpb.c | 18 ++-- src/audio/mixer.c | 12 +-- src/audio/mux/mux.c | 28 +++--- src/audio/pipeline/pipeline-graph.c | 8 +- src/audio/pipeline/pipeline-params.c | 8 +- src/audio/selector/selector.c | 14 +-- src/audio/smart_amp/smart_amp.c | 30 +++---- src/audio/src/src.c | 8 +- src/audio/tone.c | 12 +-- src/include/sof/audio/buffer.h | 22 +---- src/include/sof/audio/component.h | 8 +- src/include/sof/coherent.h | 108 ++---------------------- src/ipc/ipc-helper.c | 12 +-- src/samples/audio/detect_test.c | 4 +- src/samples/audio/smart_amp_test.c | 30 +++---- 22 files changed, 136 insertions(+), 240 deletions(-) diff --git a/src/audio/asrc/asrc.c b/src/audio/asrc/asrc.c index e5304254f568..2758f7580bd9 100644 --- a/src/audio/asrc/asrc.c +++ b/src/audio/asrc/asrc.c @@ -987,14 +987,14 @@ static int asrc_copy(struct comp_dev *dev) sink = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - source = buffer_acquire_irq(source); - sink = buffer_acquire_irq(sink); + source = buffer_acquire(source); + sink = buffer_acquire(sink); frames_src = audio_stream_get_avail_frames(&source->stream); frames_snk = audio_stream_get_free_frames(&sink->stream); - buffer_release_irq(sink); - buffer_release_irq(source); + buffer_release(sink); + buffer_release(source); if (cd->mode == ASRC_OM_PULL) { /* Let ASRC access max number of source frames in pull mode. 
diff --git a/src/audio/buffer.c b/src/audio/buffer.c index 02e7df1cc3d4..59dce65905b6 100644 --- a/src/audio/buffer.c +++ b/src/audio/buffer.c @@ -190,7 +190,7 @@ void comp_update_buffer_produce(struct comp_buffer *buffer, uint32_t bytes) return; } - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); audio_stream_produce(&buffer->stream, bytes); @@ -205,7 +205,7 @@ void comp_update_buffer_produce(struct comp_buffer *buffer, uint32_t bytes) ((char *)buffer->stream.r_ptr - (char *)buffer->stream.addr) << 16 | ((char *)buffer->stream.w_ptr - (char *)buffer->stream.addr)); - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); } void comp_update_buffer_consume(struct comp_buffer *buffer, uint32_t bytes) @@ -226,7 +226,7 @@ void comp_update_buffer_consume(struct comp_buffer *buffer, uint32_t bytes) return; } - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); audio_stream_consume(&buffer->stream, bytes); @@ -240,5 +240,5 @@ void comp_update_buffer_consume(struct comp_buffer *buffer, uint32_t bytes) ((char *)buffer->stream.r_ptr - (char *)buffer->stream.addr) << 16 | ((char *)buffer->stream.w_ptr - (char *)buffer->stream.addr)); - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); } diff --git a/src/audio/crossover/crossover.c b/src/audio/crossover/crossover.c index be8129256b0f..bdd5b90057eb 100644 --- a/src/audio/crossover/crossover.c +++ b/src/audio/crossover/crossover.c @@ -671,11 +671,11 @@ static int crossover_copy(struct comp_dev *dev) else num_sinks = num_assigned_sinks; - source = buffer_acquire_irq(source); + source = buffer_acquire(source); /* Check if source is active */ if (source->source->state != dev->state) { - source = buffer_release_irq(source); + source = buffer_release(source); return -EINVAL; } @@ -683,14 +683,14 @@ static int crossover_copy(struct comp_dev *dev) for (i = 0; i < num_sinks; i++) { if (!sinks[i]) continue; - sinks[i] = buffer_acquire_irq(sinks[i]); + 
sinks[i] = buffer_acquire(sinks[i]); avail = audio_stream_avail_frames(&source->stream, &sinks[i]->stream); frames = MIN(frames, avail); - buffer_release_irq(sinks[i]); + buffer_release(sinks[i]); } - source = buffer_release_irq(source); + source = buffer_release(source); source_bytes = frames * audio_stream_frame_bytes(&source->stream); diff --git a/src/audio/dai.c b/src/audio/dai.c index 167606d1aa52..2b77e7372779 100644 --- a/src/audio/dai.c +++ b/src/audio/dai.c @@ -910,7 +910,7 @@ static int dai_copy(struct comp_dev *dev) return ret; } - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); /* calculate minimum size to copy */ if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { @@ -930,7 +930,7 @@ static int dai_copy(struct comp_dev *dev) copy_bytes = samples * sampling; - buffer_release_irq(buf); + buffer_release(buf); comp_dbg(dev, "dai_copy(), dir: %d copy_bytes= 0x%x, frames= %d", dev->direction, copy_bytes, diff --git a/src/audio/google_hotword_detect.c b/src/audio/google_hotword_detect.c index f96903f40129..916509d3842c 100644 --- a/src/audio/google_hotword_detect.c +++ b/src/audio/google_hotword_detect.c @@ -396,9 +396,9 @@ static int ghd_copy(struct comp_dev *dev) struct comp_buffer, sink_list); stream = &source->stream; - source = buffer_acquire_irq(source); + source = buffer_acquire(source); bytes = audio_stream_get_avail_bytes(&source->stream); - source = buffer_release_irq(source); + source = buffer_release(source); comp_dbg(dev, "ghd_copy() avail_bytes %u", bytes); comp_dbg(dev, "buffer begin/r_ptr/end [0x%x 0x%x 0x%x]", diff --git a/src/audio/google_rtc_audio_processing.c b/src/audio/google_rtc_audio_processing.c index 4f5ca6901354..c1d4149613d4 100644 --- a/src/audio/google_rtc_audio_processing.c +++ b/src/audio/google_rtc_audio_processing.c @@ -248,12 +248,12 @@ static int google_rtc_audio_processing_prepare(struct comp_dev *dev) list_for_item(source_buffer_list_item, &dev->bsource_list) { source_buffer = 
container_of(source_buffer_list_item, struct comp_buffer, sink_list); - source_buffer = buffer_acquire_irq(source_buffer); + source_buffer = buffer_acquire(source_buffer); if (source_buffer->source->ipc_config.type == SOF_COMP_DEMUX) cd->aec_reference = source_buffer; else if (source_buffer->source->ipc_config.type == SOF_COMP_DAI) cd->raw_microphone = source_buffer; - source_buffer = buffer_release_irq(source_buffer); + source_buffer = buffer_release(source_buffer); } cd->output = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); @@ -297,10 +297,10 @@ static int google_rtc_audio_processing_copy(struct comp_dev *dev) uint32_t num_aec_reference_frames; uint32_t num_aec_reference_bytes; - cd->aec_reference = buffer_acquire_irq(cd->aec_reference); + cd->aec_reference = buffer_acquire(cd->aec_reference); num_aec_reference_frames = audio_stream_get_avail_frames(&cd->aec_reference->stream); num_aec_reference_bytes = audio_stream_get_avail_bytes(&cd->aec_reference->stream); - cd->aec_reference = buffer_release_irq(cd->aec_reference); + cd->aec_reference = buffer_release(cd->aec_reference); buffer_stream_invalidate(cd->aec_reference, num_aec_reference_bytes); diff --git a/src/audio/host.c b/src/audio/host.c index 8f0495723569..5e48665d5928 100644 --- a/src/audio/host.c +++ b/src/audio/host.c @@ -170,7 +170,7 @@ static uint32_t host_get_copy_bytes_one_shot(struct comp_dev *dev) struct comp_buffer *buffer = hd->local_buffer; uint32_t copy_bytes = 0; - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* calculate minimum size to copy */ if (dev->direction == SOF_IPC_STREAM_PLAYBACK) @@ -178,7 +178,7 @@ static uint32_t host_get_copy_bytes_one_shot(struct comp_dev *dev) else copy_bytes = audio_stream_get_avail_bytes(&buffer->stream); - buffer_release_irq(buffer); + buffer_release(buffer); /* copy_bytes should be aligned to minimum possible chunk of * data to be copied by dma. 
@@ -240,7 +240,7 @@ static uint32_t host_get_copy_bytes_one_shot(struct comp_dev *dev) uint32_t copy_bytes = 0; uint32_t split_value; - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* calculate minimum size to copy */ if (dev->direction == SOF_IPC_STREAM_PLAYBACK) @@ -248,7 +248,7 @@ static uint32_t host_get_copy_bytes_one_shot(struct comp_dev *dev) else copy_bytes = audio_stream_get_avail_bytes(&buffer->stream); - buffer_release_irq(buffer); + buffer_release(buffer); /* copy_bytes should be aligned to minimum possible chunk of * data to be copied by dma. @@ -450,7 +450,7 @@ static uint32_t host_get_copy_bytes_normal(struct comp_dev *dev) return 0; } - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* calculate minimum size to copy */ if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { @@ -470,7 +470,7 @@ static uint32_t host_get_copy_bytes_normal(struct comp_dev *dev) avail_bytes, free_bytes); } - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); /* copy_bytes should be aligned to minimum possible chunk of * data to be copied by dma. 
diff --git a/src/audio/kpb.c b/src/audio/kpb.c index de5dae05624f..4d3b504ec732 100644 --- a/src/audio/kpb.c +++ b/src/audio/kpb.c @@ -640,17 +640,17 @@ static int kpb_copy(struct comp_dev *dev) goto out; } - source = buffer_acquire_irq(source); + source = buffer_acquire(source); /* Validate source */ if (!source->stream.r_ptr) { comp_err(dev, "kpb_copy(): invalid source pointers."); ret = -EINVAL; - source = buffer_release_irq(source); + source = buffer_release(source); goto out; } - source = buffer_release_irq(source); + source = buffer_release(source); switch (kpb->state) { case KPB_STATE_RUN: @@ -664,17 +664,17 @@ static int kpb_copy(struct comp_dev *dev) goto out; } - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); /* Validate sink */ if (!sink->stream.w_ptr) { comp_err(dev, "kpb_copy(): invalid selector sink pointers."); ret = -EINVAL; - sink = buffer_release_irq(sink); + sink = buffer_release(sink); goto out; } - sink = buffer_release_irq(sink); + sink = buffer_release(sink); copy_bytes = audio_stream_get_copy_bytes(&source->stream, &sink->stream); if (!copy_bytes) { @@ -723,17 +723,17 @@ static int kpb_copy(struct comp_dev *dev) goto out; } - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); /* Validate sink */ if (!sink->stream.w_ptr) { comp_err(dev, "kpb_copy(): invalid host sink pointers."); ret = -EINVAL; - sink = buffer_release_irq(sink); + sink = buffer_release(sink); goto out; } - sink = buffer_release_irq(sink); + sink = buffer_release(sink); copy_bytes = audio_stream_get_copy_bytes(&source->stream, &sink->stream); if (!copy_bytes) { diff --git a/src/audio/mixer.c b/src/audio/mixer.c index 96bbe5fd906e..35b40be9ef1b 100644 --- a/src/audio/mixer.c +++ b/src/audio/mixer.c @@ -351,11 +351,11 @@ static int mixer_copy(struct comp_dev *dev) /* write zeros if all sources are inactive */ if (num_mix_sources == 0) { uint32_t free_frames; - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); free_frames = 
audio_stream_get_free_frames(&sink->stream); frames = MIN(frames, free_frames); sink_bytes = frames * audio_stream_frame_bytes(&sink->stream); - sink = buffer_release_irq(sink); + sink = buffer_release(sink); if (!audio_stream_set_zero(&sink->stream, sink_bytes)) { buffer_stream_writeback(sink, sink_bytes); @@ -365,19 +365,19 @@ static int mixer_copy(struct comp_dev *dev) return 0; } - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); /* check for underruns */ for (i = 0; i < num_mix_sources; i++) { uint32_t avail_frames; - sources[i] = buffer_acquire_irq(sources[i]); + sources[i] = buffer_acquire(sources[i]); avail_frames = audio_stream_avail_frames(sources_stream[i], &sink->stream); frames = MIN(frames, avail_frames); - sources[i] = buffer_release_irq(sources[i]); + sources[i] = buffer_release(sources[i]); } - sink = buffer_release_irq(sink); + sink = buffer_release(sink); /* Every source has the same format, so calculate bytes based * on the first one. diff --git a/src/audio/mux/mux.c b/src/audio/mux/mux.c index 1ead72ecc9e4..658c9b91f98c 100644 --- a/src/audio/mux/mux.c +++ b/src/audio/mux/mux.c @@ -438,7 +438,7 @@ static int demux_copy(struct comp_dev *dev) // align sink streams with their respective configurations list_for_item(clist, &dev->bsink_list) { sink = container_of(clist, struct comp_buffer, source_list); - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); if (sink->sink->state == dev->state) { num_sinks++; i = get_stream_index(cd, sink->pipeline_id); @@ -449,7 +449,7 @@ static int demux_copy(struct comp_dev *dev) sinks[i] = sink; look_ups[i] = look_up; } - sink = buffer_release_irq(sink); + sink = buffer_release(sink); } /* if there are no sinks active */ @@ -459,25 +459,25 @@ static int demux_copy(struct comp_dev *dev) source = list_first_item(&dev->bsource_list, struct comp_buffer, sink_list); - source = buffer_acquire_irq(source); + source = buffer_acquire(source); /* check if source is active */ if 
(source->source->state != dev->state) { - source = buffer_release_irq(source); + source = buffer_release(source); return 0; } for (i = 0; i < MUX_MAX_STREAMS; i++) { if (!sinks[i]) continue; - sinks[i] = buffer_acquire_irq(sinks[i]); + sinks[i] = buffer_acquire(sinks[i]); avail = audio_stream_avail_frames(&source->stream, &sinks[i]->stream); frames = MIN(frames, avail); - buffer_release_irq(sinks[i]); + buffer_release(sinks[i]); } - source = buffer_release_irq(source); + source = buffer_release(source); source_bytes = frames * audio_stream_frame_bytes(&source->stream); for (i = 0; i < MUX_MAX_STREAMS; i++) { @@ -537,7 +537,7 @@ static int mux_copy(struct comp_dev *dev) /* align source streams with their respective configurations */ list_for_item(clist, &dev->bsource_list) { source = container_of(clist, struct comp_buffer, sink_list); - source = buffer_acquire_irq(source); + source = buffer_acquire(source); if (source->source->state == dev->state) { num_sources++; i = get_stream_index(cd, source->pipeline_id); @@ -547,7 +547,7 @@ static int mux_copy(struct comp_dev *dev) sources[i] = source; sources_stream[i] = &source->stream; } else { - source = buffer_release_irq(source); + source = buffer_release(source); } } @@ -558,17 +558,17 @@ static int mux_copy(struct comp_dev *dev) sink = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); /* check if sink is active */ if (sink->sink->state != dev->state) { for (i = 0; i < MUX_MAX_STREAMS; i++) { if (!sources[i]) continue; - buffer_release_irq(sources[i]); + buffer_release(sources[i]); } - sink = buffer_release_irq(sink); + sink = buffer_release(sink); return 0; } @@ -579,10 +579,10 @@ static int mux_copy(struct comp_dev *dev) avail_frames = audio_stream_avail_frames(sources_stream[i], &sink->stream); frames = MIN(frames, avail_frames); - sources[i] = buffer_release_irq(sources[i]); + sources[i] = buffer_release(sources[i]); } - sink = 
buffer_release_irq(sink); + sink = buffer_release(sink); for (i = 0; i < MUX_MAX_STREAMS; i++) { if (!sources[i]) diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 126aba9e0663..aab792f43542 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -385,15 +385,15 @@ int pipeline_for_each_comp(struct comp_dev *current, /* continue further */ if (ctx->comp_func) { - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); buffer->walking = true; - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); int err = ctx->comp_func(buffer_comp, buffer, ctx, dir); - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); buffer->walking = false; - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); if (err < 0 || err == PPL_STATUS_PATH_STOP) return err; } diff --git a/src/audio/pipeline/pipeline-params.c b/src/audio/pipeline/pipeline-params.c index c7a0cd80e5be..2d2e0d840eee 100644 --- a/src/audio/pipeline/pipeline-params.c +++ b/src/audio/pipeline/pipeline-params.c @@ -56,11 +56,11 @@ static int pipeline_comp_params_neg(struct comp_dev *current, * should explicitly configure the channels of the branched buffers. 
*/ if (calling_buf) { - calling_buf = buffer_acquire_irq(calling_buf); + calling_buf = buffer_acquire(calling_buf); err = buffer_set_params(calling_buf, &ppl_data->params->params, BUFFER_UPDATE_FORCE); - calling_buf = buffer_release_irq(calling_buf); + calling_buf = buffer_release(calling_buf); } return err; @@ -170,10 +170,10 @@ static int pipeline_comp_hw_params(struct comp_dev *current, /* set buffer parameters */ if (calling_buf) { - calling_buf = buffer_acquire_irq(calling_buf); + calling_buf = buffer_acquire(calling_buf); ret = buffer_set_params(calling_buf, &ppl_data->params->params, BUFFER_UPDATE_IF_UNSET); - calling_buf = buffer_release_irq(calling_buf); + calling_buf = buffer_release(calling_buf); if (ret < 0) pipe_err(current->pipeline, "pipeline_comp_hw_params(): buffer_set_params(): %d", ret); diff --git a/src/audio/selector/selector.c b/src/audio/selector/selector.c index 14f93b8f8eb8..90bd11c2ab8d 100644 --- a/src/audio/selector/selector.c +++ b/src/audio/selector/selector.c @@ -126,7 +126,7 @@ static int selector_verify_params(struct comp_dev *dev, } in_channels = cd->config.in_channels_count; - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* if cd->config.out_channels_count are equal to 0 * (it can vary), we set params->channels to sink buffer @@ -147,7 +147,7 @@ static int selector_verify_params(struct comp_dev *dev, } out_channels = cd->config.out_channels_count; - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* if cd->config.in_channels_count are equal to 0 * (it can vary), we set params->channels to source buffer @@ -165,7 +165,7 @@ static int selector_verify_params(struct comp_dev *dev, /* set component period frames */ component_set_nearest_period_frames(dev, sinkb->stream.rate); - buffer_release_irq(buffer); + buffer_release(buffer); /* verify input channels */ switch (in_channels) { @@ -389,15 +389,15 @@ static int selector_copy(struct comp_dev *dev) if (!source->stream.avail) return 
PPL_STATUS_PATH_STOP; - source = buffer_acquire_irq(source); - sink = buffer_acquire_irq(sink); + source = buffer_acquire(source); + sink = buffer_acquire(sink); frames = audio_stream_avail_frames(&source->stream, &sink->stream); source_bytes = frames * audio_stream_frame_bytes(&source->stream); sink_bytes = frames * audio_stream_frame_bytes(&sink->stream); - sink = buffer_release_irq(sink); - source = buffer_release_irq(source); + sink = buffer_release(sink); + source = buffer_release(source); comp_dbg(dev, "selector_copy(), source_bytes = 0x%x, sink_bytes = 0x%x", source_bytes, sink_bytes); diff --git a/src/audio/smart_amp/smart_amp.c b/src/audio/smart_amp/smart_amp.c index d7de97f5bd7f..e7d70c096d97 100644 --- a/src/audio/smart_amp/smart_amp.c +++ b/src/audio/smart_amp/smart_amp.c @@ -579,23 +579,23 @@ static int smart_amp_copy(struct comp_dev *dev) comp_dbg(dev, "smart_amp_copy()"); - source_buf = buffer_acquire_irq(source_buf); - sink_buf = buffer_acquire_irq(sink_buf); + source_buf = buffer_acquire(source_buf); + sink_buf = buffer_acquire(sink_buf); /* available bytes and samples calculation */ avail_passthrough_frames = audio_stream_avail_frames(&sad->source_buf->stream, &sad->sink_buf->stream); - source_buf = buffer_release_irq(source_buf); - sink_buf = buffer_release_irq(sink_buf); + source_buf = buffer_release(source_buf); + sink_buf = buffer_release(sink_buf); avail_frames = avail_passthrough_frames; if (sad->feedback_buf) { struct comp_buffer *buf = sad->feedback_buf; - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); if (comp_get_state(dev, buf->source) == dev->state) { /* feedback */ avail_feedback_frames = @@ -607,7 +607,7 @@ static int smart_amp_copy(struct comp_dev *dev) feedback_bytes = avail_frames * audio_stream_frame_bytes(&buf->stream); - buffer_release_irq(buf); + buffer_release(buf); comp_dbg(dev, "smart_amp_copy(): processing %d feedback frames (avail_passthrough_frames: %d)", avail_frames, avail_passthrough_frames); @@ -620,18 
+620,18 @@ static int smart_amp_copy(struct comp_dev *dev) comp_update_buffer_consume(sad->feedback_buf, feedback_bytes); } else { - buffer_release_irq(buf); + buffer_release(buf); } } /* bytes calculation */ - source_buf = buffer_acquire_irq(source_buf); + source_buf = buffer_acquire(source_buf); source_bytes = avail_frames * audio_stream_frame_bytes(&source_buf->stream); - source_buf = buffer_release_irq(source_buf); + source_buf = buffer_release(source_buf); - sink_buf = buffer_acquire_irq(sink_buf); + sink_buf = buffer_acquire(sink_buf); sink_bytes = avail_frames * audio_stream_frame_bytes(&sink_buf->stream); - sink_buf = buffer_release_irq(sink_buf); + sink_buf = buffer_release(sink_buf); /* process data */ buffer_stream_invalidate(sad->source_buf, source_bytes); @@ -673,12 +673,12 @@ static int smart_amp_prepare(struct comp_dev *dev) list_for_item(blist, &dev->bsource_list) { source_buffer = container_of(blist, struct comp_buffer, sink_list); - source_buffer = buffer_acquire_irq(source_buffer); + source_buffer = buffer_acquire(source_buffer); if (source_buffer->source->ipc_config.type == SOF_COMP_DEMUX) sad->feedback_buf = source_buffer; else sad->source_buf = source_buffer; - source_buffer = buffer_release_irq(source_buffer); + source_buffer = buffer_release(source_buffer); } sad->sink_buf = list_first_item(&dev->bsink_list, struct comp_buffer, @@ -690,10 +690,10 @@ static int smart_amp_prepare(struct comp_dev *dev) if (sad->feedback_buf) { struct comp_buffer *buf = sad->feedback_buf; - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); buf->stream.channels = sad->config.feedback_channels; buf->stream.rate = sad->source_buf->stream.rate; - buffer_release_irq(buf); + buffer_release(buf); ret = smart_amp_check_audio_fmt(sad->source_buf->stream.rate, sad->source_buf->stream.channels); if (ret) { diff --git a/src/audio/src/src.c b/src/audio/src/src.c index 02dd49186deb..ab17de80aa64 100644 --- a/src/audio/src/src.c +++ b/src/audio/src/src.c @@ -873,8 
+873,8 @@ static int src_copy(struct comp_dev *dev) sink = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - source = buffer_acquire_irq(source); - sink = buffer_acquire_irq(sink); + source = buffer_acquire(source); + sink = buffer_acquire(sink); /* Get from buffers and SRC conversion specific block constraints * how many frames can be processed. If sufficient number of samples @@ -882,8 +882,8 @@ static int src_copy(struct comp_dev *dev) */ ret = src_get_copy_limits(cd, source, sink); - buffer_release_irq(sink); - buffer_release_irq(source); + buffer_release(sink); + buffer_release(source); if (ret) { comp_info(dev, "No data to process."); diff --git a/src/audio/tone.c b/src/audio/tone.c index 17f42544f187..9b9320ac696e 100644 --- a/src/audio/tone.c +++ b/src/audio/tone.c @@ -440,8 +440,8 @@ static int tone_params(struct comp_dev *dev, if (dev->ipc_config.frame_fmt != SOF_IPC_FRAME_S32_LE) return -EINVAL; - sourceb = buffer_acquire_irq(sourceb); - sinkb = buffer_acquire_irq(sinkb); + sourceb = buffer_acquire(sourceb); + sinkb = buffer_acquire(sinkb); sourceb->stream.frame_fmt = dev->ipc_config.frame_fmt; sinkb->stream.frame_fmt = dev->ipc_config.frame_fmt; @@ -450,8 +450,8 @@ static int tone_params(struct comp_dev *dev, cd->period_bytes = dev->frames * audio_stream_frame_bytes(&sourceb->stream); - sinkb = buffer_release_irq(sinkb); - sourceb = buffer_release_irq(sourceb); + sinkb = buffer_release(sinkb); + sourceb = buffer_release(sourceb); return 0; } @@ -639,9 +639,9 @@ static int tone_copy(struct comp_dev *dev) sink = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - sink = buffer_acquire_irq(sink); + sink = buffer_acquire(sink); free = audio_stream_get_free_bytes(&sink->stream); - sink = buffer_release_irq(sink); + sink = buffer_release(sink); /* Test that sink has enough free frames. Then run once to maintain * low latency and steady load for tones. 
diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 9f22684a817b..3a38b83b759e 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -217,23 +217,9 @@ static inline struct comp_buffer *buffer_release(struct comp_buffer *buffer) return container_of(c, struct comp_buffer, c); } -__must_check static inline struct comp_buffer *buffer_acquire_irq(struct comp_buffer *buffer) -{ - struct coherent *c = coherent_acquire_irq(&buffer->c, sizeof(*buffer)); - - return container_of(c, struct comp_buffer, c); -} - -static inline struct comp_buffer *buffer_release_irq(struct comp_buffer *buffer) -{ - struct coherent *c = coherent_release_irq(&buffer->c, sizeof(*buffer)); - - return container_of(c, struct comp_buffer, c); -} - static inline void buffer_reset_pos(struct comp_buffer *buffer, void *data) { - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); /* reset rw pointers and avail/free bytes counters */ audio_stream_reset(&buffer->stream); @@ -241,7 +227,7 @@ static inline void buffer_reset_pos(struct comp_buffer *buffer, void *data) /* clear buffer contents */ buffer_zero(buffer); - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); } static inline void buffer_init(struct comp_buffer *buffer, uint32_t size, uint32_t caps) @@ -254,11 +240,11 @@ static inline void buffer_init(struct comp_buffer *buffer, uint32_t size, uint32 static inline void buffer_reset_params(struct comp_buffer *buffer, void *data) { - buffer = buffer_acquire_irq(buffer); + buffer = buffer_acquire(buffer); buffer->hw_params_configured = false; - buffer = buffer_release_irq(buffer); + buffer = buffer_release(buffer); } #endif /* __SOF_AUDIO_BUFFER_H__ */ diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index 6f1016fd69c4..aa87f2f3884c 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -795,13 +795,13 @@ void 
comp_get_copy_limits_with_lock(struct comp_buffer *source, struct comp_buffer *sink, struct comp_copy_limits *cl) { - source = buffer_acquire_irq(source); - sink = buffer_acquire_irq(sink); + source = buffer_acquire(source); + sink = buffer_acquire(sink); comp_get_copy_limits(source, sink, cl); - source = buffer_release_irq(source); - sink = buffer_release_irq(sink); + source = buffer_release(source); + sink = buffer_release(sink); } /** diff --git a/src/include/sof/coherent.h b/src/include/sof/coherent.h index 7b18927928de..6c1588436b42 100644 --- a/src/include/sof/coherent.h +++ b/src/include/sof/coherent.h @@ -35,16 +35,15 @@ * as that memory was never accessed by other cores, except by using this API. * The first call must be coherent_init(), which initializes the header. If the * object will be used by multiple cores, next coherent_shared() must be called. - * After that to use that memory, coherent_acquire_irq() or coherent_acquire() - * must be called, which acquires ownership of the object and returns a cached - * address of the memory. After that the user can perform cached access to the - * memory. To release the memory, one of coherent_release_irq() or - * coherent_release() must be called. The only time when the memory is accessed - * using cache is between those two calls, so only when releasing the memory we - * have to write back and invalidate caches to make sure, that next time we - * acquire this memory, our uncached header access will not be overwritten! When - * memory is not needed any more, typically before freeing the memory, - * coherent_free() should be called. + * After that to use that memory, coherent_acquire() must be called, which + * acquires ownership of the object and returns a cached address of the memory. + * After that the user can perform cached access to the memory. To release the + * memory, coherent_release() must be called. 
The only time when the memory is + * accessed using cache is between those two calls, so only when releasing the + * memory we have to write back and invalidate caches to make sure, that next + * time we acquire this memory, our uncached header access will not be + * overwritten! When memory is not needed any more, typically before freeing the + * memory, coherent_free() should be called. * * This structure needs to be embedded at the start of any container to ensure * container object cache alignment and to minimise non cache access when @@ -77,13 +76,6 @@ struct coherent { #define CHECK_COHERENT_CORE(_c) #endif -/* debug usage in IRQ contexts - check non IRQ being used in IRQ context TODO */ -#ifdef COHERENT_CHECK_IN_IRQ -#define CHECK_COHERENT_IRQ(_c) assert(1) -#else -#define CHECK_COHERENT_IRQ(_c) -#endif - #if CONFIG_INCOHERENT /* When coherent_acquire() is called, we are sure not to have cache for this memory */ __must_check static inline struct coherent *coherent_acquire(struct coherent *c, @@ -92,9 +84,6 @@ __must_check static inline struct coherent *coherent_acquire(struct coherent *c, /* assert if someone passes a cache/local address in here. */ ADDR_IS_COHERENT(c); - /* this flavour should not be used in IRQ context */ - CHECK_COHERENT_IRQ(c); - /* access the shared coherent object */ if (c->shared) { CHECK_COHERENT_CORE(c); @@ -114,48 +103,6 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t /* assert if someone passes a coherent address in here. 
*/ ADDR_IS_INCOHERENT(c); - /* this flavour should not be used in IRQ context */ - CHECK_COHERENT_IRQ(c); - - /* access the local copy of object */ - if (c->shared) { - CHECK_COHERENT_CORE(c); - - /* wtb and inv local data to coherent object */ - dcache_writeback_invalidate_region(c, size); - - /* unlock on uncache alias */ - k_spin_unlock(&cache_to_uncache(c)->lock, cache_to_uncache(c)->key); - } - - return cache_to_uncache(c); -} - -__must_check static inline struct coherent *coherent_acquire_irq(struct coherent *c, - const size_t size) -{ - /* assert if someone passes a cache/local address in here. */ - ADDR_IS_COHERENT(c); - - /* access the shared coherent object */ - if (c->shared) { - CHECK_COHERENT_CORE(c); - - c->key = k_spin_lock(&c->lock); - - /* invalidate local copy */ - dcache_invalidate_region(uncache_to_cache(c), size); - } - - /* client can now use cached object safely */ - return uncache_to_cache(c); -} - -static inline struct coherent *coherent_release_irq(struct coherent *c, const size_t size) -{ - /* assert if someone passes a coherent address in here. */ - ADDR_IS_INCOHERENT(c); - /* access the local copy of object */ if (c->shared) { CHECK_COHERENT_CORE(c); @@ -201,17 +148,6 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si dcache_invalidate_region(object, sizeof(*object)); \ k_spin_unlock(&(object)->member.lock, (object)->member.key); \ } while (0) - -/* set the object to shared mode with coherency managed by SW */ -#define coherent_shared_irq(object, member) \ - do { \ - /* assert if someone passes a cache/local address in here. 
*/ \ - ADDR_IS_COHERENT(object); \ - (object)->member.key = k_spin_lock(&(object)->member.lock); \ - (object)->member.shared = true; \ - dcache_invalidate_region(object, sizeof(*object)); \ - k_spin_unlock(&(object)->member.lock, &(object)->member.key); \ - } while (0) #else /* @@ -241,31 +177,6 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t return c; } -__must_check static inline struct coherent *coherent_acquire_irq(struct coherent *c, - const size_t size) -{ - if (c->shared) { - c->key = k_spin_lock(&c->lock); - - /* invalidate local copy */ - dcache_invalidate_region(uncache_to_cache(c), size); - } - - return c; -} - -static inline struct coherent *coherent_release_irq(struct coherent *c, const size_t size) -{ - if (c->shared) { - /* wtb and inv local data to coherent object */ - dcache_writeback_invalidate_region(uncache_to_cache(c), size); - - k_spin_unlock(&c->lock, c->key); - } - - return c; -} - #define coherent_init(object, member) \ do { \ /* TODO static assert if we are not cache aligned */ \ @@ -279,7 +190,6 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si /* no function on cache coherent architectures */ #define coherent_shared(object, member) ((object)->member.shared = true) -#define coherent_shared_irq(object, member) ((object)->member.shared = true) #endif /* CONFIG_CAVS && CONFIG_CORE_COUNT > 1 */ diff --git a/src/ipc/ipc-helper.c b/src/ipc/ipc-helper.c index 1f4238b1b497..45fd07903d47 100644 --- a/src/ipc/ipc-helper.c +++ b/src/ipc/ipc-helper.c @@ -131,7 +131,7 @@ int comp_verify_params(struct comp_dev *dev, uint32_t flag, struct comp_buffer, source_list); - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); /* update specific pcm parameter with buffer parameter if * specific flag is set. 
@@ -146,7 +146,7 @@ int comp_verify_params(struct comp_dev *dev, uint32_t flag, /* set component period frames */ component_set_nearest_period_frames(dev, buf->stream.rate); - buf = buffer_release_irq(buf); + buf = buffer_release(buf); } else { /* for other components we iterate over all downstream buffers * (for playback) or upstream buffers (for capture). @@ -157,20 +157,20 @@ int comp_verify_params(struct comp_dev *dev, uint32_t flag, while (clist != buffer_list) { curr = clist; buf = buffer_from_list(curr, struct comp_buffer, dir); - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); clist = clist->next; comp_update_params(flag, params, buf); buffer_set_params(buf, params, BUFFER_UPDATE_FORCE); - buf = buffer_release_irq(buf); + buf = buffer_release(buf); } /* fetch sink buffer in order to calculate period frames */ sinkb = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - sinkb = buffer_acquire_irq(sinkb); + sinkb = buffer_acquire(sinkb); component_set_nearest_period_frames(dev, sinkb->stream.rate); - sinkb = buffer_release_irq(sinkb); + sinkb = buffer_release(sinkb); } return 0; diff --git a/src/samples/audio/detect_test.c b/src/samples/audio/detect_test.c index b44bc32a6e2c..b809eb8cc8e7 100644 --- a/src/samples/audio/detect_test.c +++ b/src/samples/audio/detect_test.c @@ -635,9 +635,9 @@ static int test_keyword_copy(struct comp_dev *dev) if (!source->stream.avail) return PPL_STATUS_PATH_STOP; - source = buffer_acquire_irq(source); + source = buffer_acquire(source); frames = audio_stream_get_avail_frames(&source->stream); - source = buffer_release_irq(source); + source = buffer_release(source); /* copy and perform detection */ buffer_stream_invalidate(source, audio_stream_get_avail_bytes(&source->stream)); diff --git a/src/samples/audio/smart_amp_test.c b/src/samples/audio/smart_amp_test.c index 27309e7a9123..42d4808602c1 100644 --- a/src/samples/audio/smart_amp_test.c +++ b/src/samples/audio/smart_amp_test.c @@ -420,23 +420,23 
@@ static int smart_amp_copy(struct comp_dev *dev) comp_dbg(dev, "smart_amp_copy()"); - source_buf = buffer_release_irq(source_buf); - sink_buf = buffer_release_irq(sink_buf); + source_buf = buffer_release(source_buf); + sink_buf = buffer_release(sink_buf); /* available bytes and samples calculation */ avail_passthrough_frames = audio_stream_avail_frames(&sad->source_buf->stream, &sad->sink_buf->stream); - source_buf = buffer_release_irq(source_buf); - sink_buf = buffer_release_irq(sink_buf); + source_buf = buffer_release(source_buf); + sink_buf = buffer_release(sink_buf); avail_frames = avail_passthrough_frames; if (sad->feedback_buf) { struct comp_buffer *buf = sad->feedback_buf; - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); if (comp_get_state(dev, sad->feedback_buf->source) == dev->state) { /* feedback */ avail_feedback_frames = @@ -448,7 +448,7 @@ static int smart_amp_copy(struct comp_dev *dev) feedback_bytes = avail_frames * audio_stream_frame_bytes(&sad->feedback_buf->stream); - buffer_release_irq(buf); + buffer_release(buf); comp_dbg(dev, "smart_amp_copy(): processing %d feedback frames (avail_passthrough_frames: %d)", avail_frames, avail_passthrough_frames); @@ -461,20 +461,20 @@ static int smart_amp_copy(struct comp_dev *dev) comp_update_buffer_consume(sad->feedback_buf, feedback_bytes); } else { - buffer_release_irq(buf); + buffer_release(buf); } } /* bytes calculation */ - source_buf = buffer_acquire_irq(source_buf); + source_buf = buffer_acquire(source_buf); source_bytes = avail_frames * audio_stream_frame_bytes(&sad->source_buf->stream); - source_buf = buffer_release_irq(source_buf); + source_buf = buffer_release(source_buf); - sink_buf = buffer_acquire_irq(sink_buf); + sink_buf = buffer_acquire(sink_buf); sink_bytes = avail_frames * audio_stream_frame_bytes(&sad->sink_buf->stream); - sink_buf = buffer_release_irq(sink_buf); + sink_buf = buffer_release(sink_buf); /* process data */ buffer_stream_invalidate(sad->source_buf, 
source_bytes); @@ -518,12 +518,12 @@ static int smart_amp_prepare(struct comp_dev *dev) list_for_item(blist, &dev->bsource_list) { source_buffer = container_of(blist, struct comp_buffer, sink_list); - source_buffer = buffer_acquire_irq(source_buffer); + source_buffer = buffer_acquire(source_buffer); if (source_buffer->source->ipc_config.type == SOF_COMP_DEMUX) sad->feedback_buf = source_buffer; else sad->source_buf = source_buffer; - source_buffer = buffer_release_irq(source_buffer); + source_buffer = buffer_release(source_buffer); } sad->sink_buf = list_first_item(&dev->bsink_list, struct comp_buffer, @@ -535,10 +535,10 @@ static int smart_amp_prepare(struct comp_dev *dev) if (sad->feedback_buf) { struct comp_buffer *buf = sad->feedback_buf; - buf = buffer_acquire_irq(buf); + buf = buffer_acquire(buf); sad->feedback_buf->stream.channels = sad->config.feedback_channels; sad->feedback_buf->stream.rate = sad->source_buf->stream.rate; - buf = buffer_release_irq(buf); + buf = buffer_release(buf); } sad->process = get_smart_amp_process(dev);