From b76ed004cbbe9aa364ef029b89d5666a77cd8723 Mon Sep 17 00:00:00 2001 From: Liam Girdwood Date: Wed, 19 Jan 2022 13:15:03 +0000 Subject: [PATCH] zephyr: spinlock: Align spinlock API to use Zephyr API Align the SOF spinlock API with the Zephyr spinlock API; no functional changes to the xtos logic. This is the first part of the spinlock Zephyr alignment work; subsequent updates will align headers and associated spinlock dependencies. Signed-off-by: Liam Girdwood --- src/audio/component.c | 10 +++--- src/audio/kpb.c | 12 +++---- src/audio/pipeline/pipeline-graph.c | 11 +++--- src/drivers/amd/renoir/acp_bt_dma.c | 16 ++++----- src/drivers/amd/renoir/acp_dma.c | 16 ++++----- src/drivers/amd/renoir/acp_dmic_dma.c | 16 ++++----- src/drivers/amd/renoir/acp_sp_dma.c | 16 ++++----- src/drivers/amd/renoir/interrupt.c | 5 +-- src/drivers/dw/dma.c | 26 +++++++------- src/drivers/dw/ssi-spi.c | 14 ++++---- src/drivers/generic/dummy-dma.c | 20 +++++------ src/drivers/imx/edma.c | 16 ++++----- src/drivers/imx/interrupt-irqsteer.c | 9 ++--- src/drivers/intel/baytrail/ssp.c | 15 ++++---- src/drivers/intel/cavs/interrupt.c | 9 ++--- src/drivers/intel/cavs/ipc.c | 6 ++-- src/drivers/intel/dmic/dmic.c | 17 +++++---- src/drivers/intel/haswell/ssp.c | 15 ++++---- src/drivers/intel/hda/hda-dma.c | 16 ++++----- src/drivers/intel/ssp/mn.c | 25 ++++++++------ src/drivers/intel/ssp/ssp.c | 20 ++++++----- src/drivers/interrupt.c | 44 +++++++++++------------- src/drivers/mediatek/mt8195/afe-memif.c | 16 ++++----- src/drivers/mediatek/mt8195/interrupt.c | 5 +-- src/idc/idc.c | 6 ++-- src/include/sof/coherent.h | 28 +++++++-------- src/include/sof/ipc/msg.h | 6 ++-- src/include/sof/spinlock.h | 30 +++++++++++----- src/ipc/ipc-common.c | 24 ++++++------- src/ipc/ipc3/handler.c | 10 +++--- src/ipc/ipc3/helper.c | 11 +++--- src/lib/alloc.c | 35 ++++++++++--------- src/lib/clk.c | 6 ++-- src/lib/dai.c | 12 +++---- src/lib/dma.c | 12 +++---- src/lib/notifier.c | 10 +++--- src/platform/intel/cavs/lib/clk.c | 22 ++++++------ src/platform/intel/cavs/lib/pm_runtime.c | 36 +++++++++---------- src/schedule/ll_schedule.c | 29 +++++++++------- src/schedule/zephyr.c | 6 ++-- src/schedule/zephyr_domain.c | 12 +++---- src/schedule/zephyr_ll.c | 8 ++--- src/spinlock.c | 10 +++--- src/trace/dma-trace.c | 26 +++++++------- src/trace/trace.c | 24 ++++++------- test/cmocka/src/common_mocks.c | 4 +-- 46 files changed, 394 insertions(+), 348 deletions(-) diff --git a/src/audio/component.c b/src/audio/component.c index f6d0daf8b9f1..79a028dee994 100644 --- a/src/audio/component.c +++ b/src/audio/component.c @@ -32,10 +32,11 @@ DECLARE_TR_CTX(comp_tr, SOF_UUID(comp_uuid), LOG_LEVEL_INFO); int comp_register(struct comp_driver_info *drv) { struct comp_driver_list *drivers = comp_drivers_get(); + k_spinlock_key_t key; - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); list_item_prepend(&drv->list, &drivers->list); - spin_unlock(&drivers->lock); + k_spin_unlock(&drivers->lock, key); return 0; } @@ -43,10 +44,11 @@ int comp_register(struct comp_driver_info *drv) void comp_unregister(struct comp_driver_info *drv) { struct comp_driver_list *drivers = comp_drivers_get(); + k_spinlock_key_t key; - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); list_item_del(&drv->list); - spin_unlock(&drivers->lock); + k_spin_unlock(&drivers->lock, key); } /* NOTE: Keep the component state diagram up to date: diff --git a/src/audio/kpb.c b/src/audio/kpb.c index 51621e1439b7..5762a5daf0bb 100644 --- a/src/audio/kpb.c +++ b/src/audio/kpb.c @@
-1024,7 +1024,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) (KPB_SAMPLE_CONTAINER_SIZE(sample_width) / 8) * kpb->config.channels; size_t period_bytes_limit; - uint32_t flags; + k_spinlock_key_t key; comp_info(dev, "kpb_init_draining(): requested draining of %d [ms] from history buffer", cli->drain_req); @@ -1045,7 +1045,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) * in the history buffer. All we have to do now is to calculate * read pointer from which we will start draining. */ - spin_lock_irq(&kpb->lock, flags); + key = k_spin_lock_irq(&kpb->lock); kpb_change_state(kpb, KPB_STATE_INIT_DRAINING); @@ -1103,7 +1103,7 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) } while (buff != first_buff); - spin_unlock_irq(&kpb->lock, flags); + k_spin_unlock_irq(&kpb->lock, key); /* Should we drain in synchronized mode (sync_draining_mode)? * Note! We have already verified host params during @@ -1190,7 +1190,7 @@ static enum task_state kpb_draining_task(void *arg) struct comp_data *kpb = comp_get_drvdata(draining_data->dev); bool sync_mode_on = draining_data->sync_mode_on; bool pm_is_active; - uint32_t flags; + k_spinlock_key_t key; comp_cl_info(&comp_kpb, "kpb_draining_task(), start."); @@ -1280,7 +1280,7 @@ static enum task_state kpb_draining_task(void *arg) */ comp_cl_info(&comp_kpb, "kpb: update drain_req by %d", *rt_stream_update); - spin_lock_irq(&kpb->lock, flags); + key = k_spin_lock_irq(&kpb->lock); drain_req += *rt_stream_update; *rt_stream_update = 0; if (!drain_req && kpb->state == KPB_STATE_DRAINING) { @@ -1292,7 +1292,7 @@ static enum task_state kpb_draining_task(void *arg) */ kpb_change_state(kpb, KPB_STATE_HOST_COPY); } - spin_unlock_irq(&kpb->lock, flags); + k_spin_unlock_irq(&kpb->lock, key); } } diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 5dbcd9f47505..1fe2b41d25e2 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -60,8 +60,9 @@ static inline int pipeline_posn_offset_get(uint32_t *posn_offset) struct pipeline_posn *pipeline_posn = pipeline_posn_get(); int ret = -EINVAL; uint32_t i; + k_spinlock_key_t key; - spin_lock(&pipeline_posn->lock); + key = k_spin_lock(&pipeline_posn->lock); for (i = 0; i < PPL_POSN_OFFSETS; ++i) { if (!pipeline_posn->posn_offset[i]) { @@ -73,7 +74,7 @@ static inline int pipeline_posn_offset_get(uint32_t *posn_offset) } - spin_unlock(&pipeline_posn->lock); + k_spin_unlock(&pipeline_posn->lock, key); return ret; } @@ -86,13 +87,13 @@ static inline void pipeline_posn_offset_put(uint32_t posn_offset) { struct pipeline_posn *pipeline_posn = pipeline_posn_get(); int i = posn_offset / sizeof(struct sof_ipc_stream_posn); + k_spinlock_key_t key; - spin_lock(&pipeline_posn->lock); + key = k_spin_lock(&pipeline_posn->lock); pipeline_posn->posn_offset[i] = false; - - spin_unlock(&pipeline_posn->lock); + k_spin_unlock(&pipeline_posn->lock, key); } void pipeline_posn_init(struct sof *sof) diff --git a/src/drivers/amd/renoir/acp_bt_dma.c b/src/drivers/amd/renoir/acp_bt_dma.c index 5bfcb8c291c1..3b25c44186f9 100644 --- a/src/drivers/amd/renoir/acp_bt_dma.c +++ b/src/drivers/amd/renoir/acp_bt_dma.c @@ -52,24 +52,24 @@ static uint64_t prev_rx_pos; static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); 
if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_bt_dma_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_bt_dma_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return channel; } @@ -77,13 +77,13 @@ static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, /* channel must not be running when this is called */ static void acp_dai_bt_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } static int acp_dai_bt_dma_start(struct dma_chan_data *channel) diff --git a/src/drivers/amd/renoir/acp_dma.c b/src/drivers/amd/renoir/acp_dma.c index 75ce182e8297..1527db260845 100644 --- a/src/drivers/amd/renoir/acp_dma.c +++ b/src/drivers/amd/renoir/acp_dma.c @@ -174,24 +174,24 @@ static void dma_reconfig(struct dma_chan_data *channel, uint32_t bytes) static struct dma_chan_data *acp_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acpdma_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acpdma_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); /* reset read and write pointers */ struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel); @@ -202,12 +202,12 @@ static struct dma_chan_data *acp_dma_channel_get(struct dma *dma, static void acp_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); /* reset read and write pointer */ struct acp_dma_chan_data *acp_dma_chan = dma_chan_get_data(channel); diff --git a/src/drivers/amd/renoir/acp_dmic_dma.c b/src/drivers/amd/renoir/acp_dmic_dma.c index 90152353d921..9d3b8ad95af0 100644 --- a/src/drivers/amd/renoir/acp_dmic_dma.c +++ b/src/drivers/amd/renoir/acp_dmic_dma.c @@ -42,38 +42,38 @@ DECLARE_TR_CTX(acp_dmic_dma_tr, SOF_UUID(acp_dmic_dma_uuid), LOG_LEVEL_INFO); static struct dma_chan_data *acp_dmic_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = 
k_spin_lock_irq(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_dmic_dma_tr, "ACP_DMIC: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_dmic_dma_tr, "ACP_DMIC: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return channel; } static void acp_dmic_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } static int acp_dmic_dma_start(struct dma_chan_data *channel) diff --git a/src/drivers/amd/renoir/acp_sp_dma.c b/src/drivers/amd/renoir/acp_sp_dma.c index e291af144a4b..b51d1863b3bf 100644 --- a/src/drivers/amd/renoir/acp_sp_dma.c +++ b/src/drivers/amd/renoir/acp_sp_dma.c @@ -48,37 +48,37 @@ static uint32_t sp_buff_size; static struct dma_chan_data *acp_dai_sp_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_sp_tr, "DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&acp_sp_tr, "DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return channel; } /* channel must not be running when this is called */ static void acp_dai_sp_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } diff --git a/src/drivers/amd/renoir/interrupt.c b/src/drivers/amd/renoir/interrupt.c index f08649ef2460..80a9f02fc097 100644 --- a/src/drivers/amd/renoir/interrupt.c +++ b/src/drivers/amd/renoir/interrupt.c @@ -98,13 +98,14 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, struct irq_desc *child = NULL; int bit; bool handled; + k_spinlock_key_t key; while (status) { bit = get_first_irq(status); handled = false; status &= ~(1ull << bit); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); list_for_item(clist, &cascade->child[bit].list) { child = container_of(clist, struct irq_desc, irq_list); @@ -115,7 +116,7 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&acp_irq_tr, "irq_handler(): 
not handled, bit %d", diff --git a/src/drivers/dw/dma.c b/src/drivers/dw/dma.c index d09c297cf2b3..93122a659197 100644 --- a/src/drivers/dw/dma.c +++ b/src/drivers/dw/dma.c @@ -185,13 +185,13 @@ static inline void dw_dma_chan_reload_lli_cb(void *arg, enum notify_id type, static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; int i; tr_info(&dwdma_tr, "dw_dma_channel_get(): dma %d request channel %d", dma->plat_data.id, req_chan); - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); /* find first free non draining channel */ for (i = 0; i < dma->plat_data.channels; i++) { @@ -209,12 +209,12 @@ static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, #endif /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return &dma->chan[i]; } /* DMA controller has no free channels */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&dwdma_tr, "dw_dma_channel_get(): dma %d no free channels", dma->plat_data.id); @@ -249,14 +249,14 @@ static void dw_dma_channel_put_unlocked(struct dma_chan_data *channel) /* channel must not be running when this is called */ static void dw_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; tr_info(&dwdma_tr, "dw_dma_channel_put(): dma %d channel %d put", channel->dma->plat_data.id, channel->index); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); dw_dma_channel_put_unlocked(channel); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } static int dw_dma_start(struct dma_chan_data *channel) @@ -860,7 +860,7 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes, .elem = { .size = bytes }, .status = DMA_CB_STATUS_END, }; - uint32_t irq_flags; + k_spinlock_key_t key; tr_dbg(&dwdma_tr, "dw_dma_copy(): dma %d channel %d copy", channel->dma->plat_data.id, channel->index); @@ -890,9 +890,9 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes, dw_dma_verify_transfer(channel, &next); /* increment current pointer */ - spin_lock_irq(&channel->dma->lock, irq_flags); + key = k_spin_lock_irq(&channel->dma->lock); dw_dma_increment_pointer(dw_chan, bytes); - spin_unlock_irq(&channel->dma->lock, irq_flags); + k_spin_unlock_irq(&channel->dma->lock, key); return ret; } @@ -1103,13 +1103,13 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, uint32_t *avail, uint32_t *free) { struct dw_dma_chan_data *dw_chan = dma_chan_get_data(channel); - uint32_t flags; + k_spinlock_key_t key; int ret = 0; tr_dbg(&dwdma_tr, "dw_dma_get_data_size(): dma %d channel %d get data size", channel->dma->plat_data.id, channel->index); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); if (channel->direction == DMA_DIR_HMEM_TO_LMEM || channel->direction == DMA_DIR_DEV_TO_MEM) { @@ -1120,7 +1120,7 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, *avail = dw_chan->ptr_data.buffer_bytes - *free; } - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); #if CONFIG_DMA_HW_LLI if (!(dma_reg_read(channel->dma, DW_DMA_CHAN_EN) & diff --git a/src/drivers/dw/ssi-spi.c b/src/drivers/dw/ssi-spi.c index 4a02ed3bbfed..f414c3c80683 100644 --- a/src/drivers/dw/ssi-spi.c +++ b/src/drivers/dw/ssi-spi.c @@ -484,15 +484,16 @@ static unsigned int n_spi_devices; struct spi *spi_get(enum spi_type type) { 
struct spi *spi; - unsigned int i, flags; + unsigned int i; + k_spinlock_key_t key; - spin_lock_irq(&spi_lock, flags); + key = k_spin_lock_irq(&spi_lock); for (i = 0, spi = spi_devices; i < n_spi_devices; i++, spi++) if (spi->plat_data->type == type) break; - spin_unlock_irq(&spi_lock, flags); + k_spin_unlock_irq(&spi_lock, key); return i < n_spi_devices ? spi : NULL; } @@ -500,10 +501,11 @@ struct spi *spi_get(enum spi_type type) int spi_install(const struct spi_platform_data *plat, size_t n) { struct spi *spi; - unsigned int i, flags; + unsigned int i; + k_spinlock_key_t key; int ret; - spin_lock_irq(&spi_lock, flags); + key = k_spin_lock_irq(&spi_lock); if (spi_devices) { ret = -EBUSY; @@ -526,7 +528,7 @@ int spi_install(const struct spi_platform_data *plat, size_t n) } unlock: - spin_unlock_irq(&spi_lock, flags); + k_spin_unlock_irq(&spi_lock, key); return ret; } diff --git a/src/drivers/generic/dummy-dma.c b/src/drivers/generic/dummy-dma.c index 111e9621c6a3..5f15cb512e68 100644 --- a/src/drivers/generic/dummy-dma.c +++ b/src/drivers/generic/dummy-dma.c @@ -223,10 +223,10 @@ static ssize_t dummy_dma_do_copies(struct dma_chan_pdata *pdata, int bytes) static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; int i; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); for (i = 0; i < dma->plat_data.channels; i++) { /* use channel if it's free */ if (dma->chan[i].status == COMP_STATE_INIT) { @@ -235,11 +235,11 @@ static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma, atomic_add(&dma->num_channels_busy, 1); /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return &dma->chan[i]; } } - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&ddma_tr, "dummy-dmac: %d no free channel", dma->plat_data.id); return NULL; @@ -272,11 +272,11 @@ static void dummy_dma_channel_put_unlocked(struct dma_chan_data *channel) */ static void dummy_dma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); dummy_dma_channel_put_unlocked(channel); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } /* Since copies are synchronous, the triggers do nothing */ @@ -332,10 +332,10 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, struct dma_sg_config *config) { struct dma_chan_pdata *ch = dma_chan_get_data(channel); - uint32_t flags; + k_spinlock_key_t key; int ret = 0; - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); if (!config->elem_array.count) { tr_err(&ddma_tr, "dummy-dmac: %d channel %d no DMA descriptors", @@ -364,7 +364,7 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, channel->status = COMP_STATE_PREPARE; out: - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); return ret; } diff --git a/src/drivers/imx/edma.c b/src/drivers/imx/edma.c index cd1133a12490..d938421890d0 100644 --- a/src/drivers/imx/edma.c +++ b/src/drivers/imx/edma.c @@ -96,28 +96,28 @@ static int edma_encode_tcd_attr(int src_width, int dest_width) static struct dma_chan_data *edma_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; tr_dbg(&edma_tr, "EDMA: channel_get(%d)", req_chan); - 
spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&edma_tr, "EDMA: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&edma_tr, "EDMA: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return channel; } @@ -125,7 +125,7 @@ static struct dma_chan_data *edma_channel_get(struct dma *dma, /* channel must not be running when this is called */ static void edma_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; /* Assuming channel is stopped, we thus don't need hardware to * do anything right now */ @@ -134,10 +134,10 @@ static void edma_channel_put(struct dma_chan_data *channel) notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } static int edma_start(struct dma_chan_data *channel) diff --git a/src/drivers/imx/interrupt-irqsteer.c b/src/drivers/imx/interrupt-irqsteer.c index 51adefea3952..c2d0e78f6882 100644 --- a/src/drivers/imx/interrupt-irqsteer.c +++ b/src/drivers/imx/interrupt-irqsteer.c @@ -287,13 +287,14 @@ static inline void handle_irq_batch(struct irq_cascade_desc *cascade, struct irq_desc *child = NULL; int bit; bool handled; + k_spinlock_key_t key; while (status) { bit = get_first_irq(status); handled = false; status &= ~(1ull << bit); /* Release interrupt */ - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); /* Get child if any and run handler */ list_for_item(clist, &cascade->child[bit].list) { @@ -301,16 +302,16 @@ static inline void handle_irq_batch(struct irq_cascade_desc *cascade, if (child->handler && (child->cpu_mask & 1 << core)) { /* run handler in non atomic context */ - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); child->handler(child->handler_arg); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); handled = true; } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&irq_i_tr, "irq_handler(): nobody cared, bit %d", diff --git a/src/drivers/intel/baytrail/ssp.c b/src/drivers/intel/baytrail/ssp.c index 21dd6aa687b7..efc0a7a38a81 100644 --- a/src/drivers/intel/baytrail/ssp.c +++ b/src/drivers/intel/baytrail/ssp.c @@ -68,8 +68,9 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, bool cfs = false; bool cbs = false; int ret = 0; + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* is playback/capture already running */ if (ssp->state[DAI_DIR_PLAYBACK] == COMP_STATE_ACTIVE || @@ -444,7 +445,7 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, dai_info(dai, "ssp_set_config(), done"); out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -485,8 +486,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; -
spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* enable port */ ssp_update_bits(dai, SSCR0, SSCR0_SSE, SSCR0_SSE); @@ -500,15 +502,16 @@ static void ssp_start(struct dai *dai, int direction) else ssp_update_bits(dai, SSCR1, SSCR1_RSRE, SSCR1_RSRE); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* stop Rx if neeed */ if (direction == DAI_DIR_CAPTURE && @@ -534,7 +537,7 @@ static void ssp_stop(struct dai *dai, int direction) dai_info(dai, "ssp_stop(), SSP port disabled"); } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai *dai, int direction) diff --git a/src/drivers/intel/cavs/interrupt.c b/src/drivers/intel/cavs/interrupt.c index 07216344e5ce..75716945e40f 100644 --- a/src/drivers/intel/cavs/interrupt.c +++ b/src/drivers/intel/cavs/interrupt.c @@ -62,6 +62,7 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, struct list_item *clist; uint32_t status; uint32_t tries = LVL2_MAX_TRIES; + k_spinlock_key_t key; /* read active interrupt status */ status = irq_read(ilxsd); @@ -75,7 +76,7 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, status &= ~(1 << bit); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); /* get child if any and run handler */ list_for_item(clist, &cascade->child[bit].list) { @@ -83,16 +84,16 @@ static inline void irq_lvl2_handler(void *data, int level, uint32_t ilxsd, if (child->handler && (child->cpu_mask & 1 << core)) { /* run handler in non atomic context */ - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); child->handler(child->handler_arg); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); handled = true; } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { /* nobody cared ? */ diff --git a/src/drivers/intel/cavs/ipc.c b/src/drivers/intel/cavs/ipc.c index 001e7d13cc01..f3f2a8357acf 100644 --- a/src/drivers/intel/cavs/ipc.c +++ b/src/drivers/intel/cavs/ipc.c @@ -58,9 +58,9 @@ static void ipc_irq_handler(void *arg) { struct ipc *ipc = arg; uint32_t dipcctl; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); #if CAVS_VERSION == CAVS_VERSION_1_5 uint32_t dipct; @@ -128,7 +128,7 @@ static void ipc_irq_handler(void *arg) ipc_read(IPC_DIPCCTL) | IPC_DIPCCTL_IPCIDIE); } - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } #if CAVS_VERSION >= CAVS_VERSION_1_8 diff --git a/src/drivers/intel/dmic/dmic.c b/src/drivers/intel/dmic/dmic.c index 5ab56a6f3d1e..b1e3726978bb 100644 --- a/src/drivers/intel/dmic/dmic.c +++ b/src/drivers/intel/dmic/dmic.c @@ -128,7 +128,7 @@ static enum task_state dmic_work(void *data) } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, 0); return gval ?
SOF_TASK_STATE_RESCHEDULE : SOF_TASK_STATE_COMPLETED; } @@ -155,6 +155,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config int32_t step_db; int ret = 0; int di = dai->index; + k_spinlock_key_t key; #if CONFIG_INTEL_DMIC_TPLG_PARAMS struct sof_ipc_dai_config *config = spec_config; int i; @@ -174,7 +175,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config } assert(dmic); - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); #if CONFIG_INTEL_DMIC_TPLG_PARAMS /* @@ -255,7 +256,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config dmic->state = COMP_STATE_PREPARE; out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -263,6 +264,7 @@ static int dmic_set_config(struct dai *dai, struct ipc_config_dai *common_config static void dmic_start(struct dai *dai) { struct dmic_pdata *dmic = dai_get_drvdata(dai); + k_spinlock_key_t key; int i; int mic_a; int mic_b; @@ -270,7 +272,7 @@ static void dmic_start(struct dai *dai) int fir_b; /* enable port */ - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_dbg(dai, "dmic_start()"); dmic->startcount = 0; @@ -367,7 +369,7 @@ static void dmic_start(struct dai *dai) dmic->global->pause_mask &= ~BIT(dai->index); dmic->state = COMP_STATE_ACTIVE; - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); /* Currently there's no DMIC HW internal mutings and wait times * applied into this start sequence. It can be implemented here if @@ -404,10 +406,11 @@ static void dmic_stop_fifo_packers(struct dai *dai, int fifo_index) static void dmic_stop(struct dai *dai, bool stop_is_pause) { struct dmic_pdata *dmic = dai_get_drvdata(dai); + k_spinlock_key_t key; int i; dai_dbg(dai, "dmic_stop()"); - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dmic_stop_fifo_packers(dai, dai->index); @@ -449,7 +452,7 @@ static void dmic_stop(struct dai *dai, bool stop_is_pause) } schedule_task_cancel(&dmic->dmicwork); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static int dmic_trigger(struct dai *dai, int cmd, int direction) diff --git a/src/drivers/intel/haswell/ssp.c b/src/drivers/intel/haswell/ssp.c index 8ba5d7b50c69..d15896962620 100644 --- a/src/drivers/intel/haswell/ssp.c +++ b/src/drivers/intel/haswell/ssp.c @@ -44,8 +44,9 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, uint32_t format; bool inverted_frame = false; int ret = 0; + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* is playback/capture already running */ if (ssp->state[DAI_DIR_PLAYBACK] == COMP_STATE_ACTIVE || @@ -356,7 +357,7 @@ static int ssp_set_config(struct dai *dai, struct ipc_config_dai *common_config, dai_info(dai, "ssp_set_config(), done"); out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -397,8 +398,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_info(dai, "ssp_start()"); @@ -420,15 +422,16 @@ static void ssp_start(struct dai *dai, int direction) /* enable port */ ssp->state[direction] = COMP_STATE_ACTIVE; - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - 
spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* stop Rx if neeed */ if (direction == DAI_DIR_CAPTURE && @@ -457,7 +460,7 @@ static void ssp_stop(struct dai *dai, int direction) dai_info(dai, "ssp_stop(), SSP port disabled"); } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai *dai, int direction) diff --git a/src/drivers/intel/hda/hda-dma.c b/src/drivers/intel/hda/hda-dma.c index e569a515a1ec..c23e92b7919a 100644 --- a/src/drivers/intel/hda/hda-dma.c +++ b/src/drivers/intel/hda/hda-dma.c @@ -494,7 +494,7 @@ static int hda_dma_enable_unlock(struct dma_chan_data *channel) /* notify DMA to copy bytes */ static int hda_dma_link_copy(struct dma_chan_data *channel, int bytes, - uint32_t flags) + k_spinlock_key_t key) { return hda_dma_link_copy_ch(channel, bytes); } @@ -534,7 +534,7 @@ static int hda_dma_host_copy(struct dma_chan_data *channel, int bytes, static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, unsigned int channel) { - uint32_t flags; + k_spinlock_key_t key; if (channel >= dma->plat_data.channels) { tr_err(&hdma_tr, "hda-dmac: %d invalid channel %d", @@ -542,7 +542,7 @@ static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, return NULL; } - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); tr_dbg(&hdma_tr, "hda-dmac: %d channel %d -> get", dma->plat_data.id, channel); @@ -553,12 +553,12 @@ static struct dma_chan_data *hda_dma_channel_get(struct dma *dma, atomic_add(&dma->num_channels_busy, 1); /* return channel */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return &dma->chan[channel]; } /* DMAC has no free channels */ - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&hdma_tr, "hda-dmac: %d no free channel %d", dma->plat_data.id, channel); return NULL; @@ -583,11 +583,11 @@ static void hda_dma_channel_put_unlocked(struct dma_chan_data *channel) static void hda_dma_channel_put(struct dma_chan_data *channel) { struct dma *dma = channel->dma; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); hda_dma_channel_put_unlocked(channel); - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); atomic_sub(&dma->num_channels_busy, 1); } diff --git a/src/drivers/intel/ssp/mn.c b/src/drivers/intel/ssp/mn.c index d230993b0f26..97a1a8ed66fc 100644 --- a/src/drivers/intel/ssp/mn.c +++ b/src/drivers/intel/ssp/mn.c @@ -237,6 +237,7 @@ static inline int set_mclk_divider(uint16_t mclk_id, uint32_t mdivr_val) int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) { struct mn *mn = mn_get(); + k_spinlock_key_t key; int ret = 0; if (mclk_id >= DAI_NUM_SSP_MCLK) { @@ -244,7 +245,7 @@ int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) return -EINVAL; } - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); if (is_mclk_source_in_use()) ret = check_current_mclk_source(mclk_id, mclk_rate); @@ -265,7 +266,7 @@ int mn_set_mclk(uint16_t mclk_id, uint32_t mclk_rate) out: - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); return ret; } @@ -282,8 +283,9 @@ void mn_release_mclk(uint32_t mclk_id) { struct mn *mn = mn_get(); uint32_t mdivc; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->mclk_sources_ref[mclk_id]--; @@ -306,7 +308,7 @@ void mn_release_mclk(uint32_t mclk_id) mn->mclk_source_clock = 0; } - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } #if CONFIG_INTEL_MN @@ -596,8 +598,9 @@ int mn_set_bclk(uint32_t 
dai_index, uint32_t bclk_rate, uint32_t n = 1; int ret = 0; bool mn_in_use; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->bclk_sources[dai_index] = MN_BCLK_SOURCE_NONE; @@ -630,7 +633,7 @@ int mn_set_bclk(uint32_t dai_index, uint32_t bclk_rate, out: - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); return ret; } @@ -639,25 +642,27 @@ void mn_release_bclk(uint32_t dai_index) { struct mn *mn = mn_get(); bool mn_in_use; + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn->bclk_sources[dai_index] = MN_BCLK_SOURCE_NONE; mn_in_use = is_bclk_source_in_use(MN_BCLK_SOURCE_MN); /* release the M/N clock source if not used */ if (!mn_in_use) reset_bclk_mn_source(); - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } void mn_reset_bclk_divider(uint32_t dai_index) { struct mn *mn = mn_get(); + k_spinlock_key_t key; - spin_lock(&mn->lock); + key = k_spin_lock(&mn->lock); mn_reg_write(MN_MDIV_M_VAL(dai_index), dai_index, 1); mn_reg_write(MN_MDIV_N_VAL(dai_index), dai_index, 1); - spin_unlock(&mn->lock); + k_spin_unlock(&mn->lock, key); } #endif diff --git a/src/drivers/intel/ssp/ssp.c b/src/drivers/intel/ssp/ssp.c index 90dd9d505a0b..3066611f22f3 100644 --- a/src/drivers/intel/ssp/ssp.c +++ b/src/drivers/intel/ssp/ssp.c @@ -232,6 +232,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co uint32_t active_tx_slots = 2; uint32_t active_rx_slots = 2; uint32_t sample_width = 2; + k_spinlock_key_t key; bool inverted_bclk = false; bool inverted_frame = false; @@ -240,7 +241,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co int ret = 0; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* ignore config if SSP is already configured */ if (ssp->state[DAI_DIR_PLAYBACK] > COMP_STATE_READY || @@ -765,7 +766,7 @@ static int ssp_set_config_tplg(struct dai *dai, struct ipc_config_dai *common_co } out: - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); return ret; } @@ -934,8 +935,9 @@ static int ssp_get_hw_params(struct dai *dai, static void ssp_early_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* request mclk/bclk */ ssp_pre_start(dai); @@ -952,15 +954,16 @@ static void ssp_early_start(struct dai *dai, int direction) } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* start the SSP for either playback or capture */ static void ssp_start(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); dai_info(dai, "ssp_start()"); @@ -986,15 +989,16 @@ static void ssp_start(struct dai *dai, int direction) break; } - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } /* stop the SSP for either playback or capture */ static void ssp_stop(struct dai *dai, int direction) { struct ssp_pdata *ssp = dai_get_drvdata(dai); + k_spinlock_key_t key; - spin_lock(&dai->lock); + key = k_spin_lock(&dai->lock); /* * Wait to get valid fifo status in clock consumer mode. 
TODO it's @@ -1044,7 +1048,7 @@ static void ssp_stop(struct dai *dai, int direction) ssp_post_stop(dai); - spin_unlock(&dai->lock); + k_spin_unlock(&dai->lock, key); } static void ssp_pause(struct dai *dai, int direction) diff --git a/src/drivers/interrupt.c b/src/drivers/interrupt.c index 60c7d7b103df..588ec5d97ca3 100644 --- a/src/drivers/interrupt.c +++ b/src/drivers/interrupt.c @@ -53,14 +53,14 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc **cascade; - unsigned long flags; + k_spinlock_key_t key; unsigned int i; int ret; if (!tmpl->name || !tmpl->ops) return -EINVAL; - spin_lock_irq(&root->lock, flags); + key = k_spin_lock_irq(&root->lock); for (cascade = &root->list; *cascade; cascade = &(*cascade)->next) { @@ -95,7 +95,7 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) unlock: - spin_unlock_irq(&root->lock, flags); + k_spin_unlock_irq(&root->lock, key); return ret; } @@ -104,8 +104,8 @@ int interrupt_get_irq(unsigned int irq, const char *name) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc *cascade; - unsigned long flags; int ret = -ENODEV; + k_spinlock_key_t key; if (!name || name[0] == '\0') return irq; @@ -117,7 +117,7 @@ int interrupt_get_irq(unsigned int irq, const char *name) return -EINVAL; } - spin_lock_irq(&root->lock, flags); + key = k_spin_lock_irq(&root->lock); for (cascade = root->list; cascade; cascade = cascade->next) { /* .name is non-volatile */ @@ -129,7 +129,7 @@ int interrupt_get_irq(unsigned int irq, const char *name) } - spin_unlock_irq(&root->lock, flags); + k_spin_unlock_irq(&root->lock, key); return ret; } @@ -138,12 +138,12 @@ struct irq_cascade_desc *interrupt_get_parent(uint32_t irq) { struct cascade_root *root = cascade_root_get(); struct irq_cascade_desc *cascade, *c = NULL; - unsigned long flags; + k_spinlock_key_t key; if (irq < PLATFORM_IRQ_HW_NUM) return NULL; - spin_lock_irq(&root->lock, flags); + key = k_spin_lock_irq(&root->lock); for (cascade = root->list; cascade; cascade = cascade->next) { if (irq >= cascade->irq_base && @@ -155,7 +155,7 @@ struct irq_cascade_desc *interrupt_get_parent(uint32_t irq) } - spin_unlock_irq(&root->lock, flags); + k_spin_unlock_irq(&root->lock, key); return c; } @@ -277,14 +277,14 @@ static uint32_t irq_enable_child(struct irq_cascade_desc *cascade, int irq, struct irq_child *child; unsigned int child_idx; struct list_item *list; - unsigned long flags; + k_spinlock_key_t key; /* * Locking is child to parent: when called recursively we are already * holding the child's lock and then also taking the parent's lock. The * same holds for the interrupt_(un)register() paths. */ - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock_irq(&cascade->lock); child = cascade->child + hw_irq; child_idx = cascade->global_mask ? 0 : core; @@ -311,7 +311,7 @@ static uint32_t irq_enable_child(struct irq_cascade_desc *cascade, int irq, } - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock_irq(&cascade->lock, key); return 0; } @@ -324,9 +324,9 @@ static uint32_t irq_disable_child(struct irq_cascade_desc *cascade, int irq, struct irq_child *child; unsigned int child_idx; struct list_item *list; - unsigned long flags; + k_spinlock_key_t key; - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock_irq(&cascade->lock); child = cascade->child + hw_irq; child_idx = cascade->global_mask ? 
0 : core; @@ -356,7 +356,7 @@ static uint32_t irq_disable_child(struct irq_cascade_desc *cascade, int irq, } - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock_irq(&cascade->lock, key); return 0; } @@ -370,8 +370,7 @@ static int interrupt_register_internal(uint32_t irq, void (*handler)(void *arg), void *arg, struct irq_desc *desc) { struct irq_cascade_desc *cascade; - /* Avoid a bogus compiler warning */ - unsigned long flags = 0; + k_spinlock_key_t key; int ret; /* no parent means we are registering DSP internal IRQ */ @@ -389,9 +388,9 @@ static int interrupt_register_internal(uint32_t irq, void (*handler)(void *arg), #endif } - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock_irq(&cascade->lock); ret = irq_register_child(cascade, irq, handler, arg, desc); - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock_irq(&cascade->lock, key); return ret; } @@ -405,8 +404,7 @@ static void interrupt_unregister_internal(uint32_t irq, const void *arg, struct irq_desc *desc) { struct irq_cascade_desc *cascade; - /* Avoid a bogus compiler warning */ - unsigned long flags = 0; + k_spinlock_key_t key; /* no parent means we are unregistering DSP internal IRQ */ cascade = interrupt_get_parent(irq); @@ -424,9 +422,9 @@ static void interrupt_unregister_internal(uint32_t irq, const void *arg, return; } - spin_lock_irq(&cascade->lock, flags); + key = k_spin_lock_irq(&cascade->lock); irq_unregister_child(cascade, irq, arg, desc); - spin_unlock_irq(&cascade->lock, flags); + k_spin_unlock_irq(&cascade->lock, key); } uint32_t interrupt_enable(uint32_t irq, void *arg) diff --git a/src/drivers/mediatek/mt8195/afe-memif.c b/src/drivers/mediatek/mt8195/afe-memif.c index 3058d52987df..16e60f804fb4 100644 --- a/src/drivers/mediatek/mt8195/afe-memif.c +++ b/src/drivers/mediatek/mt8195/afe-memif.c @@ -144,28 +144,28 @@ struct afe_memif_dma { /* acquire the specific DMA channel */ static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req_chan) { - uint32_t flags; + k_spinlock_key_t key; struct dma_chan_data *channel; tr_dbg(&memif_tr, "MEMIF: channel_get(%d)", req_chan); - spin_lock_irq(&dma->lock, flags); + key = k_spin_lock_irq(&dma->lock); if (req_chan >= dma->plat_data.channels) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&memif_tr, "MEMIF: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); tr_err(&memif_tr, "MEMIF: Cannot reuse channel %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); channel->status = COMP_STATE_READY; - spin_unlock_irq(&dma->lock, flags); + k_spin_unlock_irq(&dma->lock, key); return channel; } @@ -173,7 +173,7 @@ static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req /* channel must not be running when this is called */ static void memif_channel_put(struct dma_chan_data *channel) { - uint32_t flags; + k_spinlock_key_t key; /* Assuming channel is stopped, we thus don't need hardware to * do anything right now @@ -182,10 +182,10 @@ static void memif_channel_put(struct dma_chan_data *channel) notifier_unregister_all(NULL, channel); - spin_lock_irq(&channel->dma->lock, flags); + key = k_spin_lock_irq(&channel->dma->lock); channel->status = COMP_STATE_INIT; atomic_sub(&channel->dma->num_channels_busy, 1); - spin_unlock_irq(&channel->dma->lock, flags); + k_spin_unlock_irq(&channel->dma->lock, key); } #if TEST_SGEN diff --git 
a/src/drivers/mediatek/mt8195/interrupt.c b/src/drivers/mediatek/mt8195/interrupt.c index 6b49387fa6cc..02ce1684a1ff 100644 --- a/src/drivers/mediatek/mt8195/interrupt.c +++ b/src/drivers/mediatek/mt8195/interrupt.c @@ -121,6 +121,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, int core = cpu_get_id(); struct list_item *clist; struct irq_desc *child = NULL; + k_spinlock_key_t key; int bit; bool handled; @@ -129,7 +130,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, handled = false; status &= ~(1ull << bit); - spin_lock(&cascade->lock); + key = k_spin_lock(&cascade->lock); list_for_item(clist, &cascade->child[bit].list) { child = container_of(clist, struct irq_desc, irq_list); @@ -140,7 +141,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, } } - spin_unlock(&cascade->lock); + k_spin_unlock(&cascade->lock, key); if (!handled) { tr_err(&int_tr, "irq_handler(): not handled, bit %d", bit); diff --git a/src/idc/idc.c b/src/idc/idc.c index 141db1aeede7..33c0b0486337 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -323,15 +323,15 @@ static void idc_complete(void *data) struct ipc *ipc = ipc_get(); struct idc *idc = data; uint32_t type = iTS(idc->received_msg.header); - uint32_t flags; + k_spinlock_key_t key; switch (type) { case iTS(IDC_MSG_IPC): /* Signal the host */ - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask &= ~IPC_TASK_SECONDARY_CORE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } } #endif diff --git a/src/include/sof/coherent.h b/src/include/sof/coherent.h index a3fa493a2148..55e6278a2e16 100644 --- a/src/include/sof/coherent.h +++ b/src/include/sof/coherent.h @@ -29,7 +29,7 @@ */ struct coherent { spinlock_t lock; /* locking mechanism */ - uint32_t flags; /* lock flags */ + k_spinlock_key_t key; /* lock key */ uint16_t shared; /* shared on other non coherent cores */ uint16_t core; /* owner core if not shared */ struct list_item list; /* coherent list iteration */ @@ -77,7 +77,7 @@ __must_check static inline struct coherent *coherent_acquire(struct coherent *c, if (c->shared) { CHECK_COHERENT_CORE(c); - spin_lock(&c->lock); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -103,7 +103,7 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t dcache_writeback_invalidate_region(c, size); /* unlock on uncache alias */ - spin_unlock(&(cache_to_uncache(c))->lock); + k_spin_unlock(&(cache_to_uncache(c))->lock, (cache_to_uncache(c))->key); } return cache_to_uncache(c); @@ -119,7 +119,7 @@ __must_check static inline struct coherent *coherent_acquire_irq(struct coherent if (c->shared) { CHECK_COHERENT_CORE(c); - spin_lock_irq(&c->lock, c->flags); + c->key = k_spin_lock_irq(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -142,8 +142,8 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si dcache_writeback_invalidate_region(c, size); /* unlock on uncache alias */ - spin_unlock_irq(&(cache_to_uncache(c))->lock, - (cache_to_uncache(c))->flags); + k_spin_unlock_irq(&(cache_to_uncache(c))->lock, + (cache_to_uncache(c))->key); } return cache_to_uncache(c); @@ -175,10 +175,10 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si do { \ /* assert if someone passes a cache/local address in here.
*/ \ ADDR_IS_COHERENT(object); \ - spin_lock(&(object)->member.lock); \ + (object)->member.key = k_spin_lock(&(object)->member.lock); \ (object)->member.shared = true; \ dcache_writeback_invalidate_region(object, sizeof(*object)); \ - spin_unlock(&(object)->member.lock); \ + k_spin_unlock(&(object)->member.lock, (object)->member.key); \ } while (0) /* set the object to shared mode with coherency managed by SW */ @@ -186,10 +186,10 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si do { \ /* assert if someone passes a cache/local address in here. */ \ ADDR_IS_COHERENT(object); \ - spin_lock_irq(&(object)->member.lock, &(object)->member.flags); \ + (object)->member.key = k_spin_lock_irq(&(object)->member.lock); \ (object)->member.shared = true; \ dcache_writeback_invalidate_region(object, sizeof(*object)); \ - spin_unlock_irq(&(object)->member.lock, &(object)->member.flags); \ + k_spin_unlock_irq(&(object)->member.lock, (object)->member.key); \ } while (0) #else @@ -199,7 +199,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si __must_check static inline struct coherent *coherent_acquire(struct coherent *c, const size_t size) { if (c->shared) { - spin_lock(&c->lock); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -214,7 +214,7 @@ static inline struct coherent *coherent_release(struct coherent *c, const size_t /* wtb and inv local data to coherent object */ dcache_writeback_invalidate_region(uncache_to_cache(c), size); - spin_unlock(&c->lock); + k_spin_unlock(&c->lock, c->key); } return c; @@ -224,7 +224,7 @@ __must_check static inline struct coherent *coherent_acquire_irq(struct coherent const size_t size) { if (c->shared) { - spin_lock_irq(&c->lock, c->flags); + c->key = k_spin_lock(&c->lock); /* invalidate local copy */ dcache_invalidate_region(uncache_to_cache(c), size); @@ -239,7 +239,7 @@ static inline struct coherent *coherent_release_irq(struct coherent *c, const si /* wtb and inv local data to coherent object */ dcache_writeback_invalidate_region(uncache_to_cache(c), size); - spin_unlock_irq(&c->lock, c->flags); + k_spin_unlock(&c->lock, c->key); } return c; diff --git a/src/include/sof/ipc/msg.h b/src/include/sof/ipc/msg.h index 4c5cea20e586..07c6fa967c01 100644 --- a/src/include/sof/ipc/msg.h +++ b/src/include/sof/ipc/msg.h @@ -72,15 +72,15 @@ static inline void ipc_msg_free(struct ipc_msg *msg) return; struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); list_item_del(&msg->list); rfree(msg->tx_data); rfree(msg); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } /** diff --git a/src/include/sof/spinlock.h b/src/include/sof/spinlock.h index 83549be342fa..00a9bece716f 100644 --- a/src/include/sof/spinlock.h +++ b/src/include/sof/spinlock.h @@ -19,6 +19,8 @@ #include +typedef uint32_t k_spinlock_key_t; + /* * Lock debugging provides a simple interface to debug deadlocks. The rmbox * trace output will show an output :- @@ -38,7 +40,7 @@ * src/drivers/dw-dma.c:840: spinlock_init(&dma->lock); * * grep -rn lock --include *.c | grep 439 - * src/lib/alloc.c:439: spin_lock_irq(&memmap.lock, flags); + * src/lib/alloc.c:439: key = k_spin_lock_irq(&memmap.lock); * * Every lock entry and exit shows LcE and LcX in trace alongside the lock * line numbers in hex. e.g.
@@ -160,7 +162,7 @@ static inline void _spinlock_init(spinlock_t *lock, int line) #define spinlock_init(lock) _spinlock_init(lock, __LINE__) /* does nothing on UP systems */ -static inline void _spin_lock(spinlock_t *lock, int line) +static inline k_spinlock_key_t _spin_lock(spinlock_t *lock, int line) { spin_lock_dbg(line); #if CONFIG_DEBUG_LOCKS @@ -171,16 +173,22 @@ static inline void _spin_lock(spinlock_t *lock, int line) #endif /* spinlock has to be in a shared memory */ + return 0; } -#define spin_lock(lock) _spin_lock(lock, __LINE__) +//#define spin_lock(lock) _spin_lock(lock, __LINE__) + +#define k_spin_lock(lock) _spin_lock(lock, __LINE__) /* disables all IRQ sources and takes lock - enter atomic context */ -uint32_t _spin_lock_irq(spinlock_t *lock); +k_spinlock_key_t _k_spin_lock_irq(spinlock_t *lock); + +//#define k_spin_lock_irq(lock) _k_spin_lock_irq(lock) -#define spin_lock_irq(lock, flags) (flags = _spin_lock_irq(lock)) +#define k_spin_lock_irq(lock) _k_spin_lock_irq(lock) -static inline void _spin_unlock(spinlock_t *lock, int line) +static inline void _spin_unlock(spinlock_t *lock, int line, + __attribute__((unused)) k_spinlock_key_t key) { arch_spin_unlock(lock); #if CONFIG_DEBUG_LOCKS @@ -190,11 +198,15 @@ static inline void _spin_unlock(spinlock_t *lock, int line) /* spinlock has to be in a shared memory */ } -#define spin_unlock(lock) _spin_unlock(lock, __LINE__) +//#define spin_unlock(lock) _spin_unlock(lock, __LINE__) + +#define k_spin_unlock(lock, key) _spin_unlock(lock, __LINE__, key) /* re-enables current IRQ sources and releases lock - leave atomic context */ -void _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line); +void _k_spin_unlock_irq(spinlock_t *lock, k_spinlock_key_t key, int line); + +//#define spin_unlock_irq(lock, flags) _k_spin_unlock_irq(lock, flags, __LINE__) -#define spin_unlock_irq(lock, flags) _spin_unlock_irq(lock, flags, __LINE__) +#define k_spin_unlock_irq(lock, key) _k_spin_unlock_irq(lock, key, __LINE__) #endif /* __SOF_SPINLOCK_H__ */ diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index 247c78ff8718..42fafbd93974 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -59,12 +59,12 @@ int ipc_process_on_core(uint32_t core, bool blocking) * will also reply to the host */ if (!blocking) { - uint32_t flags; + k_spinlock_key_t key; ipc->core = core; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask |= IPC_TASK_SECONDARY_CORE; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } /* send IDC message */ @@ -176,9 +176,9 @@ void ipc_send_queued_msg(void) { struct ipc *ipc = ipc_get(); struct ipc_msg *msg; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); /* any messages to send ? 
*/ if (list_is_empty(&ipc->msg_list)) @@ -190,16 +190,16 @@ void ipc_send_queued_msg(void) ipc_platform_send_msg(msg); out: - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) { struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; int ret; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); /* copy mailbox data to message */ if (msg->tx_size > 0 && msg->tx_size < SOF_IPC_MSG_MAX_SIZE) { @@ -223,7 +223,7 @@ void ipc_msg_send(struct ipc_msg *msg, void *data, bool high_priority) } out: - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } void ipc_schedule_process(struct ipc *ipc) @@ -271,12 +271,12 @@ void ipc_complete_cmd(struct ipc *ipc) static void ipc_complete_task(void *data) { struct ipc *ipc = data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask &= ~IPC_TASK_INLINE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } static enum task_state ipc_do_cmd(void *data) diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 36c9266f0885..ca29b0e5ec35 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -456,17 +456,17 @@ static int ipc_stream_trigger(uint32_t header) * synchronously. */ if (pipeline_is_timer_driven(pcm_dev->cd->pipeline)) { - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask |= IPC_TASK_IN_THREAD; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); ret = pipeline_trigger(pcm_dev->cd->pipeline, pcm_dev->cd, cmd); if (ret <= 0) { - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask &= ~IPC_TASK_IN_THREAD; - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } } else { ret = pipeline_trigger_run(pcm_dev->cd->pipeline, pcm_dev->cd, cmd); diff --git a/src/ipc/ipc3/helper.c b/src/ipc/ipc3/helper.c index 51a2b449805b..8823e9fde61b 100644 --- a/src/ipc/ipc3/helper.c +++ b/src/ipc/ipc3/helper.c @@ -73,6 +73,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) const struct comp_driver *drv = NULL; struct comp_driver_info *info; struct sof_ipc_comp_ext *comp_ext; + k_spinlock_key_t key; /* do we have extended data ? 
*/ if (!comp->ext_data_length) { @@ -114,7 +115,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) } /* search driver list with UUID */ - spin_lock(&drivers->lock); + key = k_spin_lock(&drivers->lock); list_for_item(clist, &drivers->list) { info = container_of(clist, struct comp_driver_info, list); @@ -138,7 +139,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) tr_dbg(&comp_tr, "get_drv(), found driver type %d, uuid %pU", drv->type, drv->tctx->uuid_p); - spin_unlock(&drivers->lock); + k_spin_unlock(&drivers->lock, key); return drv; } @@ -647,12 +648,12 @@ int ipc_comp_new(struct ipc *ipc, ipc_comp *_comp) void ipc_msg_reply(struct sof_ipc_reply *reply) { struct ipc *ipc = ipc_get(); - uint32_t flags; + k_spinlock_key_t key; mailbox_hostbox_write(0, reply, reply->hdr.size); - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask &= ~IPC_TASK_IN_THREAD; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } diff --git a/src/lib/alloc.c b/src/lib/alloc.c index ccaff0a227a7..0812dd0b2ba2 100644 --- a/src/lib/alloc.c +++ b/src/lib/alloc.c @@ -751,14 +751,14 @@ static void *_malloc_unlocked(enum mem_zone zone, uint32_t flags, uint32_t caps, void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) { struct mm *memmap = memmap_get(); - uint32_t lock_flags; + k_spinlock_key_t key; void *ptr = NULL; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock_irq(&memmap->lock); ptr = _malloc_unlocked(zone, flags, caps, bytes); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock_irq(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, zone, caps, flags); return ptr; @@ -779,16 +779,16 @@ void *rzalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) void *rzalloc_core_sys(int core, size_t bytes) { struct mm *memmap = memmap_get(); - uint32_t flags; + k_spinlock_key_t key; void *ptr = NULL; - spin_lock_irq(&memmap->lock, flags); + key = k_spin_lock_irq(&memmap->lock); ptr = rmalloc_sys(memmap->system + core, 0, 0, bytes); if (ptr) bzero(ptr, bytes); - spin_unlock_irq(&memmap->lock, flags); + k_spin_unlock_irq(&memmap->lock, key); return ptr; } @@ -944,13 +944,13 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, { struct mm *memmap = memmap_get(); void *ptr = NULL; - uint32_t lock_flags; + k_spinlock_key_t key; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock_irq(&memmap->lock); ptr = _balloc_unlocked(flags, caps, bytes, alignment); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock_irq(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags); return ptr; @@ -995,11 +995,11 @@ static void _rfree_unlocked(void *ptr) void rfree(void *ptr) { struct mm *memmap = memmap_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&memmap->lock, flags); + key = k_spin_lock_irq(&memmap->lock); _rfree_unlocked(ptr); - spin_unlock_irq(&memmap->lock, flags); + k_spin_unlock_irq(&memmap->lock, key); } void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, @@ -1007,13 +1007,13 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, { struct mm *memmap = memmap_get(); void *new_ptr = NULL; - uint32_t lock_flags; + k_spinlock_key_t key; size_t copy_bytes = MIN(bytes, old_bytes); if (!bytes) return new_ptr; - spin_lock_irq(&memmap->lock, lock_flags); + key = k_spin_lock_irq(&memmap->lock); new_ptr = _balloc_unlocked(flags, caps, 
bytes, alignment); @@ -1023,7 +1023,7 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, if (new_ptr) _rfree_unlocked(ptr); - spin_unlock_irq(&memmap->lock, lock_flags); + k_spin_unlock_irq(&memmap->lock, key); DEBUG_TRACE_PTR(ptr, bytes, SOF_MEM_ZONE_BUFFER, caps, flags); return new_ptr; @@ -1094,6 +1094,7 @@ int heap_info(enum mem_zone zone, int index, struct mm_info *out) { struct mm *memmap = memmap_get(); struct mm_heap *heap; + k_spinlock_key_t key; if (!out) goto error; @@ -1135,9 +1136,9 @@ int heap_info(enum mem_zone zone, int index, struct mm_info *out) goto error; } - spin_lock(&memmap->lock); + key = k_spin_lock(&memmap->lock); *out = heap->info; - spin_unlock(&memmap->lock); + k_spin_unlock(&memmap->lock, key); return 0; error: tr_err(&mem_tr, "heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index, diff --git a/src/lib/clk.c b/src/lib/clk.c index e13e10e8541c..d81e178a2bb6 100644 --- a/src/lib/clk.c +++ b/src/lib/clk.c @@ -53,7 +53,7 @@ void clock_set_freq(int clock, uint32_t hz) { struct clock_info *clk_info = clocks_get() + clock; uint32_t idx; - uint32_t flags; + k_spinlock_key_t key; clk_notify_data.old_freq = clk_info->freqs[clk_info->current_freq_idx].freq; @@ -61,7 +61,7 @@ void clock_set_freq(int clock, uint32_t hz) clk_info->freqs[clk_info->current_freq_idx].ticks_per_msec; /* atomic context for changing clocks */ - spin_lock_irq(&clk_info->lock, flags); + key = k_spin_lock_irq(&clk_info->lock); /* get nearest frequency that is >= requested Hz */ idx = clock_get_nearest_freq_idx(clk_info->freqs, clk_info->freqs_num, @@ -88,7 +88,7 @@ void clock_set_freq(int clock, uint32_t hz) clk_info->notification_mask, &clk_notify_data, sizeof(clk_notify_data)); - spin_unlock_irq(&clk_info->lock, flags); + k_spin_unlock_irq(&clk_info->lock, key); } void clock_low_power_mode(int clock, bool enable) diff --git a/src/lib/dai.c b/src/lib/dai.c index c98e3fd012f3..3112045cab18 100644 --- a/src/lib/dai.c +++ b/src/lib/dai.c @@ -139,7 +139,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) int ret = 0; const struct dai_type_info *dti; struct dai *d; - uint32_t flags_irq; + k_spinlock_key_t key; dti = dai_find_type(type); if (!dti) @@ -150,7 +150,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) continue; } /* device created? */ - spin_lock_irq(&d->lock, flags_irq); + key = k_spin_lock_irq(&d->lock); if (d->sref == 0) { if (flags & DAI_CREAT) ret = dai_probe(d); @@ -163,7 +163,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) tr_info(&dai_tr, "dai_get type %d index %d new sref %d", type, index, d->sref); - spin_unlock_irq(&d->lock, flags_irq); + k_spin_unlock_irq(&d->lock, key); return !ret ? 
d : NULL; } @@ -174,9 +174,9 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) void dai_put(struct dai *dai) { int ret; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&dai->lock, flags); + key = k_spin_lock_irq(&dai->lock); if (--dai->sref == 0) { ret = dai_remove(dai); if (ret < 0) { @@ -186,5 +186,5 @@ void dai_put(struct dai *dai) } tr_info(&dai_tr, "dai_put type %d index %d new sref %d", dai->drv->type, dai->index, dai->sref); - spin_unlock_irq(&dai->lock, flags); + k_spin_unlock_irq(&dai->lock, key); } diff --git a/src/lib/dma.c b/src/lib/dma.c index f2fd898fedb5..9bbc5e1a6255 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -32,7 +32,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) int users, ret; int min_users = INT32_MAX; struct dma *d = NULL, *dmin = NULL; - unsigned int flags_irq; + k_spinlock_key_t key; if (!info->num_dmas) { tr_err(&dma_tr, "dma_get(): No DMACs installed"); @@ -103,7 +103,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) * may be requested many times, let the probe() * do on-first-use initialization. */ - spin_lock_irq(&dmin->lock, flags_irq); + key = k_spin_lock_irq(&dmin->lock); ret = 0; if (!dmin->sref) { @@ -120,16 +120,16 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) dmin->plat_data.id, dmin->sref, atomic_read(&dmin->num_channels_busy)); - spin_unlock_irq(&dmin->lock, flags_irq); + k_spin_unlock_irq(&dmin->lock, key); return !ret ? dmin : NULL; } void dma_put(struct dma *dma) { - unsigned int flags_irq; + k_spinlock_key_t key; int ret; - spin_lock_irq(&dma->lock, flags_irq); + key = k_spin_lock_irq(&dma->lock); if (--dma->sref == 0) { ret = dma_remove(dma); if (ret < 0) { @@ -139,7 +139,7 @@ void dma_put(struct dma *dma) } tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d", dma, dma->sref); - spin_unlock_irq(&dma->lock, flags_irq); + k_spin_unlock_irq(&dma->lock, key); } int dma_sg_alloc(struct dma_sg_elem_array *elem_array, diff --git a/src/lib/notifier.c b/src/lib/notifier.c index e8182115aeef..9fb835e14fcf 100644 --- a/src/lib/notifier.c +++ b/src/lib/notifier.c @@ -41,11 +41,12 @@ int notifier_register(void *receiver, void *caller, enum notify_id type, { struct notify *notify = *arch_notify_get(); struct callback_handle *handle; + k_spinlock_key_t key; int ret = 0; assert(type >= NOTIFIER_ID_CPU_FREQ && type < NOTIFIER_ID_COUNT); - spin_lock(¬ify->lock); + key = k_spin_lock(¬ify->lock); /* Find already registered event of this type */ if (flags & NOTIFIER_FLAG_AGGREGATE && @@ -74,7 +75,7 @@ int notifier_register(void *receiver, void *caller, enum notify_id type, list_item_prepend(&handle->list, ¬ify->list[type]); out: - spin_unlock(¬ify->lock); + k_spin_unlock(¬ify->lock, key); return ret; } @@ -84,10 +85,11 @@ void notifier_unregister(void *receiver, void *caller, enum notify_id type) struct list_item *wlist; struct list_item *tlist; struct callback_handle *handle; + k_spinlock_key_t key; assert(type >= NOTIFIER_ID_CPU_FREQ && type < NOTIFIER_ID_COUNT); - spin_lock(¬ify->lock); + key = k_spin_lock(¬ify->lock); /* * Unregister all matching callbacks @@ -110,7 +112,7 @@ void notifier_unregister(void *receiver, void *caller, enum notify_id type) } } - spin_unlock(¬ify->lock); + k_spin_unlock(¬ify->lock, key); } void notifier_unregister_all(void *receiver, void *caller) diff --git a/src/platform/intel/cavs/lib/clk.c b/src/platform/intel/cavs/lib/clk.c index 4a2b4888af1d..c871c4855d2f 100644 --- 
a/src/platform/intel/cavs/lib/clk.c +++ b/src/platform/intel/cavs/lib/clk.c @@ -84,7 +84,7 @@ static inline void select_cpu_clock(int freq_idx, bool release_unused) /* lock clock for all cores */ for (i = 0; i < CONFIG_CORE_COUNT; i++) - spin_lock_irq(&clk_info[CLK_CPU(i)].lock, flags[i]); + flags[i] = k_spin_lock_irq(&clk_info[CLK_CPU(i)].lock); /* change clock */ select_cpu_clock_hw(freq_idx, release_unused); @@ -93,7 +93,7 @@ static inline void select_cpu_clock(int freq_idx, bool release_unused) /* unlock clock for all cores */ for (i = CONFIG_CORE_COUNT - 1; i >= 0; i--) - spin_unlock_irq(&clk_info[CLK_CPU(i)].lock, flags[i]); + k_spin_unlock_irq(&clk_info[CLK_CPU(i)].lock, flags[i]); } /* LPRO_ONLY mode */ @@ -178,13 +178,13 @@ static void platform_clock_low_power_mode(int clock, bool enable) void platform_clock_on_waiti(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int freq_idx; int lowest_freq_idx; bool pm_is_active; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); freq_idx = *cache_to_uncache(&active_freq_idx); lowest_freq_idx = get_lowest_freq_idx(CLK_CPU(cpu_get_id())); @@ -200,7 +200,7 @@ void platform_clock_on_waiti(void) set_cpu_current_freq_idx(lowest_freq_idx, true); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); /* check if waiti HPRO->LPRO switching is needed */ pm_runtime_put(CORE_HP_CLK, cpu_get_id()); @@ -253,13 +253,13 @@ static void platform_clock_low_power_mode(int clock, bool enable) void platform_clock_on_waiti(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int freq_idx; int lowest_freq_idx; bool pm_is_active; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); freq_idx = *cache_to_uncache(&active_freq_idx); lowest_freq_idx = get_lowest_freq_idx(CLK_CPU(cpu_get_id())); @@ -275,18 +275,18 @@ void platform_clock_on_waiti(void) set_cpu_current_freq_idx(lowest_freq_idx, true); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } void platform_clock_on_wakeup(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); - uint32_t flags; + k_spinlock_key_t key; int current_idx; int target_idx; /* hold the prd->lock for possible active_freq_idx switching */ - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); current_idx = get_current_freq_idx(CLK_CPU(cpu_get_id())); target_idx = *cache_to_uncache(&active_freq_idx); @@ -295,7 +295,7 @@ void platform_clock_on_wakeup(void) if (current_idx != target_idx) set_cpu_current_freq_idx(target_idx, true); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } #endif diff --git a/src/platform/intel/cavs/lib/pm_runtime.c b/src/platform/intel/cavs/lib/pm_runtime.c index 2c39815e75c1..cd27bcbe88e8 100644 --- a/src/platform/intel/cavs/lib/pm_runtime.c +++ b/src/platform/intel/cavs/lib/pm_runtime.c @@ -54,13 +54,13 @@ static void cavs_pm_runtime_host_dma_l1_get(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); pprd->host_dma_l1_sref++; - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); }
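Note: the lock and unlock flavours must stay paired, since only the _irq variants carry the saved interrupt state through the key; on this shim the plain k_spin_lock()/k_spin_unlock() pair maps to the bare UP lock and yields a dummy key. A minimal sketch of the pattern used in the wakeup path above (prd stands for any pm_runtime_data pointer, as in the surrounding code):

static void wakeup_sketch(struct pm_runtime_data *prd)
{
	k_spinlock_key_t key;

	key = k_spin_lock_irq(&prd->lock);	/* IRQs masked from here on */
	/* ... update shared clock/frequency state ... */
	k_spin_unlock_irq(&prd->lock, key);	/* saved IRQ state restored */
}

/** @@ -71,9 +71,9 @@ static inline void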
cavs_pm_runtime_host_dma_l1_put(void) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); if (!--pprd->host_dma_l1_sref) { shim_write(SHIM_SVCFG, @@ -85,7 +85,7 @@ static inline void cavs_pm_runtime_host_dma_l1_put(void) shim_read(SHIM_SVCFG) & ~(SHIM_SVCFG_FORCE_L1_EXIT)); } - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } static inline void cavs_pm_runtime_enable_dsp(bool enable) @@ -369,9 +369,9 @@ static inline void cavs_pm_runtime_core_dis_hp_clk(uint32_t index) int enabled_cores = cpu_enabled_cores(); struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); pprd->sleep_core_mask |= BIT(index); @@ -381,21 +381,21 @@ static inline void cavs_pm_runtime_core_dis_hp_clk(uint32_t index) if (all_active_cores_sleep) clock_low_power_mode(CLK_CPU(index), true); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } static inline void cavs_pm_runtime_core_en_hp_clk(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); pprd->sleep_core_mask &= ~BIT(index); clock_low_power_mode(CLK_CPU(index), false); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } static inline void cavs_pm_runtime_dis_dsp_pg(uint32_t index) @@ -574,26 +574,26 @@ void platform_pm_runtime_prepare_d0ix_en(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); pprd->prepare_d0ix_core_mask |= BIT(index); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } void platform_pm_runtime_prepare_d0ix_dis(uint32_t index) { struct pm_runtime_data *prd = pm_runtime_data_get(); struct cavs_pm_runtime_data *pprd = prd->platform_data; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&prd->lock, flags); + key = k_spin_lock_irq(&prd->lock); pprd->prepare_d0ix_core_mask &= ~BIT(index); - spin_unlock_irq(&prd->lock, flags); + k_spin_unlock_irq(&prd->lock, key); } int platform_pm_runtime_prepare_d0ix_is_req(uint32_t index) diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index 61b8077a3cbe..c6cf0ccfcd0a 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -92,8 +92,9 @@ static bool schedule_ll_is_pending(struct ll_schedule_data *sch) struct task *task; uint32_t pending_count = 0; struct comp_dev *sched_comp; + k_spinlock_key_t key; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); do { sched_comp = NULL; @@ -117,7 +118,7 @@ static bool schedule_ll_is_pending(struct ll_schedule_data *sch) } } while (sched_comp); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); return pending_count > 0; } @@ -154,6 +155,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) struct ll_schedule_domain *domain = sch->domain; struct list_item *wlist; struct task *task; + k_spinlock_key_t key; /* check each task in the list for pending */ wlist = sch->tasks.next; @@ 
-183,7 +185,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) wlist = task->list.next; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* do we need to reschedule this task */ if (task->state == SOF_TASK_STATE_COMPLETED) { @@ -196,7 +198,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) (uint32_t)domain->next_tick); } - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); } } @@ -239,6 +241,7 @@ static void schedule_ll_tasks_run(void *data) { struct ll_schedule_data *sch = data; struct ll_schedule_domain *domain = sch->domain; + k_spinlock_key_t key; uint32_t flags; uint32_t core = cpu_get_id(); @@ -248,7 +251,7 @@ static void schedule_ll_tasks_run(void *data) (unsigned int)domain->next_tick); irq_local_disable(flags); - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* disable domain on current core until tasks are finished */ domain_disable(domain, core); @@ -258,7 +261,7 @@ static void schedule_ll_tasks_run(void *data) domain_clear(domain); } - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); perf_cnt_init(&sch->pcd); @@ -271,7 +274,7 @@ static void schedule_ll_tasks_run(void *data) perf_cnt_stamp(&sch->pcd, perf_ll_sched_trace, 0 /* ignored */); - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* reset the new_target_tick for the first core */ if (domain->new_target_tick < platform_timer_get_atomic(timer_get())) @@ -291,7 +294,7 @@ static void schedule_ll_tasks_run(void *data) if (atomic_read(&sch->num_tasks)) domain_enable(domain, core); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); irq_local_enable(flags); } @@ -306,9 +309,10 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, uint64_t task_start_ticks; uint64_t task_start; uint64_t offset; + k_spinlock_key_t key; int ret; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); ret = domain_register(domain, task, &schedule_ll_tasks_run, sch); if (ret < 0) { @@ -364,7 +368,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, atomic_read(&domain->total_num_tasks)); done: - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); return ret; } @@ -373,8 +377,9 @@ static void schedule_ll_domain_clear(struct ll_schedule_data *sch, struct task *task) { struct ll_schedule_domain *domain = sch->domain; + k_spinlock_key_t key; - spin_lock(&domain->lock); + key = k_spin_lock(&domain->lock); /* * Decrement the number of tasks on the core. 
@@ -390,7 +395,7 @@ static void schedule_ll_domain_clear(struct ll_schedule_data *sch, atomic_read(&sch->num_tasks), atomic_read(&domain->total_num_tasks)); - spin_unlock(&domain->lock); + k_spin_unlock(&domain->lock, key); } static void schedule_ll_task_insert(struct task *task, struct list_item *tasks) diff --git a/src/schedule/zephyr.c b/src/schedule/zephyr.c index 6d1530fecf1b..1319cf7775cd 100644 --- a/src/schedule/zephyr.c +++ b/src/schedule/zephyr.c @@ -59,7 +59,7 @@ static void idc_handler(struct k_p4wq_work *work) struct ipc *ipc = ipc_get(); struct idc_msg *msg = &zmsg->msg; int payload = -1; - uint32_t flags; + k_spinlock_key_t key; SOC_DCACHE_INVALIDATE(msg, sizeof(*msg)); @@ -81,10 +81,10 @@ static void idc_handler(struct k_p4wq_work *work) case IDC_MSG_IPC: idc_cmd(&idc->received_msg); /* Signal the host */ - spin_lock_irq(&ipc->lock, flags); + key = k_spin_lock_irq(&ipc->lock); ipc->task_mask &= ~IPC_TASK_SECONDARY_CORE; ipc_complete_cmd(ipc); - spin_unlock_irq(&ipc->lock, flags); + k_spin_unlock_irq(&ipc->lock, key); } } diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index f085ac5f0a4b..6ca12891cea4 100644 --- a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -132,7 +132,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core; char thread_name[] = "ll_thread0"; k_tid_t thread; - uint32_t flags; + k_spinlock_key_t key; tr_dbg(&ll_tr, "zephyr_domain_register()"); @@ -160,7 +160,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_thread_start(thread); - spin_lock_irq(&domain->lock, flags); + key = k_spin_lock_irq(&domain->lock); if (!k_timer_user_data_get(&zephyr_domain->timer)) { k_timeout_t start = {0}; @@ -173,7 +173,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_timer_remaining_ticks(&zephyr_domain->timer); } - spin_unlock_irq(&domain->lock, flags); + k_spin_unlock_irq(&domain->lock, key); tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d", domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)LL_TIMER_PERIOD_US); @@ -186,7 +186,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); int core = cpu_get_id(); - uint32_t flags; + k_spinlock_key_t key; tr_dbg(&ll_tr, "zephyr_domain_unregister()"); @@ -194,7 +194,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, if (num_tasks) return 0; - spin_lock_irq(&domain->lock, flags); + key = k_spin_lock_irq(&domain->lock); if (!atomic_read(&domain->total_num_tasks)) { k_timer_stop(&zephyr_domain->timer); @@ -203,7 +203,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, zephyr_domain->domain_thread[core].handler = NULL; - spin_unlock_irq(&domain->lock, flags); + k_spin_unlock_irq(&domain->lock, key); tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d", domain->type, domain->clk); diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 5af53876762f..36b7acad2b59 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -173,7 +173,7 @@ static void zephyr_ll_run(void *data) { struct zephyr_ll *sch = data; struct task *task; struct list_item *list; - uint32_t flags; + k_spinlock_key_t key; - zephyr_ll_lock(sch, &flags); + zephyr_ll_lock(sch, &key);
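Note: these Zephyr-side files still go through the transitional shim here; Zephyr's native k_spin_lock() already masks local interrupts and returns the saved state as the key, so the native API has no _irq variant at all. A sketch of the eventual native form once the shim is retired (struct my_domain is hypothetical; the k_spinlock calls are the stock Zephyr API):

#include <spinlock.h>	/* Zephyr kernel spinlock API */

struct my_domain {
	struct k_spinlock lock;
	int users;
};

static void my_domain_register(struct my_domain *d)
{
	/* Zephyr's k_spin_lock() disables local IRQs and returns the
	 * saved state as the key, so no separate _irq call is needed.
	 */
	k_spinlock_key_t key = k_spin_lock(&d->lock);

	d->users++;
	k_spin_unlock(&d->lock, key);
}

@@ -278,7 +278,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll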
*sch, struct task *ta struct task *task_iter; struct list_item *list; uint64_t delay = period ? period : start; - uint32_t flags; + k_spinlock_key_t key; int ret; zephyr_ll_assert_core(sch); @@ -382,7 +382,7 @@ static int zephyr_ll_task_schedule_after(void *data, struct task *task, uint64_t static int zephyr_ll_task_free(void *data, struct task *task) { struct zephyr_ll *sch = data; - uint32_t flags; + k_spinlock_key_t key; struct zephyr_ll_pdata *pdata = task->priv_data; bool must_wait, on_list = true; @@ -446,7 +446,7 @@ static int zephyr_ll_task_free(void *data, struct task *task) static int zephyr_ll_task_cancel(void *data, struct task *task) { struct zephyr_ll *sch = data; - uint32_t flags; + k_spinlock_key_t key; zephyr_ll_assert_core(sch); diff --git a/src/spinlock.c b/src/spinlock.c index 2895f42103b5..8db700a9f0ab 100644 --- a/src/spinlock.c +++ b/src/spinlock.c @@ -22,15 +22,15 @@ DECLARE_TR_CTX(sl_tr, SOF_UUID(spinlock_uuid), LOG_LEVEL_INFO); #endif -uint32_t _spin_lock_irq(spinlock_t *lock) +k_spinlock_key_t _k_spin_lock_irq(spinlock_t *lock) { - uint32_t flags; + k_spinlock_key_t flags; flags = interrupt_global_disable(); #if CONFIG_DEBUG_LOCKS lock_dbg_atomic++; #endif - spin_lock(lock); + k_spin_lock(lock); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic < DBG_LOCK_USERS) lock_dbg_user[lock_dbg_atomic - 1] = (lock)->user; @@ -38,9 +38,9 @@ uint32_t _spin_lock_irq(spinlock_t *lock) return flags; } -void _spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line) +void _k_spin_unlock_irq(spinlock_t *lock, k_spinlock_key_t flags, int line) { - _spin_unlock(lock, line); + _spin_unlock(lock, line, flags); #if CONFIG_DEBUG_LOCKS lock_dbg_atomic--; #endif diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 3eb256829955..90eae7e3687b 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -54,7 +54,7 @@ static enum task_state trace_work(void *data) struct dma_trace_data *d = data; struct dma_trace_buf *buffer = &d->dmatb; struct dma_sg_config *config = &d->config; - unsigned long flags; + k_spinlock_key_t key; uint32_t avail = buffer->avail; int32_t size; uint32_t overflow; @@ -103,7 +103,7 @@ static enum task_state trace_work(void *data) ipc_msg_send(d->msg, &d->posn, false); out: - spin_lock_irq(&d->lock, flags); + key = k_spin_lock_irq(&d->lock); /* disregard any old messages and don't resend them if we overflow */ if (size > 0) { @@ -116,7 +116,7 @@ static enum task_state trace_work(void *data) /* DMA trace copying is done, allow reschedule */ d->copy_in_progress = 0; - spin_unlock_irq(&d->lock, flags); + k_spin_unlock_irq(&d->lock, key); /* reschedule the trace copying work */ return SOF_TASK_STATE_RESCHEDULE;
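Note: combining the src/spinlock.c and src/include/sof/spinlock.h hunks above, the xtos shim for the _irq pair reduces to roughly the following condensed view (debug-lock bookkeeping elided; this assumes the unshown tail of _k_spin_unlock_irq() still calls interrupt_global_enable(), as the old _spin_unlock_irq() did):

k_spinlock_key_t _k_spin_lock_irq(spinlock_t *lock)
{
	k_spinlock_key_t flags;

	flags = interrupt_global_disable();	/* save and mask IRQs */
	k_spin_lock(lock);			/* take the lock; its dummy key is ignored */
	return flags;
}

void _k_spin_unlock_irq(spinlock_t *lock, k_spinlock_key_t flags, int line)
{
	_spin_unlock(lock, line, flags);	/* key parameter is unused by _spin_unlock() */
	interrupt_global_enable(flags);		/* restore the saved IRQ state */
}

@@ -222,7 +222,7 @@ static int dma_trace_buffer_init(struct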
dma_trace_data *d) static void dma_trace_buffer_free(struct dma_trace_data *d) { struct dma_trace_buf *buffer = &d->dmatb; - unsigned int flags; + k_spinlock_key_t key; - spin_lock_irq(&d->lock, flags); + key = k_spin_lock_irq(&d->lock); rfree(buffer->addr); memset(buffer, 0, sizeof(*buffer)); - spin_unlock_irq(&d->lock, flags); + k_spin_unlock_irq(&d->lock, key); } #if CONFIG_DMA_GW @@ -657,7 +657,7 @@ void dtrace_event(const char *e, uint32_t length) { struct dma_trace_data *trace_data = dma_trace_data_get(); struct dma_trace_buf *buffer = NULL; - unsigned long flags; + k_spinlock_key_t key; if (!dma_trace_initialized(trace_data) || length > DMA_TRACE_LOCAL_SIZE / 8 || length == 0) { @@ -666,7 +666,7 @@ void dtrace_event(const char *e, uint32_t length) buffer = &trace_data->dmatb; - spin_lock_irq(&trace_data->lock, flags); + key = k_spin_lock_irq(&trace_data->lock); dtrace_add_event(e, length); /* if DMA trace copying is working or secondary core @@ -674,11 +674,11 @@ */ if (trace_data->copy_in_progress || cpu_get_id() != PLATFORM_PRIMARY_CORE_ID) { - spin_unlock_irq(&trace_data->lock, flags); + k_spin_unlock_irq(&trace_data->lock, key); return; } - spin_unlock_irq(&trace_data->lock, flags); + k_spin_unlock_irq(&trace_data->lock, key); /* schedule copy now if buffer > 50% full */ if (trace_data->enabled && diff --git a/src/trace/trace.c b/src/trace/trace.c index 0e0eaadd89d9..935eead0ab24 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -456,9 +456,9 @@ void trace_flush_dma_to_mbox(void) { struct trace *trace = trace_get(); volatile uint64_t *t; - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock_irq(&trace->lock); /* get mailbox position */ t = (volatile uint64_t *)(MAILBOX_TRACE_BASE + trace->pos); @@ -466,33 +466,33 @@ void trace_flush_dma_to_mbox(void) /* flush dma trace messages */ dma_trace_flush((void *)t); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock_irq(&trace->lock, key); } void trace_on(void) { struct trace *trace = trace_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock_irq(&trace->lock); trace->enable = 1; dma_trace_on(); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock_irq(&trace->lock, key); } void trace_off(void) { struct trace *trace = trace_get(); - uint32_t flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, flags); + key = k_spin_lock_irq(&trace->lock); trace->enable = 0; dma_trace_off(); - spin_unlock_irq(&trace->lock, flags); + k_spin_unlock_irq(&trace->lock, key); } void trace_init(struct sof *sof) @@ -536,11 +536,11 @@ static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_addres mtrace_event(packet, MESSAGE_SIZE(n_args)); } else { struct trace * const trace = trace_get(); - uint32_t saved_flags; + k_spinlock_key_t key; - spin_lock_irq(&trace->lock, saved_flags); + key = k_spin_lock_irq(&trace->lock); mtrace_event(packet, MESSAGE_SIZE(n_args)); - spin_unlock_irq(&trace->lock, saved_flags); + k_spin_unlock_irq(&trace->lock, key); } } diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c index 62bbb4396059..40acfe4ea26a 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -146,14 +146,14 @@ volatile void * WEAK task_context_get(void) return NULL; } -uint32_t WEAK _spin_lock_irq(spinlock_t *lock) +k_spinlock_key_t WEAK _k_spin_lock_irq(spinlock_t *lock) { (void)lock; return 0; } -void WEAK
_spin_unlock_irq(spinlock_t *lock, uint32_t flags, int line) +void WEAK _k_spin_unlock_irq(spinlock_t *lock, k_spinlock_key_t flags, int line) { (void)lock; (void)flags; (void)line; }
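Note: the WEAK mocks above let host-side cmocka builds link against the new symbol names without real interrupt control. A minimal sketch of a test driving the pair through those mocks (the test body and cmocka boilerplate are illustrative only):

#include <stdarg.h>
#include <stddef.h>
#include <setjmp.h>
#include <stdint.h>
#include <cmocka.h>
#include <sof/spinlock.h>

static void test_lock_unlock_pairing(void **state)
{
	spinlock_t lock;
	k_spinlock_key_t key;

	(void)state;
	spinlock_init(&lock);

	key = k_spin_lock_irq(&lock);	/* resolves to the WEAK mock, key == 0 */
	k_spin_unlock_irq(&lock, key);	/* the mock is a no-op on the host */
}

int main(void)
{
	const struct CMUnitTest tests[] = {
		cmocka_unit_test(test_lock_unlock_pairing),
	};

	return cmocka_run_group_tests(tests, NULL, NULL);
}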