diff --git a/src/audio/CMakeLists.txt b/src/audio/CMakeLists.txt index a7b91617f502..b04f5c279194 100644 --- a/src/audio/CMakeLists.txt +++ b/src/audio/CMakeLists.txt @@ -2,7 +2,7 @@ if(NOT CONFIG_LIBRARY) add_local_sources(sof - host.c + host-legacy.c component.c data_blob.c buffer.c @@ -55,7 +55,7 @@ if(NOT CONFIG_LIBRARY) endif() if(CONFIG_COMP_DAI) add_local_sources(sof - dai.c + dai-legacy.c ) endif() if(CONFIG_COMP_KPB) diff --git a/src/audio/dai.c b/src/audio/dai-legacy.c similarity index 98% rename from src/audio/dai.c rename to src/audio/dai-legacy.c index a3b71af01818..0f1f19cc2bcd 100644 --- a/src/audio/dai.c +++ b/src/audio/dai-legacy.c @@ -220,7 +220,7 @@ static void dai_free(struct comp_dev *dev) if (dd->chan) { notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY); - dma_channel_put(dd->chan); + dma_channel_put_legacy(dd->chan); dd->chan->dev_data = NULL; } @@ -595,7 +595,7 @@ static int dai_config_prepare(struct comp_dev *dev) } /* allocate DMA channel */ - dd->chan = dma_channel_get(dd->dma, channel); + dd->chan = dma_channel_get_legacy(dd->dma, channel); if (!dd->chan) { comp_err(dev, "dai_config_prepare(): dma_channel_get() failed"); dd->chan = NULL; @@ -656,7 +656,7 @@ static int dai_prepare(struct comp_dev *dev) return ret; } - ret = dma_set_config(dd->chan, &dd->config); + ret = dma_set_config_legacy(dd->chan, &dd->config); if (ret < 0) comp_set_state(dev, COMP_TRIGGER_RESET); @@ -728,7 +728,7 @@ static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd) /* only start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { - ret = dma_start(dd->chan); + ret = dma_start_legacy(dd->chan); if (ret < 0) return ret; /* start the DAI */ @@ -750,13 +750,13 @@ static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd) /* only start the DAI if we are not XRUN handling */ if (dd->xrun == 0) { /* recover valid start position */ - ret = dma_release(dd->chan); + ret = dma_release_legacy(dd->chan); if (ret < 0) return ret; /* start the DAI */ dai_trigger(dd->dai, cmd, dev->direction); - ret = dma_start(dd->chan); + ret = dma_start_legacy(dd->chan); if (ret < 0) return ret; } else { @@ -781,16 +781,16 @@ static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd) * as soon as possible. */ #if CONFIG_DMA_SUSPEND_DRAIN - ret = dma_stop(dd->chan); + ret = dma_stop_legacy(dd->chan); dai_trigger(dd->dai, cmd, dev->direction); #else dai_trigger(dd->dai, cmd, dev->direction); - ret = dma_stop(dd->chan); + ret = dma_stop_legacy(dd->chan); #endif break; case COMP_TRIGGER_PAUSE: comp_dbg(dev, "dai_comp_trigger_internal(), PAUSE"); - ret = dma_pause(dd->chan); + ret = dma_pause_legacy(dd->chan); dai_trigger(dd->dai, cmd, dev->direction); break; case COMP_TRIGGER_PRE_START: @@ -904,7 +904,7 @@ static int dai_copy(struct comp_dev *dev) comp_dbg(dev, "dai_copy()"); /* get data sizes from DMA */ - ret = dma_get_data_size(dd->chan, &avail_bytes, &free_bytes); + ret = dma_get_data_size_legacy(dd->chan, &avail_bytes, &free_bytes); if (ret < 0) { dai_report_xrun(dev, 0); return ret; @@ -952,7 +952,7 @@ static int dai_copy(struct comp_dev *dev) return 0; } - ret = dma_copy(dd->chan, copy_bytes, 0); + ret = dma_copy_legacy(dd->chan, copy_bytes, 0); if (ret < 0) { dai_report_xrun(dev, copy_bytes); return ret; diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c new file mode 100644 index 000000000000..654bd99c4e7d --- /dev/null +++ b/src/audio/dai-zephyr.c @@ -0,0 +1,1177 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2016 Intel Corporation. 
All rights reserved. +// +// Author: Liam Girdwood +// Keyon Jie + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct comp_driver comp_dai; + +/* c2b00d27-ffbc-4150-a51a-245c79c5e54b */ +DECLARE_SOF_RT_UUID("dai", dai_comp_uuid, 0xc2b00d27, 0xffbc, 0x4150, + 0xa5, 0x1a, 0x24, 0x5c, 0x79, 0xc5, 0xe5, 0x4b); + +DECLARE_TR_CTX(dai_comp_tr, SOF_UUID(dai_comp_uuid), LOG_LEVEL_INFO); + +static void dai_atomic_trigger(void *arg, enum notify_id type, void *data); + +/* Assign DAI to a group */ +int dai_assign_group(struct comp_dev *dev, uint32_t group_id) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + if (dd->group) { + if (dd->group->group_id != group_id) { + comp_err(dev, "dai_assign_group(), DAI already in group %d, requested %d", + dd->group->group_id, group_id); + return -EINVAL; + } + + /* No need to re-assign to the same group, do nothing */ + return 0; + } + + dd->group = dai_group_get(group_id, DAI_CREAT); + if (!dd->group) { + comp_err(dev, "dai_assign_group(), failed to assign group %d", + group_id); + return -EINVAL; + } + + comp_dbg(dev, "dai_assign_group(), group %d num %d", + group_id, dd->group->num_dais); + + /* Register for the atomic trigger event */ + notifier_register(dev, dd->group, NOTIFIER_ID_DAI_TRIGGER, + dai_atomic_trigger, 0); + + return 0; +} + +/* this is called by DMA driver every time descriptor has completed */ +static void dai_dma_cb(void *arg, enum notify_id type, void *data) +{ + struct dma_cb_data *next = data; + struct comp_dev *dev = arg; + struct dai_data *dd = comp_get_drvdata(dev); + uint32_t bytes = next->elem.size; + struct comp_buffer *source; + struct comp_buffer *sink; + void *buffer_ptr; + int ret; + + comp_dbg(dev, "dai_dma_cb()"); + + next->status = DMA_CB_STATUS_RELOAD; + + /* stop dma copy for pause/stop/xrun */ + if (dev->state != COMP_STATE_ACTIVE || dd->xrun) { + /* stop the DAI */ + dai_trigger(dd->dai, COMP_TRIGGER_STOP, dev->direction); + + /* tell DMA not to reload */ + next->status = DMA_CB_STATUS_END; + } + + /* is our pipeline handling an XRUN ? */ + if (dd->xrun) { + /* make sure we only playback silence during an XRUN */ + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) + /* fill buffer with silence */ + buffer_zero(dd->dma_buffer); + + return; + } + + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { + ret = dma_buffer_copy_to(dd->local_buffer, dd->dma_buffer, + dd->process, bytes); + + buffer_ptr = dd->local_buffer->stream.r_ptr; + } else { + ret = dma_buffer_copy_from(dd->dma_buffer, dd->local_buffer, + dd->process, bytes); + + buffer_ptr = dd->local_buffer->stream.w_ptr; + } + + /* assert dma_buffer_copy succeed */ + if (ret < 0) { + source = dev->direction == SOF_IPC_STREAM_PLAYBACK ? + dd->local_buffer : dd->dma_buffer; + sink = dev->direction == SOF_IPC_STREAM_PLAYBACK ? 
+ dd->dma_buffer : dd->local_buffer; + comp_err(dev, "dai_dma_cb() dma buffer copy failed, dir %d bytes %d avail %d free %d", + dev->direction, bytes, + audio_stream_get_avail_samples(&source->stream) * + audio_stream_frame_bytes(&source->stream), + audio_stream_get_free_samples(&sink->stream) * + audio_stream_frame_bytes(&sink->stream)); + return; + } + + /* update host position (in bytes offset) for drivers */ + dev->position += bytes; + if (dd->dai_pos) { + dd->dai_pos_blks += bytes; + *dd->dai_pos = dd->dai_pos_blks + + (char *)buffer_ptr - + (char *)dd->dma_buffer->stream.addr; + } +} + +static struct comp_dev *dai_new(const struct comp_driver *drv, + struct comp_ipc_config *config, + void *spec) +{ + struct comp_dev *dev; + struct ipc_config_dai *dai = spec; + struct dai_data *dd; + uint32_t dir, caps, dma_dev; + + comp_cl_dbg(&comp_dai, "dai_new()"); + + dev = comp_alloc(drv, sizeof(*dev)); + if (!dev) + return NULL; + dev->ipc_config = *config; + + dd = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*dd)); + if (!dd) { + rfree(dev); + return NULL; + } + + comp_set_drvdata(dev, dd); + + dd->dai = dai_get(dai->type, dai->dai_index, DAI_CREAT); + if (!dd->dai) { + comp_cl_err(&comp_dai, "dai_new(): dai_get() failed to create DAI."); + goto error; + } + dd->ipc_config = *dai; + + /* request GP LP DMA with shared access privilege */ + dir = dai->direction == SOF_IPC_STREAM_PLAYBACK ? + DMA_DIR_MEM_TO_DEV : DMA_DIR_DEV_TO_MEM; + + caps = dai_get_info(dd->dai, DAI_INFO_DMA_CAPS); + dma_dev = dai_get_info(dd->dai, DAI_INFO_DMA_DEV); + + dd->dma = dma_get(dir, caps, dma_dev, DMA_ACCESS_SHARED); + if (!dd->dma) { + comp_cl_err(&comp_dai, "dai_new(): dma_get() failed to get shared access to DMA."); + goto error; + } + + dma_sg_init(&dd->config.elem_array); + dd->dai_pos = NULL; + dd->dai_pos_blks = 0; + dd->xrun = 0; + dd->chan = NULL; + + dev->state = COMP_STATE_READY; + return dev; + +error: + rfree(dd); + rfree(dev); + return NULL; +} + +static void dai_free(struct comp_dev *dev) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + if (dd->group) { + notifier_unregister(dev, dd->group, NOTIFIER_ID_DAI_TRIGGER); + dai_group_put(dd->group); + } + + if (dd->chan) { + notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY); + dma_release_channel(dd->dma->z_dev, dd->chan->index); + dd->chan->dev_data = NULL; + } + + dma_put(dd->dma); + + dai_put(dd->dai); + + rfree(dd->dai_spec_config); + rfree(dd); + rfree(dev); +} + +static int dai_comp_get_hw_params(struct comp_dev *dev, + struct sof_ipc_stream_params *params, + int dir) +{ + struct dai_data *dd = comp_get_drvdata(dev); + int ret; + + comp_dbg(dev, "dai_hw_params()"); + + /* fetching hw dai stream params */ + ret = dai_get_hw_params(dd->dai, params, dir); + if (ret < 0) { + comp_err(dev, "dai_comp_get_hw_params(): dai_get_hw_params failed ret %d", + ret); + return ret; + } + + /* dai_comp_get_hw_params() function fetches hardware dai parameters, + * which then are propagating back through the pipeline, so that any + * component can convert specific stream parameter. 
Here, we overwrite + * frame_fmt hardware parameter as DAI component is able to convert + * stream with different frame_fmt's (using pcm converter) + */ + params->frame_fmt = dev->ipc_config.frame_fmt; + + return 0; +} + +static int dai_comp_hw_params(struct comp_dev *dev, + struct sof_ipc_stream_params *params) +{ + struct dai_data *dd = comp_get_drvdata(dev); + int ret; + + comp_dbg(dev, "dai_comp_hw_params()"); + + /* configure hw dai stream params */ + ret = dai_hw_params(dd->dai, params); + if (ret < 0) { + comp_err(dev, "dai_comp_hw_params(): dai_hw_params failed ret %d", + ret); + return ret; + } + + return 0; +} + +static int dai_verify_params(struct comp_dev *dev, + struct sof_ipc_stream_params *params) +{ + struct sof_ipc_stream_params hw_params; + + dai_comp_get_hw_params(dev, &hw_params, params->direction); + + /* checks whether pcm parameters match hardware DAI parameter set + * during dai_set_config(). If hardware parameter is equal to 0, it + * means that it can vary, so any value is acceptable. We do not check + * format parameter, because DAI is able to change format using + * pcm_converter functions. + */ + if (hw_params.rate && hw_params.rate != params->rate) { + comp_err(dev, "dai_verify_params(): pcm rate parameter %d does not match hardware rate %d", + params->rate, hw_params.rate); + return -EINVAL; + } + + if (hw_params.channels && hw_params.channels != params->channels) { + comp_err(dev, "dai_verify_params(): pcm channels parameter %d does not match hardware channels %d", + params->channels, hw_params.channels); + return -EINVAL; + } + + /* set component period frames */ + component_set_nearest_period_frames(dev, params->rate); + + return 0; +} + +/* set component audio SSP and DMA configuration */ +static int dai_playback_params(struct comp_dev *dev, uint32_t period_bytes, + uint32_t period_count) +{ + struct dai_data *dd = comp_get_drvdata(dev); + struct dma_sg_config *config = &dd->config; + struct dma_config *dma_cfg; + struct dma_block_config *dma_block_cfg; + struct dma_block_config *prev; + uint32_t local_fmt = dd->local_buffer->stream.frame_fmt; + uint32_t dma_fmt = dd->dma_buffer->stream.frame_fmt; + uint32_t fifo; + int i, err; + + /* set processing function */ + dd->process = pcm_get_conversion_function(local_fmt, dma_fmt); + + if (!dd->process) { + comp_err(dev, "dai_playback_params(): converter function NULL: local fmt %d dma fmt %d\n", + local_fmt, dma_fmt); + return -EINVAL; + } + + /* set up DMA configuration */ + config->direction = DMA_DIR_MEM_TO_DEV; + config->src_width = get_sample_bytes(dma_fmt); + config->dest_width = config->src_width; + config->cyclic = 1; + config->irq_disabled = pipeline_is_timer_driven(dev->pipeline); + config->dest_dev = dai_get_handshake(dd->dai, dev->direction, + dd->stream_id); + config->is_scheduling_source = comp_is_scheduling_source(dev); + config->period = dev->pipeline->period; + + comp_info(dev, "dai_playback_params() dest_dev = %d stream_id = %d src_width = %d dest_width = %d", + config->dest_dev, dd->stream_id, + config->src_width, config->dest_width); + + if (!config->elem_array.elems) { + fifo = dai_get_fifo(dd->dai, dev->direction, + dd->stream_id); + + comp_info(dev, "dai_playback_params() fifo 0x%x", fifo); + + err = dma_sg_alloc(&config->elem_array, SOF_MEM_ZONE_RUNTIME, + config->direction, + period_count, + period_bytes, + (uintptr_t)(dd->dma_buffer->stream.addr), + fifo); + if (err < 0) { + comp_err(dev, "dai_playback_params(): dma_sg_alloc() for period_count %d period_bytes %d failed with err = %d", 
+ period_count, period_bytes, err); + return err; + } + } + + dma_cfg = rballoc(SOF_MEM_FLAG_COHERENT, + SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA, + sizeof(struct dma_config)); + if (!dma_cfg) { + comp_err(dev, "dai_playback_params(): dma_cfg allocation failed"); + return -ENOMEM; + } + + dma_cfg->channel_direction = MEMORY_TO_PERIPHERAL; + dma_cfg->source_data_size = config->src_width; + dma_cfg->dest_data_size = config->dest_width; + + if (config->burst_elems) + dma_cfg->source_burst_length = config->burst_elems; + else + dma_cfg->source_burst_length = 8; + + dma_cfg->dest_burst_length = dma_cfg->source_burst_length; + dma_cfg->cyclic = config->cyclic; + dma_cfg->user_data = NULL; + dma_cfg->dma_callback = NULL; + dma_cfg->block_count = config->elem_array.count; + dma_cfg->dma_slot = config->dest_dev; + + dma_block_cfg = rballoc(SOF_MEM_FLAG_COHERENT, + SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA, + sizeof(struct dma_block_config) * dma_cfg->block_count); + if (!dma_block_cfg) { + rfree(dma_cfg); + comp_err(dev, "dai_playback_params(): dma_block_config allocation failed"); + return -ENOMEM; + } + + dma_cfg->head_block = dma_block_cfg; + for (i = 0; i < dma_cfg->block_count; i++) { + dma_block_cfg->dest_scatter_en = config->scatter; + dma_block_cfg->block_size = config->elem_array.elems[i].size; + dma_block_cfg->source_address = config->elem_array.elems[i].src; + dma_block_cfg->dest_address = config->elem_array.elems[i].dest; + prev = dma_block_cfg; + prev->next_block = ++dma_block_cfg; + } + prev->next_block = dma_cfg->head_block; + dd->z_config = dma_cfg; + + return 0; +} + +static int dai_capture_params(struct comp_dev *dev, uint32_t period_bytes, + uint32_t period_count) +{ + struct dai_data *dd = comp_get_drvdata(dev); + struct dma_sg_config *config = &dd->config; + struct dma_config *dma_cfg; + struct dma_block_config *dma_block_cfg; + struct dma_block_config *prev; + uint32_t local_fmt = dd->local_buffer->stream.frame_fmt; + uint32_t dma_fmt = dd->dma_buffer->stream.frame_fmt; + uint32_t fifo; + int i, err; + + /* set processing function */ + dd->process = pcm_get_conversion_function(dma_fmt, local_fmt); + + if (!dd->process) { + comp_err(dev, "dai_capture_params(): converter function NULL: local fmt %d dma fmt %d\n", + local_fmt, dma_fmt); + return -EINVAL; + } + + /* set up DMA configuration */ + config->direction = DMA_DIR_DEV_TO_MEM; + config->cyclic = 1; + config->irq_disabled = pipeline_is_timer_driven(dev->pipeline); + config->src_dev = dai_get_handshake(dd->dai, dev->direction, + dd->stream_id); + config->is_scheduling_source = comp_is_scheduling_source(dev); + config->period = dev->pipeline->period; + + /* TODO: Make this code platform-specific or move it driver callback */ + if (dai_get_info(dd->dai, DAI_INFO_TYPE) == SOF_DAI_INTEL_DMIC) { + /* For DMIC the DMA src and dest widths should always be 4 bytes + * due to 32 bit FIFO packer. Setting width to 2 bytes for + * 16 bit format would result in recording at double rate. 
+ */
+		config->src_width = 4;
+		config->dest_width = 4;
+	} else {
+		config->src_width = get_sample_bytes(dma_fmt);
+		config->dest_width = config->src_width;
+	}
+
+	comp_info(dev, "dai_capture_params() src_dev = %d stream_id = %d src_width = %d dest_width = %d",
+		  config->src_dev, dd->stream_id,
+		  config->src_width, config->dest_width);
+
+	if (!config->elem_array.elems) {
+		fifo = dai_get_fifo(dd->dai, dev->direction,
+				    dd->stream_id);
+
+		comp_info(dev, "dai_capture_params() fifo 0x%x", fifo);
+
+		err = dma_sg_alloc(&config->elem_array, SOF_MEM_ZONE_RUNTIME,
+				   config->direction,
+				   period_count,
+				   period_bytes,
+				   (uintptr_t)(dd->dma_buffer->stream.addr),
+				   fifo);
+		if (err < 0) {
+			comp_err(dev, "dai_capture_params(): dma_sg_alloc() for period_count %d period_bytes %d failed with err = %d",
+				 period_count, period_bytes, err);
+			return err;
+		}
+	}
+
+	dma_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
+			  SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
+			  sizeof(struct dma_config));
+	if (!dma_cfg) {
+		comp_err(dev, "dai_capture_params(): dma_cfg allocation failed");
+		return -ENOMEM;
+	}
+
+	dma_cfg->channel_direction = PERIPHERAL_TO_MEMORY;
+	dma_cfg->source_data_size = config->src_width;
+	dma_cfg->dest_data_size = config->dest_width;
+
+	if (config->burst_elems)
+		dma_cfg->source_burst_length = config->burst_elems;
+	else
+		dma_cfg->source_burst_length = 8;
+
+	dma_cfg->dest_burst_length = dma_cfg->source_burst_length;
+	dma_cfg->cyclic = config->cyclic;
+	dma_cfg->user_data = NULL;
+	dma_cfg->dma_callback = NULL;
+	dma_cfg->block_count = config->elem_array.count;
+	dma_cfg->dma_slot = config->src_dev;
+
+	dma_block_cfg = rballoc(SOF_MEM_FLAG_COHERENT,
+				SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA,
+				sizeof(struct dma_block_config) * dma_cfg->block_count);
+	if (!dma_block_cfg) {
+		rfree(dma_cfg);
+		comp_err(dev, "dai_capture_params(): dma_block_config allocation failed");
+		return -ENOMEM;
+	}
+
+	dma_cfg->head_block = dma_block_cfg;
+	for (i = 0; i < dma_cfg->block_count; i++) {
+		dma_block_cfg->dest_scatter_en = config->scatter;
+		dma_block_cfg->block_size = config->elem_array.elems[i].size;
+		dma_block_cfg->source_address = config->elem_array.elems[i].src;
+		dma_block_cfg->dest_address = config->elem_array.elems[i].dest;
+		prev = dma_block_cfg;
+		prev->next_block = ++dma_block_cfg;
+	}
+	prev->next_block = dma_cfg->head_block;
+	dd->z_config = dma_cfg;
+
+	return 0;
+}
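For reference, the cyclic block chaining done in the loops above (in both dai_playback_params() and dai_capture_params()) reduces to this minimal standalone sketch; link_blocks_cyclic() is a hypothetical helper, not part of this patch, assuming a contiguous array of at least one Zephyr struct dma_block_config:

/* Chain `count` contiguous block descriptors into a ring: each block points
 * to the next one, and the last block points back to the head so the DMA
 * keeps reloading from the first block.
 */
static void link_blocks_cyclic(struct dma_block_config *blocks, int count)
{
	int i;

	for (i = 0; i < count - 1; i++)
		blocks[i].next_block = &blocks[i + 1];

	blocks[count - 1].next_block = &blocks[0];
}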
+static int dai_params(struct comp_dev *dev,
+		      struct sof_ipc_stream_params *params)
+{
+	struct sof_ipc_stream_params hw_params = *params;
+	struct dai_data *dd = comp_get_drvdata(dev);
+	uint32_t frame_size;
+	uint32_t period_count;
+	uint32_t period_bytes;
+	uint32_t buffer_size;
+	uint32_t addr_align;
+	uint32_t align;
+	int err;
+
+	comp_dbg(dev, "dai_params()");
+
+	/* configure dai_data first */
+	err = ipc_dai_data_config(dev);
+	if (err < 0)
+		return err;
+
+	err = dai_verify_params(dev, params);
+	if (err < 0) {
+		comp_err(dev, "dai_params(): pcm params verification failed.");
+		return -EINVAL;
+	}
+
+	/* params verification passed, so now configure hw dai stream params */
+	err = dai_comp_hw_params(dev, params);
+	if (err < 0) {
+		comp_err(dev, "dai_params(): dai_comp_hw_params failed err %d", err);
+		return err;
+	}
+
+	if (dev->direction == SOF_IPC_STREAM_PLAYBACK)
+		dd->local_buffer = list_first_item(&dev->bsource_list,
+						   struct comp_buffer,
+						   sink_list);
+	else
+		dd->local_buffer = list_first_item(&dev->bsink_list,
+						   struct comp_buffer,
+						   source_list);
+
+	/* check if already configured */
+	if (dev->state == COMP_STATE_PREPARE) {
+		comp_info(dev, "dai_params() component has already been configured.");
+		return 0;
+	}
+
+	/* params can only be set in the init (READY) state */
+	if (dev->state != COMP_STATE_READY) {
+		comp_err(dev, "dai_params(): Component is in state %d, expected COMP_STATE_READY.",
+			 dev->state);
+		return -EINVAL;
+	}
+
+	err = dma_get_attribute(dd->dma, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
+				&addr_align);
+	if (err < 0) {
+		comp_err(dev, "dai_params(): could not get dma buffer address alignment, err = %d",
+			 err);
+		return err;
+	}
+
+	err = dma_get_attribute(dd->dma, DMA_ATTR_BUFFER_ALIGNMENT, &align);
+	if (err < 0 || !align) {
+		comp_err(dev, "dai_params(): could not get valid dma buffer alignment, err = %d, align = %u",
+			 err, align);
+		return -EINVAL;
+	}
+
+	err = dma_get_attribute(dd->dma, DMA_ATTR_BUFFER_PERIOD_COUNT,
+				&period_count);
+	if (err < 0 || !period_count) {
+		comp_err(dev, "dai_params(): could not get valid dma buffer period count, err = %d, period_count = %u",
+			 err, period_count);
+		return -EINVAL;
+	}
+
+	/* calculate frame size */
+	frame_size = get_frame_bytes(dev->ipc_config.frame_fmt,
+				     dd->local_buffer->stream.channels);
+
+	/* calculate period size */
+	period_bytes = dev->frames * frame_size;
+	if (!period_bytes) {
+		comp_err(dev, "dai_params(): invalid period_bytes.");
+		return -EINVAL;
+	}
+
+	dd->period_bytes = period_bytes;
+
+	/* calculate DMA buffer size */
+	buffer_size = ALIGN_UP(period_count * period_bytes, align);
+
+	/* allocate the DMA buffer, or resize it if it already exists */
+	if (dd->dma_buffer) {
+		err = buffer_set_size(dd->dma_buffer, buffer_size);
+		if (err < 0) {
+			comp_err(dev, "dai_params(): buffer_set_size() failed, buffer_size = %u",
+				 buffer_size);
+			return err;
+		}
+	} else {
+		dd->dma_buffer = buffer_alloc(buffer_size, SOF_MEM_CAPS_DMA,
+					      addr_align);
+		if (!dd->dma_buffer) {
+			comp_err(dev, "dai_params(): failed to alloc dma buffer");
+			return -ENOMEM;
+		}
+
+		/*
+		 * The dma_buffer should refer to the hardware dai parameters.
+		 * Here, we overwrite the frame_fmt hardware parameter, as the
+		 * DAI component is able to convert streams with different
+		 * frame_fmt's (using the pcm converter).
+		 */
+		hw_params.frame_fmt = dev->ipc_config.frame_fmt;
+		buffer_set_params(dd->dma_buffer, &hw_params,
+				  BUFFER_UPDATE_FORCE);
+	}
+
+	return dev->direction == SOF_IPC_STREAM_PLAYBACK ?
+		dai_playback_params(dev, period_bytes, period_count) :
+		dai_capture_params(dev, period_bytes, period_count);
+}
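To make the buffer sizing in dai_params() concrete, here is a small worked sketch with illustrative values (the numbers and the helper name are examples, not taken from the patch):

/* E.g. 48 frames per period at 4 bytes per frame (S16_LE stereo), two
 * periods per DMA buffer and a 32-byte size alignment:
 * ALIGN_UP(2 * (48 * 4), 32) = ALIGN_UP(384, 32) = 384 bytes.
 */
static uint32_t example_dai_dma_buffer_size(void)
{
	uint32_t frame_size = 4;	/* get_frame_bytes() result */
	uint32_t period_bytes = 48 * frame_size;	/* dev->frames * frame_size */
	uint32_t period_count = 2;	/* DMA_ATTR_BUFFER_PERIOD_COUNT */
	uint32_t align = 32;		/* DMA_ATTR_BUFFER_ALIGNMENT */

	return ALIGN_UP(period_count * period_bytes, align);
}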
+static int dai_config_prepare(struct comp_dev *dev)
+{
+	struct dai_data *dd = comp_get_drvdata(dev);
+	int channel;
+
+	/* cannot configure DAI while active */
+	if (dev->state == COMP_STATE_ACTIVE) {
+		comp_info(dev, "dai_config_prepare(): Component is in active state.");
+		return 0;
+	}
+
+	if (!dd->dai_spec_config) {
+		comp_err(dev, "dai specific config is not set yet!");
+		return -EINVAL;
+	}
+
+	if (dd->chan) {
+		comp_info(dev, "dai_config_prepare(): dma channel index %d already configured",
+			  dd->chan->index);
+		return 0;
+	}
+
+	channel = dai_config_dma_channel(dev, dd->dai_spec_config);
+	comp_info(dev, "dai_config_prepare(), channel = %d", channel);
+
+	/* for compatibility, do nothing when asked to free the channel */
+	if (channel == DMA_CHAN_INVALID) {
+		comp_err(dev, "dai_config is not set yet!");
+		return -EINVAL;
+	}
+
+	/* get DMA channel */
+	channel = dma_request_channel(dd->dma->z_dev, &channel);
+	if (channel < 0) {
+		comp_err(dev, "dai_config_prepare(): dma_request_channel() failed");
+		dd->chan = NULL;
+		return -EIO;
+	}
+
+	dd->chan = &dd->dma->chan[channel];
+	dd->chan->dev_data = dd;
+
+	comp_info(dev, "dai_config_prepare(): new configured dma channel index %d",
+		  dd->chan->index);
+
+	/* setup callback */
+	notifier_register(dev, dd->chan, NOTIFIER_ID_DMA_COPY,
+			  dai_dma_cb, 0);
+
+	return 0;
+}
+
+static int dai_prepare(struct comp_dev *dev)
+{
+	struct dai_data *dd = comp_get_drvdata(dev);
+	int ret;
+
+	comp_info(dev, "dai_prepare()");
+
+	ret = dai_config_prepare(dev);
+	if (ret < 0)
+		return ret;
+
+	ret = comp_set_state(dev, COMP_TRIGGER_PREPARE);
+	if (ret < 0)
+		return ret;
+
+	if (ret == COMP_STATUS_STATE_ALREADY_SET)
+		return PPL_STATUS_PATH_STOP;
+
+	dev->position = 0;
+
+	if (!dd->chan) {
+		comp_err(dev, "dai_prepare(): Missing dd->chan.");
+		comp_set_state(dev, COMP_TRIGGER_RESET);
+		return -EINVAL;
+	}
+
+	if (!dd->config.elem_array.elems) {
+		comp_err(dev, "dai_prepare(): Missing dd->config.elem_array.elems.");
+		comp_set_state(dev, COMP_TRIGGER_RESET);
+		return -EINVAL;
+	}
+
+	/* clear dma buffer to avoid pop noise */
+	buffer_zero(dd->dma_buffer);
+
+	/* dma reconfig not required if XRUN handling */
+	if (dd->xrun) {
+		/* after prepare, we have recovered from xrun */
+		dd->xrun = 0;
+		return ret;
+	}
+
+	ret = dma_config(dd->chan->dma->z_dev, dd->chan->index, dd->z_config);
+	if (ret < 0)
+		comp_set_state(dev, COMP_TRIGGER_RESET);
+
+	return ret;
+}
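dai_reset() below may skip the DMA channel release when the two-step stop option is in use. dai_dma_release() itself is not part of this hunk; based on the teardown order in dai_free() above, it can be assumed to look roughly like this sketch (name suffixed to mark it as an assumption):

/* Assumed shape of dai_dma_release(): unregister the copy notifier first,
 * then hand the Zephyr DMA channel back, mirroring dai_free().
 */
static void dai_dma_release_sketch(struct comp_dev *dev)
{
	struct dai_data *dd = comp_get_drvdata(dev);

	if (dd->chan) {
		notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY);
		dma_release_channel(dd->dma->z_dev, dd->chan->index);
		dd->chan->dev_data = NULL;
		dd->chan = NULL;
	}
}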
+static int dai_reset(struct comp_dev *dev)
+{
+	struct dai_data *dd = comp_get_drvdata(dev);
+	struct dma_sg_config *config = &dd->config;
+
+	comp_info(dev, "dai_reset()");
+
+	/*
+	 * The DMA channel release should be skipped now for DAIs that support
+	 * the two-step stop option. It will be done when the host sends the
+	 * DAI_CONFIG IPC during hw_free.
+	 */
+	if (!dd->delayed_dma_stop)
+		dai_dma_release(dev);
+
+	dma_sg_free(&config->elem_array);
+	rfree(dd->z_config);
+
+	if (dd->dma_buffer) {
+		buffer_free(dd->dma_buffer);
+		dd->dma_buffer = NULL;
+	}
+
+	dd->dai_pos_blks = 0;
+	if (dd->dai_pos)
+		*dd->dai_pos = 0;
+	dd->dai_pos = NULL;
+	dd->wallclock = 0;
+	dev->position = 0;
+	dd->xrun = 0;
+	comp_set_state(dev, COMP_TRIGGER_RESET);
+
+	return 0;
+}
+
+static void dai_update_start_position(struct comp_dev *dev)
+{
+	struct dai_data *dd = comp_get_drvdata(dev);
+
+	/* update starting wallclock */
+	platform_dai_wallclock(dev, &dd->wallclock);
+
+	/* update start position */
+	dd->start_position = dev->position;
+}
+
+/* used to pass standard and bespoke commands (with data) to the component */
+static int dai_comp_trigger_internal(struct comp_dev *dev, int cmd)
+{
+	struct dai_data *dd = comp_get_drvdata(dev);
+	int ret;
+
+	comp_dbg(dev, "dai_comp_trigger_internal(), command = %u", cmd);
+
+	ret = comp_set_state(dev, cmd);
+	if (ret < 0)
+		return ret;
+
+	if (ret == COMP_STATUS_STATE_ALREADY_SET)
+		return PPL_STATUS_PATH_STOP;
+
+	switch (cmd) {
+	case COMP_TRIGGER_START:
+		comp_dbg(dev, "dai_comp_trigger_internal(), START");
+
+		/* only start the DAI if we are not XRUN handling */
+		if (dd->xrun == 0) {
+			ret = dma_start(dd->chan->dma->z_dev, dd->chan->index);
+			if (ret < 0)
+				return ret;
+
+			/* start the DAI */
+			dai_trigger(dd->dai, cmd, dev->direction);
+		} else {
+			dd->xrun = 0;
+		}
+
+		dai_update_start_position(dev);
+		break;
+	case COMP_TRIGGER_RELEASE:
+		/* before release, clear the buffer so that no stale history
+		 * data is sent out after release. This is only done in
+		 * capture mode.
+		 */
+		if (dev->direction == SOF_IPC_STREAM_CAPTURE)
+			buffer_zero(dd->dma_buffer);
+
+		/* only start the DAI if we are not XRUN handling */
+		if (dd->xrun == 0) {
+			/* recover valid start position */
+			ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index);
+			if (ret < 0)
+				return ret;
+
+			/* start the DAI */
+			dai_trigger(dd->dai, cmd, dev->direction);
+			ret = dma_start(dd->chan->dma->z_dev, dd->chan->index);
+			if (ret < 0)
+				return ret;
+		} else {
+			dd->xrun = 0;
+		}
+
+		dai_update_start_position(dev);
+		break;
+	case COMP_TRIGGER_XRUN:
+		comp_info(dev, "dai_comp_trigger_internal(), XRUN");
+		dd->xrun = 1;
+
+		COMPILER_FALLTHROUGH;
+	case COMP_TRIGGER_STOP:
+		comp_dbg(dev, "dai_comp_trigger_internal(), STOP");
+/*
+ * Some platforms cannot simply disable the DMA channel during a transfer,
+ * because doing so would hang the whole DMA controller. Therefore, stop the
+ * DMA first and let the DAI drain the FIFO in order to stop the channel
+ * as soon as possible.
+ */ +#if CONFIG_DMA_SUSPEND_DRAIN + ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index); + dai_trigger(dd->dai, cmd, dev->direction); +#else + dai_trigger(dd->dai, cmd, dev->direction); + ret = dma_stop(dd->chan->dma->z_dev, dd->chan->index); +#endif + break; + case COMP_TRIGGER_PAUSE: + comp_dbg(dev, "dai_comp_trigger_internal(), PAUSE"); + ret = dma_suspend(dd->chan->dma->z_dev, dd->chan->index); + dai_trigger(dd->dai, cmd, dev->direction); + break; + case COMP_TRIGGER_PRE_START: + case COMP_TRIGGER_PRE_RELEASE: + /* only start the DAI if we are not XRUN handling */ + if (dd->xrun) + dd->xrun = 0; + else + dai_trigger(dd->dai, cmd, dev->direction); + break; + } + + return ret; +} + +static int dai_comp_trigger(struct comp_dev *dev, int cmd) +{ + struct dai_data *dd = comp_get_drvdata(dev); + struct dai_group *group = dd->group; + uint32_t irq_flags; + int ret = 0; + + /* DAI not in a group, use normal trigger */ + if (!group) { + comp_dbg(dev, "dai_comp_trigger(), non-atomic trigger"); + return dai_comp_trigger_internal(dev, cmd); + } + + /* DAI is grouped, so only trigger when the entire group is ready */ + + if (!group->trigger_counter) { + /* First DAI to receive the trigger command, + * prepare for atomic trigger + */ + comp_dbg(dev, "dai_comp_trigger(), begin atomic trigger for group %d", + group->group_id); + group->trigger_cmd = cmd; + group->trigger_counter = group->num_dais - 1; + } else if (group->trigger_cmd != cmd) { + /* Already processing a different trigger command */ + comp_err(dev, "dai_comp_trigger(), already processing atomic trigger"); + ret = -EAGAIN; + } else { + /* Count down the number of remaining DAIs required + * to receive the trigger command before atomic trigger + * takes place + */ + group->trigger_counter--; + comp_dbg(dev, "dai_comp_trigger(), trigger counter %d, group %d", + group->trigger_counter, group->group_id); + + if (!group->trigger_counter) { + /* The counter has reached 0, which means + * all DAIs have received the same trigger command + * and we may begin the actual trigger process + * synchronously. 
+ */ + + irq_local_disable(irq_flags); + notifier_event(group, NOTIFIER_ID_DAI_TRIGGER, + BIT(cpu_get_id()), NULL, 0); + irq_local_enable(irq_flags); + + /* return error of last trigger */ + ret = group->trigger_ret; + } + } + + return ret; +} + +static void dai_atomic_trigger(void *arg, enum notify_id type, void *data) +{ + struct comp_dev *dev = arg; + struct dai_data *dd = comp_get_drvdata(dev); + struct dai_group *group = dd->group; + + /* Atomic context set by the last DAI to receive trigger command */ + group->trigger_ret = dai_comp_trigger_internal(dev, group->trigger_cmd); +} + +/* report xrun occurrence */ +static void dai_report_xrun(struct comp_dev *dev, uint32_t bytes) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { + comp_err(dev, "dai_report_xrun(): underrun due to no data available"); + comp_underrun(dev, dd->local_buffer, bytes); + } else { + comp_err(dev, "dai_report_xrun(): overrun due to no space available"); + comp_overrun(dev, dd->local_buffer, bytes); + } +} + +/* copy and process stream data from source to sink buffers */ +static int dai_copy(struct comp_dev *dev) +{ + struct dai_data *dd = comp_get_drvdata(dev); + uint32_t dma_fmt = dd->dma_buffer->stream.frame_fmt; + const uint32_t sampling = get_sample_bytes(dma_fmt); + struct comp_buffer *buf = dd->local_buffer; + struct comp_buffer __sparse_cache *buf_c; + struct dma_status stat; + uint32_t avail_bytes = 0; + uint32_t free_bytes = 0; + uint32_t copy_bytes = 0; + uint32_t src_samples; + uint32_t sink_samples; + uint32_t samples; + int ret; + + comp_dbg(dev, "dai_copy()"); + + /* get data sizes from DMA */ + ret = dma_get_status(dd->chan->dma->z_dev, dd->chan->index, &stat); + if (ret < 0) { + dai_report_xrun(dev, 0); + return ret; + } + avail_bytes = stat.pending_length; + free_bytes = stat.free; + + buf_c = buffer_acquire(buf); + + /* calculate minimum size to copy */ + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { + src_samples = audio_stream_get_avail_samples(&buf_c->stream); + sink_samples = free_bytes / sampling; + samples = MIN(src_samples, sink_samples); + } else { + src_samples = avail_bytes / sampling; + sink_samples = audio_stream_get_free_samples(&buf_c->stream); + samples = MIN(src_samples, sink_samples); + } + + /* limit bytes per copy to one period for the whole pipeline + * in order to avoid high load spike + */ + samples = MIN(samples, dd->period_bytes / sampling); + + copy_bytes = samples * sampling; + + buffer_release(buf_c); + + comp_dbg(dev, "dai_copy(), dir: %d copy_bytes= 0x%x, frames= %d", + dev->direction, copy_bytes, + samples / buf->stream.channels); + + /* Check possibility of glitch occurrence */ + if (dev->direction == SOF_IPC_STREAM_PLAYBACK && + copy_bytes + avail_bytes < dd->period_bytes) + comp_warn(dev, "dai_copy(): Copy_bytes %d + avail bytes %d < period bytes %d, possible glitch", + copy_bytes, avail_bytes, dd->period_bytes); + else if (dev->direction == SOF_IPC_STREAM_CAPTURE && + copy_bytes + free_bytes < dd->period_bytes) + comp_warn(dev, "dai_copy(): Copy_bytes %d + free bytes %d < period bytes %d, possible glitch", + copy_bytes, free_bytes, dd->period_bytes); + + /* return if nothing to copy */ + if (!copy_bytes) { + comp_warn(dev, "dai_copy(): nothing to copy"); + return 0; + } + + struct dma_cb_data next = { + .channel = dd->chan, + .elem = { .size = copy_bytes }, + .status = DMA_CB_STATUS_END, + }; + + notifier_event(dd->chan, NOTIFIER_ID_DMA_COPY, + NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next)); + + if 
(next.status == DMA_CB_STATUS_END) + dma_stop(dd->chan->dma->z_dev, dd->chan->index); + + ret = dma_reload(dd->chan->dma->z_dev, dd->chan->index, 0, 0, copy_bytes); + if (ret < 0) { + dai_report_xrun(dev, copy_bytes); + return ret; + } + + dai_dma_position_update(dev); + + return ret; +} + +/** + * \brief Get DAI parameters and configure timestamping + * \param[in, out] dev DAI device. + * \return Error code. + * + * This function retrieves various DAI parameters such as type, direction, index, and DMA + * controller information those are needed when configuring HW timestamping. Note that + * DAI must be prepared before this function is used (for DMA information). If not, an error + * is returned. + */ +static int dai_ts_config(struct comp_dev *dev) +{ + struct dai_data *dd = comp_get_drvdata(dev); + struct timestamp_cfg *cfg = &dd->ts_config; + struct ipc_config_dai *dai = &dd->ipc_config; + + comp_dbg(dev, "dai_ts_config()"); + if (!dd->chan) { + comp_err(dev, "dai_ts_config(), No DMA channel information"); + return -EINVAL; + } + + cfg->type = dd->dai->drv->type; + cfg->direction = dai->direction; + cfg->index = dd->dai->index; + cfg->dma_id = dd->dma->plat_data.id; + cfg->dma_chan_index = dd->chan->index; + cfg->dma_chan_count = dd->dma->plat_data.channels; + if (!dd->dai->drv->ts_ops.ts_config) + return -ENXIO; + + return dd->dai->drv->ts_ops.ts_config(dd->dai, cfg); +} + +static int dai_ts_start(struct comp_dev *dev) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + comp_dbg(dev, "dai_ts_start()"); + if (!dd->dai->drv->ts_ops.ts_start) + return -ENXIO; + + return dd->dai->drv->ts_ops.ts_start(dd->dai, &dd->ts_config); +} + +static int dai_ts_stop(struct comp_dev *dev) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + comp_dbg(dev, "dai_ts_stop()"); + if (!dd->dai->drv->ts_ops.ts_stop) + return -ENXIO; + + return dd->dai->drv->ts_ops.ts_stop(dd->dai, &dd->ts_config); +} + +static int dai_ts_get(struct comp_dev *dev, struct timestamp_data *tsd) +{ + struct dai_data *dd = comp_get_drvdata(dev); + + comp_dbg(dev, "dai_ts_get()"); + if (!dd->dai->drv->ts_ops.ts_get) + return -ENXIO; + + return dd->dai->drv->ts_ops.ts_get(dd->dai, &dd->ts_config, tsd); +} + +static const struct comp_driver comp_dai = { + .type = SOF_COMP_DAI, + .uid = SOF_RT_UUID(dai_comp_uuid), + .tctx = &dai_comp_tr, + .ops = { + .create = dai_new, + .free = dai_free, + .params = dai_params, + .dai_get_hw_params = dai_comp_get_hw_params, + .trigger = dai_comp_trigger, + .copy = dai_copy, + .prepare = dai_prepare, + .reset = dai_reset, + .dai_config = dai_config, + .position = dai_position, + .dai_ts_config = dai_ts_config, + .dai_ts_start = dai_ts_start, + .dai_ts_stop = dai_ts_stop, + .dai_ts_get = dai_ts_get, + }, +}; + +static SHARED_DATA struct comp_driver_info comp_dai_info = { + .drv = &comp_dai, +}; + +UT_STATIC void sys_comp_dai_init(void) +{ + comp_register(platform_shared_get(&comp_dai_info, + sizeof(comp_dai_info))); +} + +DECLARE_MODULE(sys_comp_dai_init); diff --git a/src/audio/host.c b/src/audio/host-legacy.c similarity index 97% rename from src/audio/host.c rename to src/audio/host-legacy.c index 7ee10e7d1102..4c893f7d4d85 100644 --- a/src/audio/host.c +++ b/src/audio/host-legacy.c @@ -145,14 +145,14 @@ static int host_dma_set_config_and_copy(struct comp_dev *dev, uint32_t bytes) local_elem->size = bytes; /* reconfigure transfer */ - ret = dma_set_config(hd->chan, &hd->config); + ret = dma_set_config_legacy(hd->chan, &hd->config); if (ret < 0) { comp_err(dev, "host_dma_set_config_and_copy(): 
dma_set_config() failed, ret = %d", ret); return ret; } - ret = dma_copy(hd->chan, bytes, DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); + ret = dma_copy_legacy(hd->chan, bytes, DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); if (ret < 0) { comp_err(dev, "host_dma_set_config_and_copy(): dma_copy() failed, ret = %d", ret); @@ -288,13 +288,13 @@ static int host_copy_one_shot(struct comp_dev *dev) } /* reconfigure transfer */ - ret = dma_set_config(hd->chan, &hd->config); + ret = dma_set_config_legacy(hd->chan, &hd->config); if (ret < 0) { comp_err(dev, "host_copy_one_shot(): dma_set_config() failed, ret = %u", ret); return ret; } - ret = dma_copy(hd->chan, copy_bytes, DMA_COPY_ONE_SHOT); + ret = dma_copy_legacy(hd->chan, copy_bytes, DMA_COPY_ONE_SHOT); if (ret < 0) { comp_err(dev, "host_copy_one_shot(): dma_copy() failed, ret = %u", ret); return ret; @@ -454,8 +454,7 @@ static uint32_t host_get_copy_bytes_normal(struct comp_dev *dev) int ret; /* get data sizes from DMA */ - ret = dma_get_data_size(hd->chan, &avail_bytes, - &free_bytes); + ret = dma_get_data_size_legacy(hd->chan, &avail_bytes, &free_bytes); if (ret < 0) { comp_err(dev, "host_get_copy_bytes_normal(): dma_get_data_size() failed, ret = %u", ret); @@ -513,7 +512,7 @@ static int host_copy_normal(struct comp_dev *dev) if (!copy_bytes) return 0; - ret = dma_copy(hd->chan, copy_bytes, flags); + ret = dma_copy_legacy(hd->chan, copy_bytes, flags); if (ret < 0) comp_err(dev, "host_copy_normal(): dma_copy() failed, ret = %u", ret); @@ -596,14 +595,14 @@ static int host_trigger(struct comp_dev *dev, int cmd) switch (cmd) { case COMP_TRIGGER_START: - ret = dma_start(hd->chan); + ret = dma_start_legacy(hd->chan); if (ret < 0) comp_err(dev, "host_trigger(): dma_start() failed, ret = %u", ret); break; case COMP_TRIGGER_STOP: case COMP_TRIGGER_XRUN: - ret = dma_stop(hd->chan); + ret = dma_stop_legacy(hd->chan); if (ret < 0) comp_err(dev, "host_trigger(): dma stop failed: %d", ret); @@ -870,16 +869,16 @@ static int host_params(struct comp_dev *dev, /* get DMA channel from DMAC * note: stream_tag is ignored by dw-dma */ - hd->chan = dma_channel_get(hd->dma, hd->stream_tag); + hd->chan = dma_channel_get_legacy(hd->dma, hd->stream_tag); if (!hd->chan) { comp_err(dev, "host_params(): hd->chan is NULL"); return -ENODEV; } - err = dma_set_config(hd->chan, &hd->config); + err = dma_set_config_legacy(hd->chan, &hd->config); if (err < 0) { comp_err(dev, "host_params(): dma_set_config() failed"); - dma_channel_put(hd->chan); + dma_channel_put_legacy(hd->chan); hd->chan = NULL; return err; } @@ -959,11 +958,11 @@ static int host_reset(struct comp_dev *dev) comp_dbg(dev, "host_reset()"); if (hd->chan) { - dma_stop_delayed(hd->chan); + dma_stop_delayed_legacy(hd->chan); /* remove callback */ notifier_unregister(dev, hd->chan, NOTIFIER_ID_DMA_COPY); - dma_channel_put(hd->chan); + dma_channel_put_legacy(hd->chan); hd->chan = NULL; } diff --git a/src/audio/host-zephyr.c b/src/audio/host-zephyr.c new file mode 100644 index 000000000000..704e0b05d0a9 --- /dev/null +++ b/src/audio/host-zephyr.c @@ -0,0 +1,1135 @@ +// SPDX-License-Identifier: BSD-3-Clause +// +// Copyright(c) 2016 Intel Corporation. All rights reserved. 
+// +// Author: Liam Girdwood +// Keyon Jie + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static const struct comp_driver comp_host; + +/* 8b9d100c-6d78-418f-90a3-e0e805d0852b */ +DECLARE_SOF_RT_UUID("host", host_uuid, 0x8b9d100c, 0x6d78, 0x418f, + 0x90, 0xa3, 0xe0, 0xe8, 0x05, 0xd0, 0x85, 0x2b); + +DECLARE_TR_CTX(host_tr, SOF_UUID(host_uuid), LOG_LEVEL_INFO); + +/** \brief Host copy function interface. */ +typedef int (*host_copy_func)(struct comp_dev *dev); + +/** + * \brief Host buffer info. + */ +struct hc_buf { + struct dma_sg_elem_array elem_array; /**< array of SG elements */ + uint32_t current; /**< index of current element */ + uint32_t current_end; +}; + +/** + * \brief Host component data. + * + * Host reports local position in the host buffer every params.host_period_bytes + * if the latter is != 0. report_pos is used to track progress since the last + * multiple of host_period_bytes. + * + * host_size is the host buffer size (in bytes) specified in the IPC parameters. + */ +struct host_data { + /* local DMA config */ + struct dma *dma; + struct dma_chan_data *chan; + struct dma_sg_config config; + struct dma_config z_config; + struct comp_buffer *dma_buffer; + struct comp_buffer *local_buffer; + + /* host position reporting related */ + uint32_t host_size; /**< Host buffer size (in bytes) */ + uint32_t report_pos; /**< Position in current report period */ + uint32_t local_pos; /**< Local position in host buffer */ + uint32_t host_period_bytes; + uint16_t stream_tag; + uint16_t no_stream_position; /**< 1 means don't send stream position */ + uint8_t cont_update_posn; /**< 1 means continuous update stream position */ + + /* host component attributes */ + enum comp_copy_type copy_type; /**< Current host copy type */ + + /* local and host DMA buffer info */ + struct hc_buf host; + struct hc_buf local; + + /* pointers set during params to host or local above */ + struct hc_buf *source; + struct hc_buf *sink; + + uint32_t dma_copy_align; /**< Minimal chunk of data possible to be + * copied by dma connected to host + */ + uint32_t period_bytes; /**< number of bytes per one period */ + + host_copy_func copy; /**< host copy function */ + pcm_converter_func process; /**< processing function */ + + /* IPC host init info */ + struct ipc_config_host ipc_host; + + /* stream info */ + struct sof_ipc_stream_posn posn; /* TODO: update this */ + struct ipc_msg *msg; /**< host notification */ +}; + +static inline struct dma_sg_elem *next_buffer(struct hc_buf *hc) +{ + if (!hc->elem_array.elems || !hc->elem_array.count) + return NULL; + if (++hc->current == hc->elem_array.count) + hc->current = 0; + return hc->elem_array.elems + hc->current; +} + +static uint32_t host_dma_get_split(struct host_data *hd, uint32_t bytes) +{ + struct dma_sg_elem *local_elem = hd->config.elem_array.elems; + uint32_t split_src = 0; + uint32_t split_dst = 0; + + if (local_elem->src + bytes > hd->source->current_end) + split_src = bytes - + (hd->source->current_end - local_elem->src); + + if (local_elem->dest + bytes > hd->sink->current_end) + split_dst = bytes - + (hd->sink->current_end - local_elem->dest); + + /* get max split, so the current copy will be minimum */ + return MAX(split_src, split_dst); +} + +#if CONFIG_FORCE_DMA_COPY_WHOLE_BLOCK + +static int host_dma_set_config_and_copy(struct comp_dev 
*dev, uint32_t bytes)
+{
+	struct host_data *hd = comp_get_drvdata(dev);
+	struct dma_sg_elem *local_elem = hd->config.elem_array.elems;
+	int ret;
+
+	local_elem->size = bytes;
+
+	/* reconfigure transfer */
+	ret = dma_config(hd->chan->dma->z_dev, hd->chan->index, &hd->z_config);
+	if (ret < 0) {
+		comp_err(dev, "host_dma_set_config_and_copy(): dma_config() failed, ret = %d",
+			 ret);
+		return ret;
+	}
+
+	struct dma_cb_data next = {
+		.channel = hd->chan,
+		.elem = { .size = bytes },
+	};
+	notifier_event(hd->chan, NOTIFIER_ID_DMA_COPY,
+		       NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next));
+	ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, bytes);
+	if (ret < 0) {
+		comp_err(dev, "host_dma_set_config_and_copy(): dma_reload() failed, ret = %d",
+			 ret);
+		return ret;
+	}
+
+	return ret;
+}
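A worked example of the boundary split computed by host_dma_get_split() above, with illustrative values (helper name hypothetical, not part of the patch):

/* With a source element ending at 0x1000, src = 0xF00 and bytes = 0x200,
 * the transfer would overrun the element by 0x100 bytes; that remainder is
 * deferred, so only 0x100 bytes are copied in the current iteration.
 */
static uint32_t example_split(void)
{
	uint32_t current_end = 0x1000;
	uint32_t src = 0xF00;
	uint32_t bytes = 0x200;

	return src + bytes > current_end ? bytes - (current_end - src) : 0;
}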
+ */ + copy_bytes = ALIGN_DOWN(copy_bytes, hd->dma_copy_align); + + split_value = host_dma_get_split(hd, copy_bytes); + if (split_value) + copy_bytes -= split_value; + + local_elem->size = copy_bytes; + + return copy_bytes; +} + +/** + * Performs copy operation for host component working in one shot mode. + * It means DMA needs to be reconfigured after every transfer. + * @param dev Host component device. + * @return 0 if succeeded, error code otherwise. + */ +static int host_copy_one_shot(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + uint32_t copy_bytes; + int ret = 0; + + comp_dbg(dev, "host_copy_one_shot()"); + + copy_bytes = host_get_copy_bytes_one_shot(dev); + if (!copy_bytes) { + comp_info(dev, "host_copy_one_shot(): no bytes to copy"); + return ret; + } + + /* reconfigure transfer */ + ret = dma_config(hd->chan->dma->z_dev, hd->chan->index, &hd->z_config); + if (ret < 0) { + comp_err(dev, "host_copy_one_shot(): dma_config() failed, ret = %u", ret); + return ret; + } + + struct dma_cb_data next = { + .channel = hd->chan, + .elem = { .size = copy_bytes }, + }; + notifier_event(hd->chan, NOTIFIER_ID_DMA_COPY, + NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next)); + ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, copy_bytes); + if (ret < 0) { + comp_err(dev, "host_copy_one_shot(): dma_copy() failed, ret = %u", ret); + return ret; + } + + return ret; +} +#endif + +static void host_update_position(struct comp_dev *dev, uint32_t bytes) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct comp_buffer *source; + struct comp_buffer *sink; + int ret; + bool update_mailbox = false; + bool send_ipc = false; + + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) + ret = dma_buffer_copy_from(hd->dma_buffer, hd->local_buffer, + hd->process, bytes); + else + ret = dma_buffer_copy_to(hd->local_buffer, hd->dma_buffer, + hd->process, bytes); + + /* assert dma_buffer_copy succeed */ + if (ret < 0) { + source = dev->direction == SOF_IPC_STREAM_PLAYBACK ? + hd->dma_buffer : hd->local_buffer; + sink = dev->direction == SOF_IPC_STREAM_PLAYBACK ? + hd->local_buffer : hd->dma_buffer; + comp_err(dev, "host_update_position() dma buffer copy failed, dir %d bytes %d avail %d free %d", + dev->direction, bytes, + audio_stream_get_avail_samples(&source->stream) * + audio_stream_frame_bytes(&source->stream), + audio_stream_get_free_samples(&sink->stream) * + audio_stream_frame_bytes(&sink->stream)); + return; + } + + dev->position += bytes; + + /* new local period, update host buffer position blks + * local_pos is queried by the ops.position() API + */ + hd->local_pos += bytes; + + /* buffer overlap, hardcode host buffer size at the moment */ + if (hd->local_pos >= hd->host_size) +#if CONFIG_WRAP_ACTUAL_POSITION + hd->local_pos %= hd->host_size; +#else + hd->local_pos = 0; +#endif + if (hd->cont_update_posn) + update_mailbox = true; + + /* Don't send stream position if no_stream_position == 1 */ + if (!hd->no_stream_position) { + hd->report_pos += bytes; + + /* host_period_bytes is set to zero to disable position update + * by IPC for FW version before 3.11, so send IPC message to + * driver according to this condition and report_pos. 
+ */ + if (hd->host_period_bytes != 0 && + hd->report_pos >= hd->host_period_bytes) { + hd->report_pos = 0; + + /* send timestamped position to host + * (updates position first, by calling ops.position()) + */ + update_mailbox = true; + send_ipc = true; + } + } + + if (update_mailbox) { + pipeline_get_timestamp(dev->pipeline, dev, &hd->posn); + mailbox_stream_write(dev->pipeline->posn_offset, + &hd->posn, sizeof(hd->posn)); + if (send_ipc) + ipc_msg_send(hd->msg, &hd->posn, false); + } +} + +/* The host memory is not guaranteed to be continuous and also not guaranteed + * to have a period/buffer size that is a multiple of the DSP period size. + * This means we must check we do not overflow host period/buffer/page + * boundaries on each transfer and split the DMA transfer if we do overflow. + */ +static void host_one_shot_cb(struct comp_dev *dev, uint32_t bytes) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct dma_sg_elem *local_elem = hd->config.elem_array.elems; + struct dma_sg_elem *source_elem; + struct dma_sg_elem *sink_elem; + + /* update src and dest positions and check for overflow */ + local_elem->src += bytes; + local_elem->dest += bytes; + + if (local_elem->src == hd->source->current_end) { + /* end of element, so use next */ + source_elem = next_buffer(hd->source); + if (source_elem) { + hd->source->current_end = source_elem->src + + source_elem->size; + local_elem->src = source_elem->src; + } + } + + if (local_elem->dest == hd->sink->current_end) { + /* end of element, so use next */ + sink_elem = next_buffer(hd->sink); + if (sink_elem) { + hd->sink->current_end = sink_elem->dest + + sink_elem->size; + local_elem->dest = sink_elem->dest; + } + } +} + +/* This is called by DMA driver every time when DMA completes its current + * transfer between host and DSP. + */ +static void host_dma_cb(void *arg, enum notify_id type, void *data) +{ + struct dma_cb_data *next = data; + struct comp_dev *dev = arg; + struct host_data *hd = comp_get_drvdata(dev); + uint32_t bytes = next->elem.size; + + comp_cl_dbg(&comp_host, "host_dma_cb() %p", &comp_host); + + /* update position */ + host_update_position(dev, bytes); + + /* callback for one shot copy */ + if (hd->copy_type == COMP_COPY_ONE_SHOT) + host_one_shot_cb(dev, bytes); +} + +/** + * Calculates bytes to be copied in normal mode. + * @param dev Host component device. + * @return Bytes to be copied. 
+ */ +static uint32_t host_get_copy_bytes_normal(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct comp_buffer *buffer = hd->local_buffer; + struct comp_buffer __sparse_cache *buffer_c; + struct dma_status stat; + uint32_t avail_bytes; + uint32_t free_bytes; + uint32_t copy_bytes; + int ret; + + /* get data sizes from DMA */ + ret = dma_get_status(hd->chan->dma->z_dev, hd->chan->index, &stat); + if (ret < 0) { + comp_err(dev, "host_get_copy_bytes_normal(): dma_get_status() failed, ret = %u", + ret); + /* return 0 copy_bytes in case of error to skip DMA copy */ + return 0; + } + avail_bytes = stat.pending_length; + free_bytes = stat.free; + + buffer_c = buffer_acquire(buffer); + + /* calculate minimum size to copy */ + if (dev->direction == SOF_IPC_STREAM_PLAYBACK) { + /* limit bytes per copy to one period for the whole pipeline + * in order to avoid high load spike + */ + free_bytes = audio_stream_get_free_bytes(&buffer_c->stream); + copy_bytes = MIN(hd->period_bytes, MIN(avail_bytes, free_bytes)); + if (!copy_bytes) + comp_info(dev, "no bytes to copy, %d free in buffer, %d available in DMA", + free_bytes, avail_bytes); + } else { + avail_bytes = audio_stream_get_avail_bytes(&buffer_c->stream); + copy_bytes = MIN(avail_bytes, free_bytes); + if (!copy_bytes) + comp_info(dev, "no bytes to copy, %d avail in buffer, %d free in DMA", + avail_bytes, free_bytes); + } + + buffer_release(buffer_c); + + /* copy_bytes should be aligned to minimum possible chunk of + * data to be copied by dma. + */ + return ALIGN_DOWN(copy_bytes, hd->dma_copy_align); +} + +/** + * Performs copy operation for host component working in normal mode. + * It means DMA works continuously and doesn't need reconfiguration. + * @param dev Host component device. + * @return 0 if succeeded, error code otherwise. + */ +static int host_copy_normal(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + uint32_t copy_bytes; + uint32_t flags = 0; + int ret; + + comp_dbg(dev, "host_copy_normal()"); + + if (hd->copy_type == COMP_COPY_BLOCKING) + flags |= DMA_COPY_BLOCKING; + + copy_bytes = host_get_copy_bytes_normal(dev); + if (!copy_bytes) + return 0; + + struct dma_cb_data next = { + .channel = hd->chan, + .elem = { .size = copy_bytes }, + }; + notifier_event(hd->chan, NOTIFIER_ID_DMA_COPY, + NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next)); + ret = dma_reload(hd->chan->dma->z_dev, hd->chan->index, 0, 0, copy_bytes); + if (ret < 0) + comp_err(dev, "host_copy_normal(): dma_copy() failed, ret = %u", ret); + + return ret; +} + +static int create_local_elems(struct comp_dev *dev, uint32_t buffer_count, + uint32_t buffer_bytes) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct dma_sg_elem_array *elem_array; + uint32_t dir; + int err; + + dir = dev->direction == SOF_IPC_STREAM_PLAYBACK ? 
+ DMA_DIR_HMEM_TO_LMEM : DMA_DIR_LMEM_TO_HMEM; + + /* if host buffer set we need to allocate local buffer */ + if (hd->host.elem_array.count) { + elem_array = &hd->local.elem_array; + + /* config buffer will be used as proxy */ + err = dma_sg_alloc(&hd->config.elem_array, SOF_MEM_ZONE_RUNTIME, + dir, 1, 0, 0, 0); + if (err < 0) { + comp_err(dev, "create_local_elems(): dma_sg_alloc() failed"); + return err; + } + } else { + elem_array = &hd->config.elem_array; + } + + err = dma_sg_alloc(elem_array, SOF_MEM_ZONE_RUNTIME, dir, buffer_count, + buffer_bytes, + (uintptr_t)(hd->dma_buffer->stream.addr), 0); + if (err < 0) { + comp_err(dev, "create_local_elems(): dma_sg_alloc() failed"); + return err; + } + + return 0; +} + +/** + * \brief Command handler. + * \param[in,out] dev Device + * \param[in] cmd Command + * \return 0 if successful, error code otherwise. + * + * Used to pass standard and bespoke commands (with data) to component. + * This function is common for all dma types, with one exception: + * dw-dma is run on demand, so no start()/stop() is issued. + */ +static int host_trigger(struct comp_dev *dev, int cmd) +{ + struct host_data *hd = comp_get_drvdata(dev); + int ret; + + comp_dbg(dev, "host_trigger()"); + + ret = comp_set_state(dev, cmd); + if (ret < 0) + return ret; + + if (ret == COMP_STATUS_STATE_ALREADY_SET) + return PPL_STATUS_PATH_STOP; + + /* we should ignore any trigger commands besides start + * when doing one shot, because transfers will stop automatically + */ + if (cmd != COMP_TRIGGER_START && hd->copy_type == COMP_COPY_ONE_SHOT) + return ret; + + if (!hd->chan) { + comp_err(dev, "host_trigger(): no dma channel configured"); + return -EINVAL; + } + + switch (cmd) { + case COMP_TRIGGER_START: + ret = dma_start(hd->chan->dma->z_dev, hd->chan->index); + if (ret < 0) + comp_err(dev, "host_trigger(): dma_start() failed, ret = %u", + ret); + break; + case COMP_TRIGGER_STOP: + case COMP_TRIGGER_XRUN: + ret = dma_stop(hd->chan->dma->z_dev, hd->chan->index); + if (ret < 0) + comp_err(dev, "host_trigger(): dma stop failed: %d", + ret); + break; + default: + break; + } + + return ret; +} + +static struct comp_dev *host_new(const struct comp_driver *drv, + struct comp_ipc_config *config, + void *spec) +{ + struct comp_dev *dev; + struct host_data *hd; + struct ipc_config_host *ipc_host = spec; + uint32_t dir; + + comp_cl_dbg(&comp_host, "host_new()"); + + dev = comp_alloc(drv, sizeof(*dev)); + if (!dev) + return NULL; + dev->ipc_config = *config; + + hd = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*hd)); + if (!hd) { + rfree(dev); + return NULL; + } + + comp_set_drvdata(dev, hd); + hd->ipc_host = *ipc_host; + + /* request HDA DMA with shared access privilege */ + dir = hd->ipc_host.direction == SOF_IPC_STREAM_PLAYBACK ? 
+ DMA_DIR_HMEM_TO_LMEM : DMA_DIR_LMEM_TO_HMEM; + + hd->dma = dma_get(dir, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); + if (!hd->dma) { + comp_err(dev, "host_new(): dma_get() returned NULL"); + rfree(hd); + rfree(dev); + return NULL; + } + + /* init buffer elems */ + dma_sg_init(&hd->config.elem_array); + dma_sg_init(&hd->host.elem_array); + dma_sg_init(&hd->local.elem_array); + + ipc_build_stream_posn(&hd->posn, SOF_IPC_STREAM_POSITION, dev->ipc_config.id); + + hd->msg = ipc_msg_init(hd->posn.rhdr.hdr.cmd, sizeof(hd->posn)); + if (!hd->msg) { + comp_err(dev, "host_new(): ipc_msg_init failed"); + dma_put(hd->dma); + rfree(hd); + rfree(dev); + return NULL; + } + + hd->chan = NULL; + hd->copy_type = COMP_COPY_NORMAL; + dev->state = COMP_STATE_READY; + + return dev; +} + +static void host_free(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + + comp_info(dev, "host_free()"); + + dma_put(hd->dma); + + ipc_msg_free(hd->msg); + dma_sg_free(&hd->config.elem_array); + rfree(hd); + rfree(dev); +} + +static int host_elements_reset(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + struct dma_sg_elem *source_elem; + struct dma_sg_elem *sink_elem; + struct dma_sg_elem *local_elem; + + /* setup elem to point to first source elem */ + source_elem = hd->source->elem_array.elems; + if (source_elem) { + hd->source->current = 0; + hd->source->current_end = source_elem->src + source_elem->size; + } + + /* setup elem to point to first sink elem */ + sink_elem = hd->sink->elem_array.elems; + if (sink_elem) { + hd->sink->current = 0; + hd->sink->current_end = sink_elem->dest + sink_elem->size; + } + + /* local element */ + if (source_elem && sink_elem) { + local_elem = hd->config.elem_array.elems; + local_elem->dest = sink_elem->dest; + local_elem->size = + dev->direction == SOF_IPC_STREAM_PLAYBACK ? 
+			sink_elem->size : source_elem->size;
+		local_elem->src = source_elem->src;
+	}
+
+	return 0;
+}
+
+static int host_verify_params(struct comp_dev *dev,
+			      struct sof_ipc_stream_params *params)
+{
+	int ret;
+
+	comp_dbg(dev, "host_verify_params()");
+
+	ret = comp_verify_params(dev, 0, params);
+	if (ret < 0) {
+		comp_err(dev, "host_verify_params(): comp_verify_params() failed");
+		return ret;
+	}
+
+	return 0;
+}
+
+/* configure the DMA params and descriptors for host buffer IO */
+static int host_params(struct comp_dev *dev,
+		       struct sof_ipc_stream_params *params)
+{
+	struct host_data *hd = comp_get_drvdata(dev);
+	struct dma_sg_config *config = &hd->config;
+	struct dma_sg_elem *sg_elem;
+	struct dma_config *dma_cfg = &hd->z_config;
+	struct dma_block_config dma_block_cfg = { 0 };
+	uint32_t period_count;
+	uint32_t period_bytes;
+	uint32_t buffer_size;
+	uint32_t addr_align;
+	uint32_t align;
+	int i, channel, err;
+
+	comp_dbg(dev, "host_params()");
+
+	if (dev->direction == SOF_IPC_STREAM_PLAYBACK)
+		hd->local_buffer = list_first_item(&dev->bsink_list,
+						   struct comp_buffer,
+						   source_list);
+	else
+		hd->local_buffer = list_first_item(&dev->bsource_list,
+						   struct comp_buffer,
+						   sink_list);
+
+	err = host_verify_params(dev, params);
+	if (err < 0) {
+		comp_err(dev, "host_params(): pcm params verification failed");
+		return err;
+	}
+
+	/* host params always installed by pipeline IPC */
+	hd->host_size = params->buffer.size;
+	hd->stream_tag = params->stream_tag;
+	hd->no_stream_position = params->no_stream_position;
+	hd->host_period_bytes = params->host_period_bytes;
+	hd->cont_update_posn = params->cont_update_posn;
+
+	/* retrieve DMA buffer address alignment */
+	err = dma_get_attribute(hd->dma, DMA_ATTR_BUFFER_ADDRESS_ALIGNMENT,
+				&addr_align);
+	if (err < 0) {
+		comp_err(dev, "host_params(): could not get dma buffer address alignment, err = %d",
+			 err);
+		return err;
+	}
+
+	/* retrieve DMA buffer size alignment */
+	err = dma_get_attribute(hd->dma, DMA_ATTR_BUFFER_ALIGNMENT, &align);
+	if (err < 0 || !align) {
+		comp_err(dev, "host_params(): could not get valid dma buffer alignment, err = %d, align = %u",
+			 err, align);
+		return -EINVAL;
+	}
+
+	/* retrieve DMA buffer period count */
+	err = dma_get_attribute(hd->dma, DMA_ATTR_BUFFER_PERIOD_COUNT,
+				&period_count);
+	if (err < 0 || !period_count) {
+		comp_err(dev, "host_params(): could not get valid dma buffer period count, err = %d, period_count = %u",
+			 err, period_count);
+		return -EINVAL;
+	}
+
+	period_bytes = dev->frames *
+		audio_stream_frame_bytes(&hd->local_buffer->stream);
+
+	if (!period_bytes) {
+		comp_err(dev, "host_params(): invalid period_bytes");
+		return -EINVAL;
+	}
+
+	/* determine source and sink buffer elements */
+	if (dev->direction == SOF_IPC_STREAM_PLAYBACK) {
+		config->direction = DMA_DIR_HMEM_TO_LMEM;
+		hd->source = &hd->host;
+		hd->sink = &hd->local;
+	} else {
+		config->direction = DMA_DIR_LMEM_TO_HMEM;
+		hd->source = &hd->local;
+		hd->sink = &hd->host;
+	}
+
+	/* TODO: should be taken from DMA */
+	if (hd->host.elem_array.count) {
+		period_bytes *= period_count;
+		period_count = 1;
+	}
+
+	/* calculate DMA buffer size */
+	buffer_size = ALIGN_UP(period_bytes, align) * period_count;
+
+	/* alloc DMA buffer or change its size if it exists */
+	if (hd->dma_buffer) {
+		err = buffer_set_size(hd->dma_buffer, buffer_size);
+		if (err < 0) {
+			comp_err(dev, "host_params(): buffer_set_size() failed, buffer_size = %u",
+				 buffer_size);
+			return err;
+		}
+	} else {
+		hd->dma_buffer = buffer_alloc(buffer_size, SOF_MEM_CAPS_DMA,
+					      addr_align);
+		if (!hd->dma_buffer) {
+			comp_err(dev, "host_params(): failed to alloc dma buffer");
+			return -ENOMEM;
+		}
+
+		buffer_set_params(hd->dma_buffer, params, BUFFER_UPDATE_FORCE);
+	}
+
+	/* create SG DMA elems for local DMA buffer */
+	err = create_local_elems(dev, period_count, buffer_size / period_count);
+	if (err < 0)
+		return err;
+
+	/* set up DMA configuration - copy in sample bytes. */
+	config->src_width =
+		audio_stream_sample_bytes(&hd->local_buffer->stream);
+	config->dest_width =
+		audio_stream_sample_bytes(&hd->local_buffer->stream);
+	config->cyclic = 0;
+	config->irq_disabled = pipeline_is_timer_driven(dev->pipeline);
+	config->is_scheduling_source = comp_is_scheduling_source(dev);
+	config->period = dev->pipeline->period;
+
+	host_elements_reset(dev);
+
+	hd->stream_tag -= 1;
+	uint32_t hda_chan = hd->stream_tag;
+	/* get DMA channel from DMAC
+	 * note: stream_tag is ignored by dw-dma
+	 */
+	channel = dma_request_channel(hd->dma->z_dev, &hda_chan);
+	if (channel < 0) {
+		comp_err(dev, "host_params(): dma_request_channel() failed");
+		return -ENODEV;
+	}
+	hd->chan = &hd->dma->chan[channel];
+
+	uint32_t buffer_addr = 0;
+	uint32_t buffer_bytes = 0;
+	uint32_t addr;
+
+	hd->chan->direction = config->direction;
+	hd->chan->desc_count = config->elem_array.count;
+	hd->chan->is_scheduling_source = config->is_scheduling_source;
+	hd->chan->period = config->period;
+
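+	/* Translate the SOF scatter-gather configuration into Zephyr DMA API
+	 * terms: the whole local buffer is described by a single
+	 * dma_block_config whose address is taken from the first element
+	 * (dest for host-to-memory, src for memory-to-host) and whose size
+	 * is the sum of all element sizes.
+	 */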
+	memset(dma_cfg, 0, sizeof(*dma_cfg));
+
+	dma_cfg->block_count = 1;
+	dma_cfg->source_data_size = config->src_width;
+	dma_cfg->dest_data_size = config->dest_width;
+	dma_cfg->head_block = &dma_block_cfg;
+
+	for (i = 0; i < config->elem_array.count; i++) {
+		sg_elem = config->elem_array.elems + i;
+
+		if (config->direction == DMA_DIR_HMEM_TO_LMEM ||
+		    config->direction == DMA_DIR_DEV_TO_MEM)
+			addr = sg_elem->dest;
+		else
+			addr = sg_elem->src;
+
+		buffer_bytes += sg_elem->size;
+
+		if (buffer_addr == 0)
+			buffer_addr = addr;
+	}
+
+	dma_block_cfg.block_size = buffer_bytes;
+
+	switch (config->direction) {
+	case DMA_DIR_LMEM_TO_HMEM:
+		dma_cfg->channel_direction = MEMORY_TO_HOST;
+		dma_block_cfg.source_address = buffer_addr;
+		break;
+	case DMA_DIR_HMEM_TO_LMEM:
+		dma_cfg->channel_direction = HOST_TO_MEMORY;
+		dma_block_cfg.dest_address = buffer_addr;
+		break;
+	}
+
+	err = dma_config(hd->chan->dma->z_dev, hd->chan->index, dma_cfg);
+	if (err < 0) {
+		comp_err(dev, "host_params(): dma_config() failed");
+		dma_release_channel(hd->dma->z_dev, hd->chan->index);
+		hd->chan = NULL;
+		return err;
+	}
+
+	err = dma_get_attribute(hd->dma, DMA_ATTR_COPY_ALIGNMENT,
+				&hd->dma_copy_align);
+	if (err < 0) {
+		comp_err(dev, "host_params(): dma_get_attribute() failed, err = %d",
+			 err);
+		return err;
+	}
+
+	/* minimal copied data shouldn't be less than alignment */
+	hd->period_bytes = ALIGN_UP(period_bytes, hd->dma_copy_align);
+
+	/* set up callback */
+	notifier_register(dev, hd->chan, NOTIFIER_ID_DMA_COPY, host_dma_cb, 0);
+
+	/* set copy function */
+	hd->copy = hd->copy_type == COMP_COPY_ONE_SHOT ?
host_copy_one_shot : + host_copy_normal; + + /* set processing function */ + hd->process = + pcm_get_conversion_function(hd->local_buffer->stream.frame_fmt, + hd->local_buffer->stream.frame_fmt); + + return 0; +} + +static int host_prepare(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + int ret; + + comp_dbg(dev, "host_prepare()"); + + ret = comp_set_state(dev, COMP_TRIGGER_PREPARE); + if (ret < 0) + return ret; + + if (ret == COMP_STATUS_STATE_ALREADY_SET) + return PPL_STATUS_PATH_STOP; + + buffer_zero(hd->dma_buffer); + return 0; +} + +static int host_pointer_reset(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + + /* reset buffer pointers */ + hd->local_pos = 0; + hd->report_pos = 0; + dev->position = 0; + + return 0; +} + +static int host_position(struct comp_dev *dev, + struct sof_ipc_stream_posn *posn) +{ + struct host_data *hd = comp_get_drvdata(dev); + + /* TODO: improve accuracy by adding current DMA position */ + posn->host_posn = hd->local_pos; + + return 0; +} + +static int host_reset(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + + comp_dbg(dev, "host_reset()"); + + if (hd->chan) { + dma_stop(hd->chan->dma->z_dev, hd->chan->index); + + /* remove callback */ + notifier_unregister(dev, hd->chan, NOTIFIER_ID_DMA_COPY); + dma_release_channel(hd->dma->z_dev, hd->chan->index); + hd->chan = NULL; + } + + /* free all DMA elements */ + dma_sg_free(&hd->host.elem_array); + dma_sg_free(&hd->local.elem_array); + dma_sg_free(&hd->config.elem_array); + + /* free DMA buffer */ + if (hd->dma_buffer) { + buffer_free(hd->dma_buffer); + hd->dma_buffer = NULL; + } + + host_pointer_reset(dev); + hd->copy_type = COMP_COPY_NORMAL; + hd->source = NULL; + hd->sink = NULL; + dev->state = COMP_STATE_READY; + + return 0; +} + +/* copy and process stream data from source to sink buffers */ +static int host_copy(struct comp_dev *dev) +{ + struct host_data *hd = comp_get_drvdata(dev); + + if (dev->state != COMP_STATE_ACTIVE) + return 0; + + return hd->copy(dev); +} + +static int host_get_attribute(struct comp_dev *dev, uint32_t type, + void *value) +{ + struct host_data *hd = comp_get_drvdata(dev); + + switch (type) { + case COMP_ATTR_COPY_TYPE: + *(enum comp_copy_type *)value = hd->copy_type; + break; + case COMP_ATTR_COPY_DIR: + *(uint32_t *)value = hd->ipc_host.direction; + break; + default: + return -EINVAL; + } + + return 0; +} + +static int host_set_attribute(struct comp_dev *dev, uint32_t type, + void *value) +{ + struct host_data *hd = comp_get_drvdata(dev); + + switch (type) { + case COMP_ATTR_COPY_TYPE: + hd->copy_type = *(enum comp_copy_type *)value; + break; + case COMP_ATTR_HOST_BUFFER: + hd->host.elem_array = *(struct dma_sg_elem_array *)value; + break; + default: + return -EINVAL; + } + + return 0; +} + +static const struct comp_driver comp_host = { + .type = SOF_COMP_HOST, + .uid = SOF_RT_UUID(host_uuid), + .tctx = &host_tr, + .ops = { + .create = host_new, + .free = host_free, + .params = host_params, + .reset = host_reset, + .trigger = host_trigger, + .copy = host_copy, + .prepare = host_prepare, + .position = host_position, + .get_attribute = host_get_attribute, + .set_attribute = host_set_attribute, + }, +}; + +static SHARED_DATA struct comp_driver_info comp_host_info = { + .drv = &comp_host, +}; + +UT_STATIC void sys_comp_host_init(void) +{ + comp_register(platform_shared_get(&comp_host_info, + sizeof(comp_host_info))); +} + +DECLARE_MODULE(sys_comp_host_init); diff --git a/src/drivers/dw/ssi-spi.c 
b/src/drivers/dw/ssi-spi.c index 54b85742372b..42ef1c67bc22 100644 --- a/src/drivers/dw/ssi-spi.c +++ b/src/drivers/dw/ssi-spi.c @@ -180,7 +180,7 @@ static int spi_trigger(struct spi *spi, int cmd, int direction) switch (cmd) { case SPI_TRIGGER_START: /* trigger the SPI-Slave + DMA + INT + Receiving */ - ret = dma_start(spi->chan[direction]); + ret = dma_start_legacy(spi->chan[direction]); if (ret < 0) return ret; @@ -196,7 +196,7 @@ static int spi_trigger(struct spi *spi, int cmd, int direction) case SPI_TRIGGER_STOP: /* Stop the SPI-Slave */ spi_stop(spi); - dma_stop(spi->chan[direction]); + dma_stop_legacy(spi->chan[direction]); break; default: @@ -280,7 +280,7 @@ static int spi_slave_dma_set_config(struct spi *spi, config.elem_array.count = 1; config.elem_array.elems = &local_sg_elem; - return dma_set_config(chan, &config); + return dma_set_config_legacy(chan, &config); } static int spi_set_config(struct spi *spi, @@ -435,11 +435,11 @@ int spi_probe(struct spi *spi) if (!spi->dma[SPI_DIR_TX]) return -ENODEV; - spi->chan[SPI_DIR_RX] = dma_channel_get(spi->dma[SPI_DIR_RX], 0); + spi->chan[SPI_DIR_RX] = dma_channel_get_legacy(spi->dma[SPI_DIR_RX], 0); if (!spi->chan[SPI_DIR_RX]) return -ENODEV; - spi->chan[SPI_DIR_TX] = dma_channel_get(spi->dma[SPI_DIR_TX], 0); + spi->chan[SPI_DIR_TX] = dma_channel_get_legacy(spi->dma[SPI_DIR_TX], 0); if (!spi->chan[SPI_DIR_TX]) return -ENODEV; diff --git a/src/drivers/imx/sdma.c b/src/drivers/imx/sdma.c index ad0b1d38cb0d..56955e82fe99 100644 --- a/src/drivers/imx/sdma.c +++ b/src/drivers/imx/sdma.c @@ -447,7 +447,7 @@ static void sdma_channel_put(struct dma_chan_data *channel) return; /* Channel was already free */ tr_dbg(&sdma_tr, "sdma_channel_put(%d)", channel->index); - dma_interrupt(channel, DMA_IRQ_CLEAR); + dma_interrupt_legacy(channel, DMA_IRQ_CLEAR); sdma_disable_event(channel, pdata->hw_event); sdma_set_overrides(channel, false, false); channel->status = COMP_STATE_INIT; diff --git a/src/include/sof/lib/dai.h b/src/include/sof/lib/dai.h index 3b026ddb1c33..4fa37897a9cf 100644 --- a/src/include/sof/lib/dai.h +++ b/src/include/sof/lib/dai.h @@ -157,6 +157,9 @@ struct dai_data { struct dma_chan_data *chan; uint32_t stream_id; struct dma_sg_config config; +#ifdef __ZEPHYR__ + struct dma_config *z_config; +#endif struct comp_buffer *dma_buffer; struct comp_buffer *local_buffer; struct timestamp_cfg ts_config; diff --git a/src/include/sof/lib/dma.h b/src/include/sof/lib/dma.h index b95581128300..89928799100d 100644 --- a/src/include/sof/lib/dma.h +++ b/src/include/sof/lib/dma.h @@ -28,6 +28,11 @@ #include #include +#ifdef __ZEPHYR__ +#include +#include +#endif + struct comp_buffer; /** \addtogroup sof_dma_drivers DMA Drivers @@ -205,6 +210,9 @@ struct dma { const struct dma_ops *ops; atomic_t num_channels_busy; /* number of busy channels */ struct dma_chan_data *chan; /* channels array */ +#ifdef __ZEPHYR__ + const struct device *z_dev; /* Zephyr driver */ +#endif void *priv_data; }; @@ -292,8 +300,8 @@ void dma_put(struct dma *dma); * 7) dma_channel_put() */ -static inline struct dma_chan_data *dma_channel_get(struct dma *dma, - int req_channel) +static inline struct dma_chan_data *dma_channel_get_legacy(struct dma *dma, + int req_channel) { if (!dma || !dma->ops || !dma->ops->channel_get) return NULL; @@ -303,18 +311,18 @@ static inline struct dma_chan_data *dma_channel_get(struct dma *dma, return chan; } -static inline void dma_channel_put(struct dma_chan_data *channel) +static inline void dma_channel_put_legacy(struct dma_chan_data *channel) { 
channel->dma->ops->channel_put(channel); } -static inline int dma_start(struct dma_chan_data *channel) +static inline int dma_start_legacy(struct dma_chan_data *channel) { return channel->dma->ops->start(channel); } -static inline int dma_stop(struct dma_chan_data *channel) +static inline int dma_stop_legacy(struct dma_chan_data *channel) { if (channel->dma->ops->stop) return channel->dma->ops->stop(channel); @@ -322,7 +330,7 @@ static inline int dma_stop(struct dma_chan_data *channel) return 0; } -static inline int dma_stop_delayed(struct dma_chan_data *channel) +static inline int dma_stop_delayed_legacy(struct dma_chan_data *channel) { if (channel->dma->ops->stop_delayed) return channel->dma->ops->stop_delayed(channel); @@ -336,14 +344,14 @@ static inline int dma_stop_delayed(struct dma_chan_data *channel) * struct dma_copy {} * @{ */ -static inline int dma_copy(struct dma_chan_data *channel, int bytes, - uint32_t flags) +static inline int dma_copy_legacy(struct dma_chan_data *channel, int bytes, + uint32_t flags) { return channel->dma->ops->copy(channel, bytes, flags); } /** @} */ -static inline int dma_pause(struct dma_chan_data *channel) +static inline int dma_pause_legacy(struct dma_chan_data *channel) { if (channel->dma->ops->pause) return channel->dma->ops->pause(channel); @@ -351,7 +359,7 @@ static inline int dma_pause(struct dma_chan_data *channel) return 0; } -static inline int dma_release(struct dma_chan_data *channel) +static inline int dma_release_legacy(struct dma_chan_data *channel) { if (channel->dma->ops->release) return channel->dma->ops->release(channel); @@ -359,30 +367,30 @@ static inline int dma_release(struct dma_chan_data *channel) return 0; } -static inline int dma_status(struct dma_chan_data *channel, - struct dma_chan_status *status, uint8_t direction) +static inline int dma_status_legacy(struct dma_chan_data *channel, + struct dma_chan_status *status, uint8_t direction) { return channel->dma->ops->status(channel, status, direction); } -static inline int dma_set_config(struct dma_chan_data *channel, - struct dma_sg_config *config) +static inline int dma_set_config_legacy(struct dma_chan_data *channel, + struct dma_sg_config *config) { return channel->dma->ops->set_config(channel, config); } -static inline int dma_probe(struct dma *dma) +static inline int dma_probe_legacy(struct dma *dma) { return dma->ops->probe(dma); } -static inline int dma_remove(struct dma *dma) +static inline int dma_remove_legacy(struct dma *dma) { return dma->ops->remove(dma); } -static inline int dma_get_data_size(struct dma_chan_data *channel, - uint32_t *avail, uint32_t *free) +static inline int dma_get_data_size_legacy(struct dma_chan_data *channel, + uint32_t *avail, uint32_t *free) { return channel->dma->ops->get_data_size(channel, avail, free); } @@ -393,8 +401,8 @@ static inline int dma_get_attribute(struct dma *dma, uint32_t type, return dma->ops->get_attribute(dma, type, value); } -static inline int dma_interrupt(struct dma_chan_data *channel, - enum dma_irq_cmd cmd) +static inline int dma_interrupt_legacy(struct dma_chan_data *channel, + enum dma_irq_cmd cmd) { return channel->dma->ops->interrupt(channel, cmd); } @@ -529,7 +537,7 @@ int dma_copy_new(struct dma_copy *dc); /* free dma copy context resources */ static inline void dma_copy_free(struct dma_copy *dc) { - dma_channel_put(dc->chan); + dma_channel_put_legacy(dc->chan); } /* DMA copy data from host to DSP */ diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c index 36cac2e9d415..4f9390e6d81d 100644 --- 
a/src/ipc/dma-copy.c +++ b/src/ipc/dma-copy.c @@ -66,7 +66,7 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg, int ret; /* tell gateway to copy */ - ret = dma_copy(dc->chan, size, DMA_COPY_BLOCKING); + ret = dma_copy_legacy(dc->chan, size, DMA_COPY_BLOCKING); if (ret < 0) return ret; @@ -113,12 +113,12 @@ int dma_copy_to_host(struct dma_copy *dc, struct dma_sg_config *host_sg, config.elem_array.count = 1; /* start the DMA */ - err = dma_set_config(dc->chan, &config); + err = dma_set_config_legacy(dc->chan, &config); if (err < 0) return err; - err = dma_copy(dc->chan, local_sg_elem.size, - DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); + err = dma_copy_legacy(dc->chan, local_sg_elem.size, + DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); if (err < 0) return err; @@ -144,7 +144,7 @@ int dma_copy_new(struct dma_copy *dc) #if !CONFIG_DMA_GW /* get DMA channel from DMAC0 */ - dc->chan = dma_channel_get(dc->dmac, CONFIG_TRACE_CHANNEL); + dc->chan = dma_channel_get_legacy(dc->dmac, CONFIG_TRACE_CHANNEL); if (!dc->chan) { tr_err(&dmacpy_tr, "dma_copy_new(): dc->chan is NULL"); return -ENODEV; @@ -159,7 +159,7 @@ int dma_copy_new(struct dma_copy *dc) int dma_copy_set_stream_tag(struct dma_copy *dc, uint32_t stream_tag) { /* get DMA channel from DMAC */ - dc->chan = dma_channel_get(dc->dmac, stream_tag - 1); + dc->chan = dma_channel_get_legacy(dc->dmac, stream_tag - 1); if (!dc->chan) { tr_err(&dmacpy_tr, "dma_copy_set_stream_tag(): dc->chan is NULL"); return -EINVAL; diff --git a/src/ipc/ipc3/dai.c b/src/ipc/ipc3/dai.c index c780e88c5169..a054aa1e62e6 100644 --- a/src/ipc/ipc3/dai.c +++ b/src/ipc/ipc3/dai.c @@ -257,7 +257,7 @@ void dai_dma_release(struct comp_dev *dev) if (dd->chan) { /* remove callback */ notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY); - dma_channel_put(dd->chan); + dma_channel_put_legacy(dd->chan); dd->chan->dev_data = NULL; dd->chan = NULL; } @@ -302,7 +302,7 @@ int dai_config(struct comp_dev *dev, struct ipc_config_dai *common_config, /* stop DMA and reset config for two-step stop DMA */ if (dd->delayed_dma_stop) { - ret = dma_stop_delayed(dd->chan); + ret = dma_stop_delayed_legacy(dd->chan); if (ret < 0) return ret; @@ -314,7 +314,7 @@ int dai_config(struct comp_dev *dev, struct ipc_config_dai *common_config, if (!dd->chan) return 0; - return dma_stop_delayed(dd->chan); + return dma_stop_delayed_legacy(dd->chan); default: break; } diff --git a/src/ipc/ipc3/host-page-table.c b/src/ipc/ipc3/host-page-table.c index c813f5c631d8..6b3ed66907b0 100644 --- a/src/ipc/ipc3/host-page-table.c +++ b/src/ipc/ipc3/host-page-table.c @@ -94,7 +94,7 @@ static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table, int ret = 0; /* get DMA channel from DMAC */ - chan = dma_channel_get(dmac, 0); + chan = dma_channel_get_legacy(dmac, 0); if (!chan) { tr_err(&ipc_tr, "ipc_get_page_descriptors(): chan is NULL"); return -ENODEV; @@ -125,14 +125,14 @@ static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table, config.elem_array.elems = &elem; config.elem_array.count = 1; - ret = dma_set_config(chan, &config); + ret = dma_set_config_legacy(chan, &config); if (ret < 0) { tr_err(&ipc_tr, "ipc_get_page_descriptors(): dma_set_config() failed"); goto out; } /* start the copy of page table to DSP */ - ret = dma_copy(chan, elem.size, DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); + ret = dma_copy_legacy(chan, elem.size, DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); if (ret < 0) { tr_err(&ipc_tr, "ipc_get_page_descriptors(): dma_start() failed"); goto out; @@ -140,7 +140,7 @@ 
static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table,
 
 	/* compressed page tables now in buffer at _ipc->page_table */
 out:
-	dma_channel_put(chan);
+	dma_channel_put_legacy(chan);
 
 	return ret;
 }
diff --git a/src/ipc/ipc4/dai.c b/src/ipc/ipc4/dai.c
index d9482981fc3f..f0e9a322837a 100644
--- a/src/ipc/ipc4/dai.c
+++ b/src/ipc/ipc4/dai.c
@@ -202,11 +202,11 @@ void dai_dma_release(struct comp_dev *dev)
 		 * pause to stop.
 		 * TODO: refine power management when stream is paused
 		 */
-		dma_stop(dd->chan);
+		dma_stop_legacy(dd->chan);
 
 		/* remove callback */
 		notifier_unregister(dev, dd->chan, NOTIFIER_ID_DMA_COPY);
-		dma_channel_put(dd->chan);
+		dma_channel_put_legacy(dd->chan);
 		dd->chan->dev_data = NULL;
 		dd->chan = NULL;
 	}
@@ -300,7 +300,7 @@ int dai_position(struct comp_dev *dev, struct sof_ipc_stream_posn *posn)
 	posn->wallclock = dd->wallclock;
 
 	status.ipc_posn_data = &posn->comp_posn;
-	dma_status(dd->chan, &status, dev->direction);
+	dma_status_legacy(dd->chan, &status, dev->direction);
 
 	return 0;
 }
@@ -319,7 +319,7 @@ void dai_dma_position_update(struct comp_dev *dev)
 		return;
 
 	status.ipc_posn_data = llp_data;
-	dma_status(dd->chan, &status, dev->direction);
+	dma_status_legacy(dd->chan, &status, dev->direction);
 
 	platform_dai_wallclock(dev, &dd->wallclock);
diff --git a/src/lib/dma.c b/src/lib/dma.c
index f7aa0843c40f..819f968128a4 100644
--- a/src/lib/dma.c
+++ b/src/lib/dma.c
@@ -28,6 +28,138 @@ DECLARE_SOF_UUID("dma", dma_uuid, 0xbc3526a7, 0x9b86, 0x4ab4,
 
 DECLARE_TR_CTX(dma_tr, SOF_UUID(dma_uuid), LOG_LEVEL_INFO);
 
+#if CONFIG_ZEPHYR_NATIVE_DRIVERS
+static int dma_init(struct dma *dma);
+
+struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags)
+{
+	const struct dma_info *info = dma_info_get();
+	int users, ret = 0;
+	int min_users = INT32_MAX;
+	struct dma *d = NULL, *dmin = NULL;
+	k_spinlock_key_t key;
+
+	if (!info->num_dmas) {
+		tr_err(&dma_tr, "dma_get(): No DMACs installed");
+		return NULL;
+	}
+
+	/* find DMAC with free channels that matches request */
+	for (d = info->dma_array; d < info->dma_array + info->num_dmas;
+	     d++) {
+		/* skip if this DMAC does not support the requested dir */
+		if (dir && (d->plat_data.dir & dir) == 0)
+			continue;
+
+		/* skip if this DMAC does not support the requested caps */
+		if (cap && (d->plat_data.caps & cap) == 0)
+			continue;
+
+		/* skip if this DMAC does not support the requested dev */
+		if (dev && (d->plat_data.devs & dev) == 0)
+			continue;
+
+		/* if exclusive access is requested */
+		if (flags & DMA_ACCESS_EXCLUSIVE) {
+			/* use this DMAC if it has no users */
+			if (!d->sref) {
+				dmin = d;
+				break;
+			}
+		} else {
+			/* get number of users for this DMAC */
+			users = d->sref;
+
+			/* pick DMAC with the least num of users */
+			if (users < min_users) {
+				dmin = d;
+				min_users = users;
+			}
+		}
+	}
+
+	if (!dmin) {
+		tr_err(&dma_tr, "No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x",
+		       dir, cap, dev, flags);
+
+		for (d = info->dma_array;
+		     d < info->dma_array + info->num_dmas;
+		     d++) {
+			tr_err(&dma_tr, " DMAC ID %d users %d busy channels %ld",
+			       d->plat_data.id, d->sref,
+			       atomic_read(&d->num_channels_busy));
+			tr_err(&dma_tr, " caps 0x%x dev 0x%x",
+			       d->plat_data.caps, d->plat_data.devs);
+		}
+
+		return NULL;
+	}
+
+	/* return DMAC */
+	tr_dbg(&dma_tr, "dma_get(), id = %d",
+	       dmin->plat_data.id);
+
+	/* Shared DMA controllers with multiple channels
+	 * may be requested many times, let dma_init()
+	 * do on-first-use initialization.
+	 */
+	key = k_spin_lock(&dmin->lock);
+
+	if (!dmin->sref) {
+		ret = dma_init(dmin);
+		if (ret < 0) {
+			tr_err(&dma_tr, "dma_get(): dma_init() failed id = %d, ret = %d",
+			       dmin->plat_data.id, ret);
+		}
+	}
+	if (!ret)
+		dmin->sref++;
+
+	tr_info(&dma_tr, "dma_get() ID %d sref = %d busy channels %ld",
+		dmin->plat_data.id, dmin->sref,
+		atomic_read(&dmin->num_channels_busy));
+
+	k_spin_unlock(&dmin->lock, key);
+	return dmin;
+}
+
+void dma_put(struct dma *dma)
+{
+	k_spinlock_key_t key;
+
+	key = k_spin_lock(&dma->lock);
+	--dma->sref;
+
+	tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d",
+		dma, dma->sref);
+	k_spin_unlock(&dma->lock, key);
+}
+
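+/* One-time channel bookkeeping setup for a Zephyr-backed DMAC. There is
+ * no dma_probe()/dma_remove() pair on this path: SOF only allocates its
+ * struct dma_chan_data array, while the controller itself is owned and
+ * initialized by the Zephyr driver behind dma->z_dev.
+ */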
+static int dma_init(struct dma *dma)
+{
+	struct dma_chan_data *chan;
+	int i;
+
+	/* allocate dma channels */
+	dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM,
+			    sizeof(struct dma_chan_data) * dma->plat_data.channels);
+
+	if (!dma->chan) {
+		tr_err(&dma_tr, "dma_init(): dma %d allocation of channels failed",
+		       dma->plat_data.id);
+		return -ENOMEM;
+	}
+
+	/* init work */
+	for (i = 0, chan = dma->chan; i < dma->plat_data.channels;
+	     i++, chan++) {
+		chan->dma = dma;
+		chan->index = i;
+	}
+
+	return 0;
+}
+#else
 struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags)
 {
 	const struct dma_info *info = dma_info_get();
@@ -109,7 +241,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags)
 
 	ret = 0;
 	if (!dmin->sref) {
-		ret = dma_probe(dmin);
+		ret = dma_probe_legacy(dmin);
 		if (ret < 0) {
 			tr_err(&dma_tr, "dma_get(): dma-probe failed id = %d, ret = %d",
 			       dmin->plat_data.id, ret);
@@ -133,7 +265,7 @@ void dma_put(struct dma *dma)
 
 	key = k_spin_lock(&dma->lock);
 	if (--dma->sref == 0) {
-		ret = dma_remove(dma);
+		ret = dma_remove_legacy(dma);
 		if (ret < 0) {
 			tr_err(&dma_tr, "dma_put(): dma_remove() failed id = %d, ret = %d",
 			       dma->plat_data.id, ret);
@@ -143,6 +275,7 @@ void dma_put(struct dma *dma)
 		dma, dma->sref);
 	k_spin_unlock(&dma->lock, key);
 }
+#endif
 
 int dma_sg_alloc(struct dma_sg_elem_array *elem_array,
 		 enum mem_zone zone,
diff --git a/src/platform/intel/cavs/lib/dma.c b/src/platform/intel/cavs/lib/dma.c
index 9e96a26a6f53..39d7dc7dc9d6 100644
--- a/src/platform/intel/cavs/lib/dma.c
+++ b/src/platform/intel/cavs/lib/dma.c
@@ -15,6 +15,9 @@
 #include 
 #include 
 #include 
+#ifdef __ZEPHYR__
+#include 
+#endif
 
 #if CONFIG_APOLLOLAKE
 #define DMAC0_CLASS 1
@@ -246,6 +249,9 @@ static const struct dma_info lib_dma = {
 /* Initialize all platform DMAC's */
 int dmac_init(struct sof *sof)
 {
+#if CONFIG_ZEPHYR_NATIVE_DRIVERS
+	const struct device *z_dev;
+#endif
 	int i;
 
 	/* no probing before first use */
@@ -254,8 +260,30 @@ int dmac_init(struct sof *sof)
 
 	sof->dma_info = &lib_dma;
 
 	/* early lock initialization for ref counting */
-	for (i = 0; i < sof->dma_info->num_dmas; i++)
+	for (i = 0; i < sof->dma_info->num_dmas; i++) {
 		k_spinlock_init(&sof->dma_info->dma_array[i].lock);
+#if CONFIG_ZEPHYR_NATIVE_DRIVERS
+		switch (sof->dma_info->dma_array[i].plat_data.id) {
+		case DMA_HOST_IN_DMAC:
+			z_dev = device_get_binding("HDA_HOST_IN");
+			break;
+		case DMA_HOST_OUT_DMAC:
+			z_dev = device_get_binding("HDA_HOST_OUT");
+			break;
+		case DMA_GP_LP_DMAC0:
+			z_dev = device_get_binding("DMA_0");
+			break;
+		case DMA_GP_LP_DMAC1:
+			z_dev = device_get_binding("DMA_1");
+			break;
+		default:
+			continue;
+		}
+		if
(!z_dev) + return -EINVAL; + sof->dma_info->dma_array[i].z_dev = z_dev; +#endif + } return 0; } diff --git a/src/probe/probe.c b/src/probe/probe.c index 4a1a14ef1a61..265c6fe9fea2 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -154,7 +154,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) if (err < 0) return err; - err = dma_set_config(dma->dc.chan, &config); + err = dma_set_config_legacy(dma->dc.chan, &config); if (err < 0) return err; @@ -172,13 +172,13 @@ static int probe_dma_deinit(struct probe_dma_ext *dma) { int err = 0; - err = dma_stop(dma->dc.chan); + err = dma_stop_legacy(dma->dc.chan); if (err < 0) { tr_err(&pr_tr, "probe_dma_deinit(): dma_stop() failed"); return err; } - dma_channel_put(dma->dc.chan); + dma_channel_put_legacy(dma->dc.chan); dma_put(dma->dc.dmac); rfree((void *)dma->dmapb.addr); @@ -258,7 +258,7 @@ int probe_init(struct probe_dma *probe_dma) return err; } - err = dma_start(_probe->ext_dma.dc.chan); + err = dma_start_legacy(_probe->ext_dma.dc.chan); if (err < 0) { tr_err(&pr_tr, "probe_init(): failed to start extraction dma"); @@ -825,9 +825,9 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) } dma = &_probe->inject_dma[j]; /* get avail data info */ - ret = dma_get_data_size(dma->dc.chan, - &dma->dmapb.avail, - &free_bytes); + ret = dma_get_data_size_legacy(dma->dc.chan, + &dma->dmapb.avail, + &free_bytes); if (ret < 0) { tr_err(&pr_tr, "probe_cb_produce(): dma_get_data_size() failed, ret = %u", ret); @@ -1013,7 +1013,7 @@ int probe_point_add(uint32_t count, struct probe_point *probe) return -EINVAL; } - if (dma_start(_probe->inject_dma[j].dc.chan) < 0) { + if (dma_start_legacy(_probe->inject_dma[j].dc.chan) < 0) { tr_err(&pr_tr, "probe_point_add(): failed to start dma"); return -EBUSY; diff --git a/src/schedule/dma_multi_chan_domain.c b/src/schedule/dma_multi_chan_domain.c index 760e8cf730e6..94f1bdc32dcb 100644 --- a/src/schedule/dma_multi_chan_domain.c +++ b/src/schedule/dma_multi_chan_domain.c @@ -149,7 +149,7 @@ static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain, if (dma_domain->channel_mask[i][core] & BIT(j)) continue; - dma_interrupt(&dmas[i].chan[j], DMA_IRQ_CLEAR); + dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR); /* register only if not aggregated or not registered */ if (!dma_domain->aggregated_irq || @@ -171,7 +171,7 @@ static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain, interrupt_clear_mask(dma_domain->data[i][j].irq, BIT(j)); - dma_interrupt(&dmas[i].chan[j], DMA_IRQ_UNMASK); + dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_UNMASK); dma_domain->data[i][j].task = pipe_task; dma_domain->channel_mask[i][core] |= BIT(j); @@ -239,8 +239,8 @@ static int dma_multi_chan_domain_unregister(struct ll_schedule_domain *domain, if (!(dma_domain->channel_mask[i][core] & BIT(j))) continue; - dma_interrupt(&dmas[i].chan[j], DMA_IRQ_MASK); - dma_interrupt(&dmas[i].chan[j], DMA_IRQ_CLEAR); + dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_MASK); + dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR); interrupt_clear_mask(dma_domain->data[i][j].irq, BIT(j)); @@ -282,8 +282,8 @@ static bool dma_multi_chan_domain_is_pending(struct ll_schedule_domain *domain, for (i = 0; i < dma_domain->num_dma; ++i) { for (j = 0; j < dmas[i].plat_data.channels; ++j) { if (!*comp) { - status = dma_interrupt(&dmas[i].chan[j], - DMA_IRQ_STATUS_GET); + status = dma_interrupt_legacy(&dmas[i].chan[j], + DMA_IRQ_STATUS_GET); if (!status) continue; @@ -329,7 +329,7 @@ static 
bool dma_multi_chan_domain_is_pending(struct ll_schedule_domain *domain, /* clear interrupt */ if (pipe_task->registrable) { - dma_interrupt(&dmas[i].chan[j], DMA_IRQ_CLEAR); + dma_interrupt_legacy(&dmas[i].chan[j], DMA_IRQ_CLEAR); interrupt_clear_mask(dma_domain->data[i][j].irq, BIT(j)); } diff --git a/src/schedule/dma_single_chan_domain.c b/src/schedule/dma_single_chan_domain.c index ac604d1e14c6..a5853eb61c6b 100644 --- a/src/schedule/dma_single_chan_domain.c +++ b/src/schedule/dma_single_chan_domain.c @@ -210,8 +210,8 @@ static int dma_single_chan_domain_register(struct ll_schedule_domain *domain, /* unregister from current channel */ dma_single_chan_domain_irq_unregister(data); - dma_interrupt(data->channel, DMA_IRQ_MASK); - dma_interrupt(data->channel, DMA_IRQ_CLEAR); + dma_interrupt_legacy(data->channel, DMA_IRQ_MASK); + dma_interrupt_legacy(data->channel, DMA_IRQ_CLEAR); dma_domain->channel_changed = true; @@ -234,7 +234,7 @@ static int dma_single_chan_domain_register(struct ll_schedule_domain *domain, goto out; /* enable channel interrupt */ - dma_interrupt(data->channel, DMA_IRQ_UNMASK); + dma_interrupt_legacy(data->channel, DMA_IRQ_UNMASK); /* unmask if we are the owner */ if (dma_domain->owner == core) @@ -333,8 +333,8 @@ static void dma_domain_unregister_owner(struct ll_schedule_domain *domain, /* no other channel is running */ dma_single_chan_domain_irq_unregister(data); - dma_interrupt(data->channel, DMA_IRQ_MASK); - dma_interrupt(data->channel, DMA_IRQ_CLEAR); + dma_interrupt_legacy(data->channel, DMA_IRQ_MASK); + dma_interrupt_legacy(data->channel, DMA_IRQ_CLEAR); data->channel = NULL; if (channel) { @@ -413,7 +413,7 @@ static void dma_single_chan_domain_enable(struct ll_schedule_domain *domain, if (!data->channel) return; - dma_interrupt(data->channel, DMA_IRQ_UNMASK); + dma_interrupt_legacy(data->channel, DMA_IRQ_UNMASK); interrupt_unmask(data->irq, core); } @@ -477,7 +477,7 @@ static void dma_single_chan_domain_clear(struct ll_schedule_domain *domain) if (!data->channel) return; - dma_interrupt(data->channel, DMA_IRQ_CLEAR); + dma_interrupt_legacy(data->channel, DMA_IRQ_CLEAR); } /** @@ -511,8 +511,8 @@ static void dma_domain_changed(void *arg, enum notify_id type, void *data) dma_single_chan_domain_irq_unregister(domain_data); if (domain_data->channel->core == core) { - dma_interrupt(domain_data->channel, DMA_IRQ_MASK); - dma_interrupt(domain_data->channel, DMA_IRQ_CLEAR); + dma_interrupt_legacy(domain_data->channel, DMA_IRQ_MASK); + dma_interrupt_legacy(domain_data->channel, DMA_IRQ_CLEAR); } /* register to the new DMA channel */ diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 730d823c837b..2a76d323a0d1 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -374,7 +374,7 @@ static int dma_trace_start(struct dma_trace_data *d) d->active_stream_tag); schedule_task_cancel(&d->dmat_work); - err = dma_stop(d->dc.chan); + err = dma_stop_legacy(d->dc.chan); if (err < 0) { mtrace_printf(LOG_LEVEL_ERROR, "dma_trace_start(): DMA channel failed to stop"); @@ -384,7 +384,7 @@ static int dma_trace_start(struct dma_trace_data *d) "dma_trace_start(): stream_tag change from %u to %u", d->active_stream_tag, d->stream_tag); - dma_channel_put(d->dc.chan); + dma_channel_put_legacy(d->dc.chan); d->dc.chan = NULL; err = dma_copy_set_stream_tag(&d->dc, d->stream_tag); } @@ -400,18 +400,18 @@ static int dma_trace_start(struct dma_trace_data *d) d->active_stream_tag = d->stream_tag; - err = dma_set_config(d->dc.chan, &d->gw_config); + err = 
dma_set_config_legacy(d->dc.chan, &d->gw_config);
 	if (err < 0) {
 		mtrace_printf(LOG_LEVEL_ERROR,
 			      "dma_set_config() failed: %d", err);
 		goto error;
 	}
 
-	err = dma_start(d->dc.chan);
+	err = dma_start_legacy(d->dc.chan);
 	if (err == 0)
 		return 0;
 
 error:
-	dma_channel_put(d->dc.chan);
+	dma_channel_put_legacy(d->dc.chan);
 	d->dc.chan = NULL;
 
 	return err;
@@ -504,8 +504,8 @@ void dma_trace_disable(struct dma_trace_data *d)
 	schedule_task_cancel(&d->dmat_work);
 
 	if (d->dc.chan) {
-		dma_stop(d->dc.chan);
-		dma_channel_put(d->dc.chan);
+		dma_stop_legacy(d->dc.chan);
+		dma_channel_put_legacy(d->dc.chan);
 		d->dc.chan = NULL;
 	}
 
diff --git a/zephyr/CMakeLists.txt b/zephyr/CMakeLists.txt
index 2f803eacedaf..8b0d160b1537 100644
--- a/zephyr/CMakeLists.txt
+++ b/zephyr/CMakeLists.txt
@@ -508,7 +508,6 @@ zephyr_library_sources(
 	${SOF_AUDIO_PATH}/pipeline/pipeline-schedule.c
 	${SOF_AUDIO_PATH}/pipeline/pipeline-stream.c
 	${SOF_AUDIO_PATH}/pipeline/pipeline-xrun.c
-	${SOF_AUDIO_PATH}/host.c
 
 	# SOF core infrastructure - runs on top of Zephyr
 	${SOF_SRC_PATH}/init/init.c
@@ -526,6 +525,16 @@ zephyr_library_sources(
 	schedule.c
 )
 
+if(CONFIG_ZEPHYR_NATIVE_DRIVERS)
+	zephyr_library_sources(
+		${SOF_AUDIO_PATH}/host-zephyr.c
+	)
+else()
+	zephyr_library_sources(
+		${SOF_AUDIO_PATH}/host-legacy.c
+	)
+endif()
+
 zephyr_library_sources_ifdef(CONFIG_IPC_MAJOR_3
 	${SOF_IPC_PATH}/ipc3/handler.c
 	${SOF_IPC_PATH}/ipc3/helper.c
@@ -596,9 +605,15 @@ zephyr_library_sources_ifdef(CONFIG_COMP_TONE
 	${SOF_AUDIO_PATH}/tone.c
 )
 
-zephyr_library_sources_ifdef(CONFIG_COMP_DAI
-	${SOF_AUDIO_PATH}/dai.c
+if(CONFIG_ZEPHYR_NATIVE_DRIVERS)
+	zephyr_library_sources_ifdef(CONFIG_COMP_DAI
+		${SOF_AUDIO_PATH}/dai-zephyr.c
 )
+else()
+	zephyr_library_sources_ifdef(CONFIG_COMP_DAI
+		${SOF_AUDIO_PATH}/dai-legacy.c
+)
+endif()
 
 zephyr_library_sources_ifdef(CONFIG_SAMPLE_KEYPHRASE
 	${SOF_SAMPLES_PATH}/audio/detect_test.c
diff --git a/zephyr/Kconfig b/zephyr/Kconfig
index 635095a138a0..7700bd5da336 100644
--- a/zephyr/Kconfig
+++ b/zephyr/Kconfig
@@ -9,4 +9,13 @@ config SOF_ZEPHYR_HEAP_CACHED
 	  Enable cached heap by mapping cached SOF memory zones to different
 	  Zephyr sys_heap objects and enable caching for non-shared zones.
 
+config ZEPHYR_NATIVE_DRIVERS
+	bool "Use Zephyr native drivers"
+	default n
+	help
+	  Enable the Zephyr native API drivers for the host and DAI audio
+	  components. When enabled, host-zephyr and dai-zephyr are built
+	  instead of the legacy xtos host-legacy and dai-legacy
+	  implementations.
+
 endif
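
Note on the call-site pattern this series introduces: a call site either keeps using the renamed _legacy wrappers around the SOF ops table, or talks to the Zephyr DMA API directly through the bound device and a plain channel index. A minimal sketch of that relationship, using the types from src/include/sof/lib/dma.h above (sof_dma_start() is a hypothetical helper written for illustration, not part of this patch):

	static inline int sof_dma_start(struct dma_chan_data *chan)
	{
	#if CONFIG_ZEPHYR_NATIVE_DRIVERS
		/* Zephyr native path: device handle plus channel index */
		return dma_start(chan->dma->z_dev, chan->index);
	#else
		/* legacy xtos path: dispatch through the SOF DMA ops table */
		return dma_start_legacy(chan);
	#endif
	}

The same mapping holds for the other pairs visible in this diff: dma_stop_legacy()/dma_stop(), dma_set_config_legacy()/dma_config(), dma_copy_legacy()/dma_reload(), and dma_channel_get_legacy()/dma_request_channel().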