diff --git a/app/boards/intel_adsp_ace15_mtpm.conf b/app/boards/intel_adsp_ace15_mtpm.conf index 7f0938bbdd5b..efd336d4aee0 100644 --- a/app/boards/intel_adsp_ace15_mtpm.conf +++ b/app/boards/intel_adsp_ace15_mtpm.conf @@ -90,3 +90,6 @@ CONFIG_DEBUG_COREDUMP_MEMORY_DUMP_MIN=y CONFIG_PROBE=y CONFIG_PROBE_DMA_MAX=2 + +CONFIG_COMP_GOOGLE_RTC_AUDIO_PROCESSING=y +CONFIG_COMP_STUBS=y \ No newline at end of file diff --git a/src/audio/copier/copier.c b/src/audio/copier/copier.c index adeeae8c3dc4..d2c69daecc7d 100644 --- a/src/audio/copier/copier.c +++ b/src/audio/copier/copier.c @@ -405,7 +405,7 @@ static int do_conversion_copy(struct comp_dev *dev, comp_get_copy_limits(src, sink, processed_data); - i = IPC4_SINK_QUEUE_ID(sink->id); + i = IPC4_SINK_QUEUE_ID(buf_get_id(sink)); if (i >= IPC4_COPIER_MODULE_OUTPUT_PINS_COUNT) return -EINVAL; buffer_stream_invalidate(src, processed_data->source_bytes); @@ -483,7 +483,7 @@ static int copier_module_copy(struct processing_module *mod, uint32_t samples; int sink_queue_id; - sink_queue_id = IPC4_SINK_QUEUE_ID(sink_c->id); + sink_queue_id = IPC4_SINK_QUEUE_ID(buf_get_id(sink_c)); if (sink_queue_id >= IPC4_COPIER_MODULE_OUTPUT_PINS_COUNT) return -EINVAL; @@ -668,7 +668,7 @@ static int copier_set_sink_fmt(struct comp_dev *dev, const void *data, sink = container_of(sink_list, struct comp_buffer, source_list); - sink_id = IPC4_SINK_QUEUE_ID(sink->id); + sink_id = IPC4_SINK_QUEUE_ID(buf_get_id(sink)); if (sink_id == sink_fmt->sink_id) { ipc4_update_buffer_format(sink, &sink_fmt->sink_fmt); break; diff --git a/src/audio/copier/copier_generic.c b/src/audio/copier/copier_generic.c index c4991363eb7f..8c3a3278c7d1 100644 --- a/src/audio/copier/copier_generic.c +++ b/src/audio/copier/copier_generic.c @@ -85,7 +85,7 @@ void copier_update_params(struct copier_data *cd, struct comp_dev *dev, sink = container_of(sink_list, struct comp_buffer, source_list); - j = IPC4_SINK_QUEUE_ID(sink->id); + j = IPC4_SINK_QUEUE_ID(buf_get_id(sink)); ipc4_update_buffer_format(sink, &cd->out_fmt[j]); } diff --git a/src/audio/dai-zephyr.c b/src/audio/dai-zephyr.c index d983b8699fa5..df79d74a522b 100644 --- a/src/audio/dai-zephyr.c +++ b/src/audio/dai-zephyr.c @@ -284,7 +284,7 @@ dai_dma_cb(struct dai_data *dd, struct comp_dev *dev, uint32_t bytes, sink_dev = sink->sink; - j = IPC4_SINK_QUEUE_ID(sink->id); + j = IPC4_SINK_QUEUE_ID(buf_get_id(sink)); if (j >= IPC4_COPIER_MODULE_OUTPUT_PINS_COUNT) { comp_err(dev, "Sink queue ID: %d >= max output pin count: %d\n", @@ -1635,7 +1635,7 @@ int dai_zephyr_unbind(struct dai_data *dd, struct comp_dev *dev, void *data) buf_id = IPC4_COMP_ID(bu->extension.r.src_queue, bu->extension.r.dst_queue); if (dd && dd->local_buffer) { - if (dd->local_buffer->id == buf_id) { + if (buf_get_id(dd->local_buffer) == buf_id) { comp_dbg(dev, "dai_zephyr_unbind: local_buffer %x unbound", buf_id); dd->local_buffer = NULL; } diff --git a/src/audio/dp_queue.c b/src/audio/dp_queue.c index 8001594a27bc..a201dde82130 100644 --- a/src/audio/dp_queue.c +++ b/src/audio/dp_queue.c @@ -245,7 +245,8 @@ static const struct sink_ops dp_queue_sink_ops = { .audio_set_ipc_params = dp_queue_set_ipc_params_sink, }; -struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, uint32_t flags) +struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, uint32_t flags, + uint32_t id) { struct dp_queue *dp_queue; @@ -286,8 +287,9 @@ struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, ui if (!dp_queue->_data_buffer) goto err; - 
tr_info(&dp_queue_tr, "DpQueue created, shared: %u min_available: %u min_free_space %u, size %u", - dp_queue_is_shared(dp_queue), min_available, min_free_space, + dp_queue->audio_stream_params.id = id; + tr_info(&dp_queue_tr, "DpQueue created, id: %u shared: %u min_available: %u min_free_space %u, size %u", + id, dp_queue_is_shared(dp_queue), min_available, min_free_space, dp_queue->data_buffer_size); /* return a pointer to allocated structure */ diff --git a/src/audio/google/Kconfig b/src/audio/google/Kconfig index 027e15a2c229..cfb2f7cbbc97 100644 --- a/src/audio/google/Kconfig +++ b/src/audio/google/Kconfig @@ -16,6 +16,7 @@ config COMP_GOOGLE_RTC_AUDIO_PROCESSING bool "Google Real Time Communication Audio processing" select COMP_BLOB select GOOGLE_RTC_AUDIO_PROCESSING_MOCK if COMP_STUBS + depends on IPC_MAJOR_4 default n help Select for Google real-time communication audio processing. It @@ -35,7 +36,7 @@ config COMP_GOOGLE_RTC_AUDIO_PROCESSING_SAMPLE_RATE_HZ config COMP_GOOGLE_RTC_AUDIO_PROCESSING_NUM_CHANNELS depends on COMP_GOOGLE_RTC_AUDIO_PROCESSING int "Number of channels to process for Google Real Time Communication Audio processing" - default 1 + default 4 help Sets the number of channels to process in the Google real-time communication audio processing. diff --git a/src/audio/google/google_rtc_audio_processing.c b/src/audio/google/google_rtc_audio_processing.c index effdef6d0f25..f628d1433a2b 100644 --- a/src/audio/google/google_rtc_audio_processing.c +++ b/src/audio/google/google_rtc_audio_processing.c @@ -41,6 +41,7 @@ #define GOOGLE_RTC_AUDIO_PROCESSING_FREQENCY_TO_PERIOD_FRAMES 100 #define GOOGLE_RTC_NUM_INPUT_PINS 2 +#define GOOGLE_RTC_NUM_OUTPUT_PINS 1 LOG_MODULE_REGISTER(google_rtc_audio_processing, CONFIG_SOF_LOG_LEVEL); @@ -53,19 +54,15 @@ DECLARE_TR_CTX(google_rtc_audio_processing_tr, SOF_UUID(google_rtc_audio_process LOG_LEVEL_INFO); struct google_rtc_audio_processing_comp_data { -#if CONFIG_IPC_MAJOR_4 struct sof_ipc4_aec_config config; -#endif uint32_t num_frames; int num_aec_reference_channels; int num_capture_channels; GoogleRtcAudioProcessingState *state; - int16_t *aec_reference_buffer; - int aec_reference_frame_index; - int16_t *raw_mic_buffer; - int raw_mic_buffer_frame_index; - int16_t *output_buffer; - int output_buffer_frame_index; + float *aec_reference_buffer; + float *aec_reference_buffer_ptrs[SOF_IPC_MAX_CHANNELS]; + float *process_buffer; + float *process_buffer_ptrs[SOF_IPC_MAX_CHANNELS]; uint8_t *memory_buffer; struct comp_data_blob_handler *tuning_handler; bool reconfigure; @@ -83,8 +80,10 @@ void GoogleRtcFree(void *ptr) return rfree(ptr); } -#if CONFIG_IPC_MAJOR_4 -static void google_rtc_audio_processing_params(struct processing_module *mod) +static void google_rtc_audio_processing_params(struct processing_module *mod, + struct sof_source *ref, + struct sof_source *mic, + struct sof_sink *out) { struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); struct sof_ipc_stream_params *params = mod->stream_params; @@ -94,19 +93,16 @@ static void google_rtc_audio_processing_params(struct processing_module *mod) ipc4_base_module_cfg_to_stream_params(&mod->priv.cfg.base_cfg, params); component_set_nearest_period_frames(dev, params->rate); - - list_for_item(source_list, &dev->bsource_list) { - sourceb = container_of(source_list, struct comp_buffer, sink_list); - if (IPC4_SINK_QUEUE_ID(source->id) == SOF_AEC_FEEDBACK_QUEUE_ID) - ipc4_update_buffer_format(sourceb, &cd->config.reference_fmt); - else - 
ipc4_update_buffer_format(sourceb, &mod->priv.cfg.base_cfg.audio_fmt); - } - - sinkb = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); - ipc4_update_buffer_format(sinkb, &mod->priv.cfg.base_cfg.audio_fmt); + /* TODO - it does not work, to be checked before merging!! + * ipc4_update_source_format(ref, &cd->config.reference_fmt); + */ + source_set_channels(ref, + CONFIG_COMP_GOOGLE_RTC_AUDIO_PROCESSING_NUM_AEC_REFERENCE_CHANNELS); + /* ipc4_update_source_format(mic, &mod->priv.cfg.base_cfg.audio_fmt); */ + source_set_channels(mic, CONFIG_COMP_GOOGLE_RTC_AUDIO_PROCESSING_NUM_CHANNELS); + /* ipc4_update_sink_format(out, &mod->priv.cfg.base_cfg.audio_fmt); */ + sink_set_channels(out, CONFIG_COMP_GOOGLE_RTC_AUDIO_PROCESSING_NUM_CHANNELS); } -#endif static int google_rtc_audio_processing_reconfigure(struct processing_module *mod) { @@ -255,60 +251,6 @@ static int google_rtc_audio_processing_reconfigure(struct processing_module *mod return 0; } -#if CONFIG_IPC_MAJOR_3 -static int google_rtc_audio_processing_cmd_set_data(struct processing_module *mod, - struct sof_ipc_ctrl_data *cdata) -{ - struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); - int ret; - - switch (cdata->cmd) { - case SOF_CTRL_CMD_BINARY: - ret = comp_data_blob_set_cmd(cd->tuning_handler, cdata); - if (ret) - return ret; - /* Accept the new blob immediately so that userspace can write - * the control in quick succession without error. - * This ensures the last successful control write from userspace - * before prepare/copy is applied. - * The config blob is not referenced after reconfigure() returns - * so it is safe to call comp_get_data_blob here which frees the - * old blob. This assumes cmd() and prepare()/copy() cannot run - * concurrently which is the case when there is no preemption. 
- */ - if (comp_is_new_data_blob_available(cd->tuning_handler)) { - comp_get_data_blob(cd->tuning_handler, NULL, NULL); - cd->reconfigure = true; - } - return 0; - default: - comp_err(mod->dev, - "google_rtc_audio_processing_ctrl_set_data(): Only binary controls supported %d", - cdata->cmd); - return -EINVAL; - } -} - -static int google_rtc_audio_processing_cmd_get_data(struct processing_module *mod, - struct sof_ipc_ctrl_data *cdata, - size_t max_data_size) -{ - struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); - - comp_info(mod->dev, "google_rtc_audio_processing_ctrl_get_data(): %u", cdata->cmd); - - switch (cdata->cmd) { - case SOF_CTRL_CMD_BINARY: - return comp_data_blob_get_cmd(cd->tuning_handler, cdata, max_data_size); - default: - comp_err(mod->dev, - "google_rtc_audio_processing_ctrl_get_data(): Only binary controls supported %d", - cdata->cmd); - return -EINVAL; - } -} -#endif - static int google_rtc_audio_processing_set_config(struct processing_module *mod, uint32_t param_id, enum module_cfg_fragment_position pos, uint32_t data_offset_size, @@ -316,7 +258,6 @@ static int google_rtc_audio_processing_set_config(struct processing_module *mod, size_t fragment_size, uint8_t *response, size_t response_size) { -#if CONFIG_IPC_MAJOR_4 struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); int ret; @@ -350,25 +291,14 @@ static int google_rtc_audio_processing_set_config(struct processing_module *mod, } return 0; -#elif CONFIG_IPC_MAJOR_3 - struct sof_ipc_ctrl_data *cdata = (struct sof_ipc_ctrl_data *)fragment; - - return google_rtc_audio_processing_cmd_set_data(mod, cdata); -#endif } static int google_rtc_audio_processing_get_config(struct processing_module *mod, uint32_t param_id, uint32_t *data_offset_size, uint8_t *fragment, size_t fragment_size) { -#if CONFIG_IPC_MAJOR_4 comp_err(mod->dev, "google_rtc_audio_processing_ctrl_get_config(): Not supported"); return -EINVAL; -#elif CONFIG_IPC_MAJOR_3 - struct sof_ipc_ctrl_data *cdata = (struct sof_ipc_ctrl_data *)fragment; - - return google_rtc_audio_processing_cmd_get_data(mod, cdata, fragment_size); -#endif } static int google_rtc_audio_processing_init(struct processing_module *mod) @@ -377,6 +307,8 @@ static int google_rtc_audio_processing_init(struct processing_module *mod) struct comp_dev *dev = mod->dev; struct google_rtc_audio_processing_comp_data *cd; int ret; + int channel; + size_t buf_size; comp_info(dev, "google_rtc_audio_processing_init()"); @@ -389,7 +321,7 @@ static int google_rtc_audio_processing_init(struct processing_module *mod) md->private = cd; -#if CONFIG_IPC_MAJOR_4 + struct module_config *cfg = &md->cfg; const struct ipc4_base_module_extended_cfg *base_cfg = md->cfg.init_data; struct ipc4_input_pin_format reference_fmt, output_fmt; const size_t size = sizeof(struct ipc4_input_pin_format); @@ -404,7 +336,7 @@ static int google_rtc_audio_processing_init(struct processing_module *mod) cd->config.reference_fmt = reference_fmt.audio_fmt; cd->config.output_fmt = output_fmt.audio_fmt; -#endif + cd->config = *(const struct sof_ipc4_aec_config *)cfg->init_data; cd->tuning_handler = comp_data_blob_handler_new(dev); if (!cd->tuning_handler) { @@ -454,36 +386,31 @@ static int google_rtc_audio_processing_init(struct processing_module *mod) goto fail; } - cd->raw_mic_buffer = rballoc( - 0, SOF_MEM_CAPS_RAM, - cd->num_frames * cd->num_capture_channels * sizeof(cd->raw_mic_buffer[0])); - if (!cd->raw_mic_buffer) { + buf_size = cd->num_frames * cd->num_capture_channels * 
sizeof(cd->process_buffer[0]); + comp_dbg(dev, "Allocating process_buffer of size %u", buf_size); + cd->process_buffer = rballoc(0, SOF_MEM_CAPS_RAM, buf_size); + if (!cd->process_buffer) { + comp_err(dev, "Allocating process_buffer failure"); ret = -EINVAL; goto fail; } - bzero(cd->raw_mic_buffer, cd->num_frames * cd->num_capture_channels * sizeof(cd->raw_mic_buffer[0])); - cd->raw_mic_buffer_frame_index = 0; - - cd->aec_reference_buffer = rballoc( - 0, SOF_MEM_CAPS_RAM, - cd->num_frames * sizeof(cd->aec_reference_buffer[0]) * - cd->num_aec_reference_channels); + bzero(cd->process_buffer, buf_size); + for (channel = 0; channel < cd->num_capture_channels; channel++) + cd->process_buffer_ptrs[channel] = &cd->process_buffer[channel * cd->num_frames]; + + buf_size = cd->num_frames * sizeof(cd->aec_reference_buffer[0]) * + cd->num_aec_reference_channels; + comp_dbg(dev, "Allocating aec_reference_buffer of size %u", buf_size); + cd->aec_reference_buffer = rballoc(0, SOF_MEM_CAPS_RAM, buf_size); if (!cd->aec_reference_buffer) { + comp_err(dev, "Allocating aec_reference_buffer failure"); ret = -ENOMEM; goto fail; } - bzero(cd->aec_reference_buffer, cd->num_frames * cd->num_aec_reference_channels * sizeof(cd->aec_reference_buffer[0])); - cd->aec_reference_frame_index = 0; - - cd->output_buffer = rballoc( - 0, SOF_MEM_CAPS_RAM, - cd->num_frames * cd->num_capture_channels * sizeof(cd->output_buffer[0])); - if (!cd->output_buffer) { - ret = -ENOMEM; - goto fail; - } - bzero(cd->output_buffer, cd->num_frames * sizeof(cd->output_buffer[0])); - cd->output_buffer_frame_index = 0; + bzero(cd->aec_reference_buffer, buf_size); + for (channel = 0; channel < cd->num_aec_reference_channels; channel++) + cd->aec_reference_buffer_ptrs[channel] = + &cd->aec_reference_buffer[channel * cd->num_frames]; /* comp_is_new_data_blob_available always returns false for the first * control write with non-empty config. 
The first non-empty write may @@ -501,14 +428,13 @@ static int google_rtc_audio_processing_init(struct processing_module *mod) fail: comp_err(dev, "google_rtc_audio_processing_init(): Failed"); if (cd) { - rfree(cd->output_buffer); rfree(cd->aec_reference_buffer); if (cd->state) { GoogleRtcAudioProcessingFree(cd->state); } GoogleRtcAudioProcessingDetachMemoryBuffer(); rfree(cd->memory_buffer); - rfree(cd->raw_mic_buffer); + rfree(cd->process_buffer); comp_data_blob_handler_free(cd->tuning_handler); rfree(cd); } @@ -524,11 +450,10 @@ static int google_rtc_audio_processing_free(struct processing_module *mod) GoogleRtcAudioProcessingFree(cd->state); cd->state = NULL; - rfree(cd->output_buffer); rfree(cd->aec_reference_buffer); GoogleRtcAudioProcessingDetachMemoryBuffer(); rfree(cd->memory_buffer); - rfree(cd->raw_mic_buffer); + rfree(cd->process_buffer); comp_data_blob_handler_free(cd->tuning_handler); rfree(cd); return 0; @@ -543,7 +468,6 @@ static int google_rtc_audio_processing_prepare(struct processing_module *mod, struct comp_dev *dev = mod->dev; struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); struct list_item *source_buffer_list_item; - struct comp_buffer *output; unsigned int aec_channels = 0, frame_fmt, rate; int microphone_stream_channels = 0; int output_stream_channels; @@ -552,34 +476,38 @@ static int google_rtc_audio_processing_prepare(struct processing_module *mod, comp_info(dev, "google_rtc_audio_processing_prepare()"); -#if CONFIG_IPC_MAJOR_4 - google_rtc_audio_processing_params(mod); -#endif + if (num_of_sources != GOOGLE_RTC_NUM_INPUT_PINS) { + comp_err(dev, "Expecting 2 sources - ref and mic, got %u", num_of_sources); + return -EINVAL; + } + + if (num_of_sinks != GOOGLE_RTC_NUM_OUTPUT_PINS) { + comp_err(dev, "Expecting 1 sink, got %u", num_of_sinks); + return -EINVAL; + } /* searching for stream and feedback source buffers */ - list_for_item(source_buffer_list_item, &dev->bsource_list) { - struct comp_buffer *source = container_of(source_buffer_list_item, - struct comp_buffer, sink_list); -#if CONFIG_IPC_MAJOR_4 - if (IPC4_SINK_QUEUE_ID(source->id) == SOF_AEC_FEEDBACK_QUEUE_ID) { -#else - if (source->source->pipeline->pipeline_id != dev->pipeline->pipeline_id) { -#endif + for (i = 0; i < num_of_sources; i++) { + + if (IPC4_SINK_QUEUE_ID(source_get_id(sources[i])) == SOF_AEC_FEEDBACK_QUEUE_ID) { + cd->aec_reference_source = i; - aec_channels = audio_stream_get_channels(&source->stream); + aec_channels = source_get_channels(sources[i]); comp_dbg(dev, "reference index = %d, channels = %d", i, aec_channels); } else { cd->raw_microphone_source = i; - microphone_stream_channels = audio_stream_get_channels(&source->stream); + microphone_stream_channels = source_get_channels(sources[i]); comp_dbg(dev, "microphone index = %d, channels = %d", i, microphone_stream_channels); } - - audio_stream_init_alignment_constants(1, 1, &source->stream); - i++; + source_set_alignment_constants(sources[i], 1, 1); } - output = list_first_item(&dev->bsink_list, struct comp_buffer, source_list); + google_rtc_audio_processing_params(mod, + sources[cd->aec_reference_source], + sources[cd->raw_microphone_source], + sinks[0]); + /* On some platform the playback output is left right left right due to a crossover * later on the signal processing chain. 
That makes the aec_reference be 4 channels @@ -591,10 +519,10 @@ static int google_rtc_audio_processing_prepare(struct processing_module *mod, return -EINVAL; } - audio_stream_init_alignment_constants(1, 1, &output->stream); - frame_fmt = audio_stream_get_frm_fmt(&output->stream); - rate = audio_stream_get_rate(&output->stream); - output_stream_channels = audio_stream_get_channels(&output->stream); + sink_set_alignment_constants(sinks[0], 1, 1); + frame_fmt = sink_get_frm_fmt(sinks[0]); + rate = sink_get_rate(sinks[0]); + output_stream_channels = sink_get_channels(sinks[0]); if (cd->num_capture_channels > microphone_stream_channels) { comp_err(dev, "unsupported number of microphone channels: %d", @@ -623,6 +551,28 @@ static int google_rtc_audio_processing_prepare(struct processing_module *mod, return -EINVAL; } + /* check IBS/OBS in streams */ + if (cd->num_frames * source_get_frame_bytes(sources[cd->raw_microphone_source]) != + source_get_min_available(sources[cd->raw_microphone_source])) { + comp_warn(dev, "Incorrect IBS on microphone source: %d, expected %u", + source_get_min_available(sources[cd->raw_microphone_source]), + cd->num_frames * + source_get_frame_bytes(sources[cd->raw_microphone_source])); + } + if (cd->num_frames * sink_get_frame_bytes(sinks[0]) != + sink_get_min_free_space(sinks[0])) { + comp_warn(dev, "Incorrect OBS on sink :%d, expected %u", + sink_get_min_free_space(sinks[0]), + cd->num_frames * sink_get_frame_bytes(sinks[0])); + } + if (cd->num_frames * source_get_frame_bytes(sources[cd->aec_reference_source]) != + source_get_min_available(sources[cd->aec_reference_source])) { + comp_warn(dev, "Incorrect IBS on reference source: %d, expected %u", + source_get_min_available(sources[cd->aec_reference_source]), + cd->num_frames * + source_get_frame_bytes(sources[cd->aec_reference_source])); + } + /* Blobs sent during COMP_STATE_READY is assigned to blob_handler->data * directly, so comp_is_new_data_blob_available always returns false. 
*/ @@ -630,6 +580,7 @@ static int google_rtc_audio_processing_prepare(struct processing_module *mod, if (ret) return ret; + comp_dbg(dev, "google_rtc_audio_processing_prepare() success"); return 0; } @@ -640,27 +591,62 @@ static int google_rtc_audio_processing_reset(struct processing_module *mod) return 0; } +static int16_t convert_float_to_uint16_hifi(float data) +{ + const xtfloat ratio = 2 << 14; + xtfloat x0 = data; + xtfloat x1; + int16_t x; + + x1 = XT_MUL_S(x0, ratio); + x = XT_TRUNC_S(x1, 0); + + return x; +} + +static float convert_uint16_to_float_hifi(int16_t data) +{ + const xtfloat ratio = 2 << 14; + xtfloat x0 = data; + float x; + + x = XT_DIV_S(x0, ratio); + + return x; +} + +/* todo CONFIG_FORMAT_S32LE */ static int google_rtc_audio_processing_process(struct processing_module *mod, - struct input_stream_buffer *input_buffers, - int num_input_buffers, - struct output_stream_buffer *output_buffers, - int num_output_buffers) + struct sof_source **sources, int num_of_sources, + struct sof_sink **sinks, int num_of_sinks) { - struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); - int16_t *src, *dst, *ref; - uint32_t num_aec_reference_frames; - uint32_t num_aec_reference_bytes; + + int ret; + uint16_t const *src; + uint8_t const *src_buf_start; + uint8_t const *src_buf_end; + size_t src_buf_size; + + uint16_t const *ref; + uint8_t const *ref_buf_start; + uint8_t const *ref_buf_end; + size_t ref_buf_size; + + uint16_t *dst; + uint8_t *dst_buf_start; + uint8_t *dst_buf_end; + size_t dst_buf_size; + + size_t num_of_bytes_to_process; int num_samples_remaining; int num_frames_remaining; int channel; - int frames; int nmax; - int ret; - int i, j, n; - struct input_stream_buffer *ref_streamb, *mic_streamb; - struct output_stream_buffer *out_streamb; - struct audio_stream *ref_stream, *mic_stream, *out_stream; + struct sof_source *ref_stream, *src_stream; + struct sof_sink *dst_stream; + + struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); if (cd->reconfigure) { ret = google_rtc_audio_processing_reconfigure(mod); @@ -668,97 +654,128 @@ static int google_rtc_audio_processing_process(struct processing_module *mod, return ret; } - ref_streamb = &input_buffers[cd->aec_reference_source]; - ref_stream = ref_streamb->data; - ref = audio_stream_get_rptr(ref_stream); - - num_aec_reference_frames = input_buffers[cd->aec_reference_source].size; - num_aec_reference_bytes = audio_stream_frame_bytes(ref_stream) * num_aec_reference_frames; - - num_samples_remaining = num_aec_reference_frames * audio_stream_get_channels(ref_stream); - while (num_samples_remaining) { - nmax = audio_stream_samples_without_wrap_s16(ref_stream, ref); - n = MIN(num_samples_remaining, nmax); - for (i = 0; i < n; i += cd->num_aec_reference_channels) { - j = cd->num_aec_reference_channels * cd->aec_reference_frame_index; - for (channel = 0; channel < cd->num_aec_reference_channels; ++channel) - cd->aec_reference_buffer[j++] = ref[channel]; - - ref += audio_stream_get_channels(ref_stream); - ++cd->aec_reference_frame_index; - - if (cd->aec_reference_frame_index == cd->num_frames) { - GoogleRtcAudioProcessingAnalyzeRender_int16(cd->state, - cd->aec_reference_buffer); - cd->aec_reference_frame_index = 0; - } + src_stream = sources[cd->raw_microphone_source]; + ref_stream = sources[cd->aec_reference_source]; + dst_stream = sinks[0]; + + num_of_bytes_to_process = cd->num_frames * source_get_frame_bytes(ref_stream); + ret = source_get_data(ref_stream, num_of_bytes_to_process, 
(const void **)&ref, + (const void **)&ref_buf_start, &ref_buf_size); + + /* problems here are extremely unlikely, as it has been checked that + * the buffer contains enough data + */ + assert(!ret); + ref_buf_end = ref_buf_start + ref_buf_size; + + + /* can't use source_get_data_frames_available as number of available data may have changed + * other processes may put some data to the buffer + */ + num_samples_remaining = num_of_bytes_to_process * source_get_channels(ref_stream) / + source_get_frame_bytes(ref_stream); + + /* de-interlace ref buffer, convert it to float */ + for (int i = 0; i < cd->num_frames; i++) { + for (channel = 0; channel < cd->num_aec_reference_channels; ++channel) { + cd->aec_reference_buffer_ptrs[channel][i] = + convert_uint16_to_float_hifi(ref[channel]); } - num_samples_remaining -= n; - ref = audio_stream_wrap(ref_stream, ref); + ref += cd->num_aec_reference_channels; + if ((void *)ref >= (void *)ref_buf_end) + ref = (void *)ref_buf_start; } - input_buffers[cd->aec_reference_source].consumed = num_aec_reference_bytes; - - mic_streamb = &input_buffers[cd->raw_microphone_source]; - mic_stream = mic_streamb->data; - out_streamb = &output_buffers[0]; - out_stream = out_streamb->data; - - src = audio_stream_get_rptr(mic_stream); - dst = audio_stream_get_wptr(out_stream); - - frames = input_buffers[cd->raw_microphone_source].size; - num_frames_remaining = frames; - - while (num_frames_remaining) { - nmax = audio_stream_frames_without_wrap(mic_stream, src); - n = MIN(num_frames_remaining, nmax); - nmax = audio_stream_frames_without_wrap(out_stream, dst); - n = MIN(n, nmax); - for (i = 0; i < n; i++) { - memcpy_s(&(cd->raw_mic_buffer[cd->raw_mic_buffer_frame_index * - cd->num_capture_channels]), - cd->num_frames * cd->num_capture_channels * - sizeof(cd->raw_mic_buffer[0]), src, - sizeof(int16_t) * cd->num_capture_channels); - ++cd->raw_mic_buffer_frame_index; - - memcpy_s(dst, cd->num_frames * cd->num_capture_channels * - sizeof(cd->output_buffer[0]), - &(cd->output_buffer[cd->output_buffer_frame_index * - cd->num_capture_channels]), - sizeof(int16_t) * cd->num_capture_channels); - ++cd->output_buffer_frame_index; - - if (cd->raw_mic_buffer_frame_index == cd->num_frames) { - GoogleRtcAudioProcessingProcessCapture_int16(cd->state, - cd->raw_mic_buffer, - cd->output_buffer); - cd->output_buffer_frame_index = 0; - cd->raw_mic_buffer_frame_index = 0; - } - src += audio_stream_get_channels(mic_stream); - dst += audio_stream_get_channels(out_stream); - } - num_frames_remaining -= n; - src = audio_stream_wrap(mic_stream, src); - dst = audio_stream_wrap(out_stream, dst); + GoogleRtcAudioProcessingAnalyzeRender_float32( + cd->state, + (const float **)cd->aec_reference_buffer_ptrs); + + source_release_data(ref_stream, num_of_bytes_to_process); + + /* process main stream - de interlace and convert */ + num_of_bytes_to_process = cd->num_frames * source_get_frame_bytes(src_stream); + ret = source_get_data(src_stream, num_of_bytes_to_process, (const void **)&src, + (const void **)&src_buf_start, &src_buf_size); + assert(!ret); + src_buf_end = src_buf_start + src_buf_size; + + for (int i = 0; i < cd->num_frames; i++) { + for (channel = 0; channel < cd->num_capture_channels; channel++) + cd->process_buffer_ptrs[channel][i] = + convert_uint16_to_float_hifi(src[channel]); + + src += cd->num_capture_channels; + if ((void *)src >= (void *)src_buf_end) + src = (void *)src_buf_start; + } + + source_release_data(src_stream, num_of_bytes_to_process); + + /* call the library, use same in/out 
buffers */ + GoogleRtcAudioProcessingProcessCapture_float32(cd->state, + (const float **)cd->process_buffer_ptrs, + cd->process_buffer_ptrs); + + /* same numnber of bytes to process for output stream as for mic stream */ + ret = sink_get_buffer(dst_stream, num_of_bytes_to_process, (void **)&dst, + (void **)&dst_buf_start, &dst_buf_size); + assert(!ret); + dst_buf_end = dst_buf_start + dst_buf_size; + + for (int i = 0; i < cd->num_frames; i++) { + for (channel = 0; channel < cd->num_capture_channels; channel++) + dst[channel] = + convert_float_to_uint16_hifi(cd->process_buffer_ptrs[channel][i]); + dst += cd->num_capture_channels; + if ((void *)dst >= (void *)dst_buf_end) + dst = (void *)dst_buf_start; } - module_update_buffer_position(&input_buffers[cd->raw_microphone_source], - &output_buffers[0], frames); + sink_commit_buffer(dst_stream, num_of_bytes_to_process); return 0; } +bool google_rtc_audio_is_ready_to_process(struct processing_module *mod, + struct sof_source **sources, int num_of_sources, + struct sof_sink **sinks, int num_of_sinks) +{ + struct google_rtc_audio_processing_comp_data *cd = module_get_private_data(mod); + struct sof_source *ref_stream, *mic_stream; + struct sof_sink *out_stream; + size_t min_ref_bytes; + + + /* check if both input streams and output stream have enough data/space */ + mic_stream = sources[cd->raw_microphone_source]; + ref_stream = sources[cd->aec_reference_source]; + out_stream = sinks[0]; + + /* this should source_get_min_available(ref_stream)!!! + * Currently the topology sets IBS incorrectly + */ + if (source_get_data_available(ref_stream) < cd->num_frames * + source_get_frame_bytes(ref_stream)) + return false; + + if (source_get_data_available(mic_stream) < source_get_min_available(mic_stream)) + return false; + + if (sink_get_free_size(out_stream) < sink_get_min_free_space(out_stream)) + return false; + + return true; +} + static struct module_interface google_rtc_audio_processing_interface = { .init = google_rtc_audio_processing_init, .free = google_rtc_audio_processing_free, - .process_audio_stream = google_rtc_audio_processing_process, + .process = google_rtc_audio_processing_process, .prepare = google_rtc_audio_processing_prepare, .set_configuration = google_rtc_audio_processing_set_config, .get_configuration = google_rtc_audio_processing_get_config, .reset = google_rtc_audio_processing_reset, + .is_ready_to_process = google_rtc_audio_is_ready_to_process, }; DECLARE_MODULE_ADAPTER(google_rtc_audio_processing_interface, diff --git a/src/audio/google/google_rtc_audio_processing_mock.c b/src/audio/google/google_rtc_audio_processing_mock.c index a6c55c641270..07f2f02bd722 100644 --- a/src/audio/google/google_rtc_audio_processing_mock.c +++ b/src/audio/google/google_rtc_audio_processing_mock.c @@ -10,8 +10,6 @@ #include #include -#include -#include #include #include "ipc/topology.h" @@ -23,7 +21,7 @@ struct GoogleRtcAudioProcessingState { int num_aec_reference_channels; int num_output_channels; int num_frames; - int16_t *aec_reference; + float *aec_reference; }; static void SetFormats(GoogleRtcAudioProcessingState *const state, @@ -140,46 +138,40 @@ int GoogleRtcAudioProcessingReconfigure(GoogleRtcAudioProcessingState *const sta return 0; } -int GoogleRtcAudioProcessingProcessCapture_int16(GoogleRtcAudioProcessingState *const state, - const int16_t *const src, - int16_t *const dest) +int GoogleRtcAudioProcessingProcessCapture_float32(GoogleRtcAudioProcessingState *const state, + const float *const *src, + float * const *dest) { - int16_t *ref = 
state->aec_reference; - int16_t *mic = (int16_t *) src; - int16_t *out = dest; - int n, io, im, ir; - - /* Mix input and reference channels to output. The matching channels numbers - * are mixed. If e.g. microphone and output channels count is 4, and reference - * has 2 channels, output channels 3 and 4 are copy of microphone channels 3 and 4, - * and output channels 1 and 2 are sum of microphone and reference. - */ - memset(dest, 0, sizeof(int16_t) * state->num_output_channels * state->num_frames); - for (n = 0; n < state->num_frames; ++n) { - im = 0; - ir = 0; - for (io = 0; io < state->num_output_channels; io++) { - out[io] = sat_int16( - (im < state->num_capture_channels ? (int32_t)mic[im++] : 0) + - (ir < state->num_aec_reference_channels ? (int32_t)ref[ir++] : 0)); + float *ref = state->aec_reference; + float **mic = (float **)src; + int n, chan, ref_chan; + + for (chan = 0; chan < state->num_output_channels; chan++) { + for (n = 0; n < state->num_frames; ++n) { + float mic_save = mic[chan][n]; /* allow same in/out buffer */ + + if (chan < state->num_aec_reference_channels) + dest[chan][n] = mic_save + ref[n + (chan * state->num_frames)]; + else + dest[chan][n] = mic_save; } - - ref += state->num_aec_reference_channels; - out += state->num_output_channels; - mic += state->num_capture_channels; } return 0; } -int GoogleRtcAudioProcessingAnalyzeRender_int16(GoogleRtcAudioProcessingState *const state, - const int16_t *const data) +int GoogleRtcAudioProcessingAnalyzeRender_float32(GoogleRtcAudioProcessingState *const state, + const float *const *data) { const size_t buffer_size = sizeof(state->aec_reference[0]) - * state->num_frames - * state->num_aec_reference_channels; - memcpy_s(state->aec_reference, buffer_size, - data, buffer_size); + * state->num_frames; + int channel; + + for (channel = 0; channel < state->num_aec_reference_channels; channel++) { + memcpy_s(&state->aec_reference[channel * state->num_frames], buffer_size, + data[channel], buffer_size); + } + return 0; } diff --git a/src/audio/kpb.c b/src/audio/kpb.c index 48bc6bd3eeec..a7ef3c1e2a45 100644 --- a/src/audio/kpb.c +++ b/src/audio/kpb.c @@ -357,7 +357,7 @@ static int kpb_bind(struct comp_dev *dev, void *data) break; } - sink_buf_id = sink->id; + sink_buf_id = buf_get_id(sink); if (sink_buf_id == buf_id) { if (sink_buf_id == 0) @@ -903,7 +903,7 @@ static int kpb_prepare(struct comp_dev *dev) audio_stream_init_alignment_constants(byte_align, frame_align_req, &sink->stream); - sink_id = sink->id; + sink_id = buf_get_id(sink); if (sink_id == 0) audio_stream_set_channels(&sink->stream, kpb->num_of_sel_mic); diff --git a/src/audio/mixin_mixout/mixin_mixout.c b/src/audio/mixin_mixout/mixin_mixout.c index ba3184c66e12..b15b38e1bc2f 100644 --- a/src/audio/mixin_mixout/mixin_mixout.c +++ b/src/audio/mixin_mixout/mixin_mixout.c @@ -302,7 +302,7 @@ static int mixin_process(struct processing_module *mod, unused_in_between_buf_c = container_of(output_buffers[i].data, struct comp_buffer, stream); mixout = unused_in_between_buf_c->sink; - sink_id = IPC4_SRC_QUEUE_ID(unused_in_between_buf_c->id); + sink_id = IPC4_SRC_QUEUE_ID(buf_get_id(unused_in_between_buf_c)); active_mixouts[i] = mixout; sinks_ids[i] = sink_id; @@ -570,7 +570,7 @@ static int mixin_params(struct processing_module *mod) /* Applying channel remapping may produce sink stream with channel count * different from source channel count. 
*/ - sink_id = IPC4_SRC_QUEUE_ID(sink->id); + sink_id = IPC4_SRC_QUEUE_ID(buf_get_id(sink)); if (sink_id >= MIXIN_MAX_SINKS) { comp_err(dev, "Sink index out of range: %u, max sink count: %u", (uint32_t)sink_id, MIXIN_MAX_SINKS); diff --git a/src/audio/module_adapter/module_adapter.c b/src/audio/module_adapter/module_adapter.c index a1f18cd034b4..ad41f681b72c 100644 --- a/src/audio/module_adapter/module_adapter.c +++ b/src/audio/module_adapter/module_adapter.c @@ -168,6 +168,9 @@ static int module_adapter_dp_queue_prepare(struct comp_dev *dev) * first, set all parameters by calling "module prepare" with pointers to * "main" audio_stream buffers */ + list_init(&mod->dp_queue_ll_to_dp_list); + list_init(&mod->dp_queue_dp_to_ll_list); + ret = module_adapter_sink_src_prepare(dev); if (ret) return ret; @@ -177,7 +180,6 @@ static int module_adapter_dp_queue_prepare(struct comp_dev *dev) * and copy stream parameters to shadow buffers */ i = 0; - list_init(&mod->dp_queue_ll_to_dp_list); list_for_item(blist, &dev->bsource_list) { struct comp_buffer *source_buffer = container_of(blist, struct comp_buffer, sink_list); @@ -189,7 +191,8 @@ static int module_adapter_dp_queue_prepare(struct comp_dev *dev) sink_get_min_free_space(audio_stream_get_sink(&source_buffer->stream)); /* create a shadow dp queue */ - dp_queue = dp_queue_create(min_available, min_free_space, dp_mode); + dp_queue = dp_queue_create(min_available, min_free_space, dp_mode, + buf_get_id(source_buffer)); if (!dp_queue) goto err; @@ -211,7 +214,6 @@ static int module_adapter_dp_queue_prepare(struct comp_dev *dev) unsigned int period = UINT32_MAX; i = 0; - list_init(&mod->dp_queue_dp_to_ll_list); list_for_item(blist, &dev->bsink_list) { struct comp_buffer *sink_buffer = container_of(blist, struct comp_buffer, source_list); @@ -223,7 +225,8 @@ static int module_adapter_dp_queue_prepare(struct comp_dev *dev) sink_get_min_free_space(audio_stream_get_sink(&sink_buffer->stream)); /* create a shadow dp queue */ - dp_queue = dp_queue_create(min_available, min_free_space, dp_mode); + dp_queue = dp_queue_create(min_available, min_free_space, dp_mode, + buf_get_id(sink_buffer)); if (!dp_queue) goto err; @@ -1068,24 +1071,33 @@ static int module_adapter_copy_dp_queues(struct comp_dev *dev) dp_queue = dp_queue_get_next_item(dp_queue); } - dp_queue = dp_queue_get_first_item(&mod->dp_queue_dp_to_ll_list); - list_for_item(blist, &dev->bsink_list) { + if (!mod->DP_startup_delay) { + dp_queue = dp_queue_get_first_item(&mod->dp_queue_dp_to_ll_list); + list_for_item(blist, &dev->bsink_list) { /* output - we need to copy data from dp_queue (as source) * to audio_stream (as sink) */ - assert(dp_queue); - struct comp_buffer *buffer = - container_of(blist, struct comp_buffer, source_list); - struct sof_sink *data_sink = audio_stream_get_sink(&buffer->stream); - struct sof_source *data_src = dp_queue_get_source(dp_queue); - uint32_t to_copy = MIN(sink_get_free_size(data_sink), - source_get_data_available(data_src)); + assert(dp_queue); + struct comp_buffer *buffer = + container_of(blist, struct comp_buffer, source_list); + struct sof_sink *data_sink = audio_stream_get_sink(&buffer->stream); + struct sof_source *following_mod_data_source = + audio_stream_get_source(&buffer->stream); + struct sof_source *data_src = dp_queue_get_source(dp_queue); + size_t dp_data_available = source_get_data_available(data_src); - err = source_to_sink_copy(data_src, data_sink, true, to_copy); - if (err) - return err; + if (!dp_data_available) + comp_err(dev, "!!!! 
no data available from DP"); - dp_queue = dp_queue_get_next_item(dp_queue); + uint32_t to_copy = MIN(source_get_min_available(following_mod_data_source), + dp_data_available); + + err = source_to_sink_copy(data_src, data_sink, true, to_copy); + return err; + + dp_queue = dp_queue_get_next_item(dp_queue); + + } } return 0; } diff --git a/src/audio/mux/mux.c b/src/audio/mux/mux.c index 92d278a22186..de16ee754c98 100644 --- a/src/audio/mux/mux.c +++ b/src/audio/mux/mux.c @@ -315,7 +315,7 @@ static void set_mux_params(struct processing_module *mod) source = container_of(source_list, struct comp_buffer, sink_list); audio_stream_init_alignment_constants(byte_align, frame_align_req, &source->stream); - j = source->id; + j = buf_get_id(source); cd->config.streams[j].pipeline_id = source->pipeline_id; if (j == BASE_CFG_QUEUED_ID) audio_fmt = &cd->md.base_cfg.audio_fmt; diff --git a/src/audio/pipeline/pipeline-graph.c b/src/audio/pipeline/pipeline-graph.c index 82fe49bbed20..02191a6f5b0a 100644 --- a/src/audio/pipeline/pipeline-graph.c +++ b/src/audio/pipeline/pipeline-graph.c @@ -181,9 +181,9 @@ int pipeline_connect(struct comp_dev *comp, struct comp_buffer *buffer, uint32_t flags; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) - comp_info(comp, "connect buffer %d as sink", buffer->id); + comp_info(comp, "connect buffer %d as sink", buf_get_id(buffer)); else - comp_info(comp, "connect buffer %d as source", buffer->id); + comp_info(comp, "connect buffer %d as source", buf_get_id(buffer)); irq_local_disable(flags); @@ -202,9 +202,9 @@ void pipeline_disconnect(struct comp_dev *comp, struct comp_buffer *buffer, int uint32_t flags; if (dir == PPL_CONN_DIR_COMP_TO_BUFFER) - comp_dbg(comp, "disconnect buffer %d as sink", buffer->id); + comp_dbg(comp, "disconnect buffer %d as sink", buf_get_id(buffer)); else - comp_dbg(comp, "disconnect buffer %d as source", buffer->id); + comp_dbg(comp, "disconnect buffer %d as source", buf_get_id(buffer)); irq_local_disable(flags); diff --git a/src/audio/rtnr/rtnr.c b/src/audio/rtnr/rtnr.c index 49db44eac1cb..7db70e4425ac 100644 --- a/src/audio/rtnr/rtnr.c +++ b/src/audio/rtnr/rtnr.c @@ -780,7 +780,8 @@ static int rtnr_copy(struct comp_dev *dev) /* Process integer multiple of RTNR internal block length */ frames = frames & ~RTNR_BLK_LENGTH_MASK; - comp_dbg(dev, "rtnr_copy() source->id: %d, frames = %d", source->id, frames); + comp_dbg(dev, "rtnr_copy() source_id: %d, frames = %d", buf_get_id(source), + frames); if (frames) { source_bytes = frames * audio_stream_frame_bytes(&source->stream); diff --git a/src/audio/sink_api_helper.c b/src/audio/sink_api_helper.c index 46ffbfd6b860..9961ee61dcb8 100644 --- a/src/audio/sink_api_helper.c +++ b/src/audio/sink_api_helper.c @@ -185,3 +185,9 @@ size_t sink_get_min_free_space(struct sof_sink *sink) { return sink->min_free_space; } + +uint32_t sink_get_id(struct sof_sink *sink) +{ + return sink->audio_stream_params->id; +} + diff --git a/src/audio/source_api_helper.c b/src/audio/source_api_helper.c index 91eb026812a9..f1aec4924f9b 100644 --- a/src/audio/source_api_helper.c +++ b/src/audio/source_api_helper.c @@ -96,6 +96,14 @@ bool source_get_underrun(struct sof_source *source) return source->audio_stream_params->underrun_permitted; } +int source_set_frm_fmt(struct sof_source *source, enum sof_ipc_frame frm_fmt) +{ + source->audio_stream_params->frame_fmt = frm_fmt; + if (source->ops->on_audio_format_set) + return source->ops->on_audio_format_set(source); + return 0; +} + int source_set_valid_fmt(struct sof_source *source, enum 
sof_ipc_frame valid_sample_fmt) { @@ -175,3 +183,8 @@ size_t source_get_min_available(struct sof_source *source) { return source->min_available; } + +uint32_t source_get_id(struct sof_source *source) +{ + return source->audio_stream_params->id; +} diff --git a/src/idc/zephyr_idc.c b/src/idc/zephyr_idc.c index 5f07e97ea628..887cff0eb375 100644 --- a/src/idc/zephyr_idc.c +++ b/src/idc/zephyr_idc.c @@ -30,6 +30,16 @@ #include #include #include +#include +#include + +LOG_MODULE_REGISTER(zephyr_idc, CONFIG_SOF_LOG_LEVEL); + +/* 5f1ec3f8-faaf-4099-903c-cee98351f169 */ +DECLARE_SOF_UUID("zephyr-idc", zephyr_idc_uuid, 0x5f1ec3f8, 0xfaaf, 0x4099, + 0x90, 0x3c, 0xce, 0xe9, 0x83, 0x51, 0xf1, 0x69); + +DECLARE_TR_CTX(zephyr_idc_tr, SOF_UUID(zephyr_idc_uuid), LOG_LEVEL_INFO); /* * Inter-CPU communication is only used in @@ -119,6 +129,10 @@ int idc_send_msg(struct idc_msg *msg, uint32_t mode) work->handler = idc_handler; work->sync = mode == IDC_BLOCKING; + if (!cpu_is_core_enabled(target_cpu)) { + tr_err(&zephyr_idc_tr, "Core %u is down, cannot sent IDC message", target_cpu); + return -EACCES; + } if (msg->payload) { idc_send_memcpy_err = memcpy_s(payload->data, sizeof(payload->data), msg->payload, msg->size); diff --git a/src/include/ipc4/base-config.h b/src/include/ipc4/base-config.h index 2e54478efd3a..d28728c3e63f 100644 --- a/src/include/ipc4/base-config.h +++ b/src/include/ipc4/base-config.h @@ -244,5 +244,11 @@ void ipc4_base_module_cfg_to_stream_params(const struct ipc4_base_module_cfg *ba struct comp_buffer; void ipc4_update_buffer_format(struct comp_buffer *buf_c, const struct ipc4_audio_format *fmt); +struct sof_source; +void ipc4_update_source_format(struct sof_source *source, + const struct ipc4_audio_format *fmt); +struct sof_sink; +void ipc4_update_sink_format(struct sof_sink *sink, + const struct ipc4_audio_format *fmt); #endif diff --git a/src/include/sof/audio/audio_stream.h b/src/include/sof/audio/audio_stream.h index 75060b0e1ef3..4243c25f8350 100644 --- a/src/include/sof/audio/audio_stream.h +++ b/src/include/sof/audio/audio_stream.h @@ -40,6 +40,7 @@ * TODO: compressed formats */ struct sof_audio_stream_params { + uint32_t id; enum sof_ipc_frame frame_fmt; /**< Sample data format */ enum sof_ipc_frame valid_sample_fmt; diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 568a121db575..e636771f9f93 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -47,7 +47,7 @@ extern struct tr_ctx buffer_tr; #define trace_buf_get_id(buf_ptr) ((buf_ptr)->pipeline_id) /** \brief Retrieves subid (comp id) from the buffer */ -#define trace_buf_get_subid(buf_ptr) ((buf_ptr)->id) +#define buf_get_id(buf_ptr) ((buf_ptr)->stream.runtime_stream_params.id) #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) @@ -58,36 +58,36 @@ extern struct tr_ctx buffer_tr; #endif #define buf_err(buf_ptr, __e, ...) LOG_ERR(__BUF_FMT __e, trace_buf_get_id(buf_ptr), \ - trace_buf_get_subid(buf_ptr), ##__VA_ARGS__) + buf_get_id(buf_ptr), ##__VA_ARGS__) #define buf_warn(buf_ptr, __e, ...) LOG_WRN(__BUF_FMT __e, trace_buf_get_id(buf_ptr), \ - trace_buf_get_subid(buf_ptr), ##__VA_ARGS__) + buf_get_id(buf_ptr), ##__VA_ARGS__) #define buf_info(buf_ptr, __e, ...) LOG_INF(__BUF_FMT __e, trace_buf_get_id(buf_ptr), \ - trace_buf_get_subid(buf_ptr), ##__VA_ARGS__) + buf_get_id(buf_ptr), ##__VA_ARGS__) #define buf_dbg(buf_ptr, __e, ...) 
LOG_DBG(__BUF_FMT __e, trace_buf_get_id(buf_ptr), \ - trace_buf_get_subid(buf_ptr), ##__VA_ARGS__) + buf_get_id(buf_ptr), ##__VA_ARGS__) #else /** \brief Trace error message from buffer */ #define buf_err(buf_ptr, __e, ...) \ trace_dev_err(trace_buf_get_tr_ctx, trace_buf_get_id, \ - trace_buf_get_subid, \ + buf_get_id, \ (__sparse_force const struct comp_buffer *)buf_ptr, \ __e, ##__VA_ARGS__) /** \brief Trace warning message from buffer */ #define buf_warn(buf_ptr, __e, ...) \ trace_dev_warn(trace_buf_get_tr_ctx, trace_buf_get_id, \ - trace_buf_get_subid, \ + buf_get_id, \ (__sparse_force const struct comp_buffer *)buf_ptr, \ __e, ##__VA_ARGS__) /** \brief Trace info message from buffer */ #define buf_info(buf_ptr, __e, ...) \ trace_dev_info(trace_buf_get_tr_ctx, trace_buf_get_id, \ - trace_buf_get_subid, \ + buf_get_id, \ (__sparse_force const struct comp_buffer *)buf_ptr, \ __e, ##__VA_ARGS__) @@ -97,7 +97,7 @@ extern struct tr_ctx buffer_tr; #else #define buf_dbg(buf_ptr, __e, ...) \ trace_dev_dbg(trace_buf_get_tr_ctx, trace_buf_get_id, \ - trace_buf_get_subid, \ + buf_get_id, \ (__sparse_force const struct comp_buffer *)buf_ptr, \ __e, ##__VA_ARGS__) #endif @@ -139,7 +139,6 @@ struct comp_buffer { struct audio_stream stream; /* configuration */ - uint32_t id; uint32_t pipeline_id; uint32_t caps; uint32_t core; diff --git a/src/include/sof/audio/dp_queue.h b/src/include/sof/audio/dp_queue.h index b187eb7a6f86..1c699f11e273 100644 --- a/src/include/sof/audio/dp_queue.h +++ b/src/include/sof/audio/dp_queue.h @@ -138,8 +138,11 @@ struct dp_queue { * * @param flags a combinatin of DP_QUEUE_MODE_* flags determining working mode * + * @param id a stream ID, accessible later by sink_get_id/source_get_id + * */ -struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, uint32_t flags); +struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, uint32_t flags, + uint32_t id); /** * @brief remove the queue from the list, free dp queue memory diff --git a/src/include/sof/audio/module_adapter/module/generic.h b/src/include/sof/audio/module_adapter/module/generic.h index 90f91ab0cef7..50884cf15efd 100644 --- a/src/include/sof/audio/module_adapter/module/generic.h +++ b/src/include/sof/audio/module_adapter/module/generic.h @@ -225,6 +225,35 @@ struct processing_module { /* module-specific flags for comp_verify_params() */ uint32_t verify_params_flags; + /* indicates that this DP module did not yet reach its first deadline and + * no data should be passed yet to next LL module + * + * why: lets assume DP with 10ms period (a.k.a a deadline). It starts and finishes + * Earlier, i.e. in 2ms providing 10ms of data. LL starts consuming data in 1ms chunks and + * will drain 10ms buffer in 10ms, expecting a new portion of data on 11th ms + * BUT - the DP module deadline is still 10ms, regardless if it had finished earlier + * and it is completely fine that processing in next cycle takes full 10ms - as long as it + * fits into the deadline. + * It may lead to underruns: + * + * LL1 (1ms) ---> DP (10ms) -->LL2 (1ms) + * + * ticks 0..9 -> LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting + * tick 10 - DP has enough data to run, it starts processing + * tick 12 - DP finishes earlier, LL2 starts consuming, LL1 is producing data + * ticks 13-19 LL1 is producing data, LL2 is consuming data (both in 1ms chunks) + * tick 20 - DP starts processing a new portion of 10ms data, having 10ms to finish + * !!!! but LL2 has already consumed 8ms !!!! 
+ * tick 22 - LL2 is consuming the last 1ms data chunk + * tick 23 - DP is still processing, LL2 has no data to process + * !!! UNDERRUN !!!! + * tick 19 - DP finishes properly in a deadline time + * + * Solution: even if DP finishes before its deadline, the data must be held till + * deadline time, so LL2 may start processing no earlier than tick 20 + */ + bool DP_startup_delay; + /* flag to indicate module does not pause */ bool no_pause; diff --git a/src/include/sof/audio/sink_api.h b/src/include/sof/audio/sink_api.h index bb279166c167..e9c1fa7e2d80 100644 --- a/src/include/sof/audio/sink_api.h +++ b/src/include/sof/audio/sink_api.h @@ -131,6 +131,7 @@ int sink_set_overrun(struct sof_sink *sink, bool overrun_permitted); int sink_set_buffer_fmt(struct sof_sink *sink, uint32_t buffer_fmt); void sink_set_min_free_space(struct sof_sink *sink, size_t min_free_space); size_t sink_get_min_free_space(struct sof_sink *sink); +uint32_t sink_get_id(struct sof_sink *sink); /** * initial set of audio parameters, provided in sof_ipc_stream_params diff --git a/src/include/sof/audio/source_api.h b/src/include/sof/audio/source_api.h index 70b3ce38c909..de224f4bc51a 100644 --- a/src/include/sof/audio/source_api.h +++ b/src/include/sof/audio/source_api.h @@ -132,8 +132,10 @@ unsigned int source_get_rate(struct sof_source *source); unsigned int source_get_channels(struct sof_source *source); uint32_t source_get_buffer_fmt(struct sof_source *source); bool source_get_underrun(struct sof_source *source); +uint32_t source_get_id(struct sof_source *source); /** set of functions for setting audio parameters */ +int source_set_frm_fmt(struct sof_source *source, enum sof_ipc_frame frm_fmt); int source_set_valid_fmt(struct sof_source *source, enum sof_ipc_frame valid_sample_fmt); int source_set_rate(struct sof_source *source, unsigned int rate); diff --git a/src/ipc/ipc-helper.c b/src/ipc/ipc-helper.c index 44983f6e6f95..dc18f8467c76 100644 --- a/src/ipc/ipc-helper.c +++ b/src/ipc/ipc-helper.c @@ -47,7 +47,7 @@ struct comp_buffer *buffer_new(const struct sof_ipc_buffer *desc, bool is_shared buffer = buffer_alloc(desc->size, desc->caps, desc->flags, PLATFORM_DCACHE_ALIGN, is_shared); if (buffer) { - buffer->id = desc->comp.id; + buffer->stream.runtime_stream_params.id = desc->comp.id; buffer->pipeline_id = desc->comp.pipeline_id; buffer->core = desc->comp.core; diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index 02fe108480ca..4006ce82be8d 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -614,7 +614,7 @@ int ipc_comp_disconnect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) buffer_id = IPC4_COMP_ID(bu->extension.r.src_queue, bu->extension.r.dst_queue); list_for_item(sink_list, &src->bsink_list) { struct comp_buffer *buf = container_of(sink_list, struct comp_buffer, source_list); - bool found = buf->id == buffer_id; + bool found = buf_get_id(buf) == buffer_id; if (found) { buffer = buf; @@ -1053,3 +1053,37 @@ void ipc4_update_buffer_format(struct comp_buffer *buf_c, buf_c->hw_params_configured = true; } + +void ipc4_update_source_format(struct sof_source *source, + const struct ipc4_audio_format *fmt) +{ + enum sof_ipc_frame valid_fmt, frame_fmt; + + source_set_channels(source, fmt->channels_count); + source_set_rate(source, fmt->sampling_frequency); + audio_stream_fmt_conversion(fmt->depth, + fmt->valid_bit_depth, + &frame_fmt, &valid_fmt, + fmt->s_type); + + source_set_frm_fmt(source, frame_fmt); + source_set_valid_fmt(source, valid_fmt); + source_set_buffer_fmt(source, 
fmt->interleaving_style); +} + +void ipc4_update_sink_format(struct sof_sink *sink, + const struct ipc4_audio_format *fmt) +{ + enum sof_ipc_frame valid_fmt, frame_fmt; + + sink_set_channels(sink, fmt->channels_count); + sink_set_rate(sink, fmt->sampling_frequency); + audio_stream_fmt_conversion(fmt->depth, + fmt->valid_bit_depth, + &frame_fmt, &valid_fmt, + fmt->s_type); + + sink_set_frm_fmt(sink, frame_fmt); + sink_set_valid_fmt(sink, valid_fmt); + sink_set_buffer_fmt(sink, fmt->interleaving_style); +} diff --git a/src/probe/probe.c b/src/probe/probe.c index 933cea584f6d..1e24331698eb 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -1073,7 +1073,7 @@ static struct comp_buffer *ipc4_get_buffer(struct ipc_comp_dev *dev, probe_point case PROBE_TYPE_INPUT: list_for_item(source_list, &dev->cd->bsource_list) { buf = container_of(source_list, struct comp_buffer, sink_list); - queue_id = IPC4_SRC_QUEUE_ID(buf->id); + queue_id = IPC4_SRC_QUEUE_ID(buf_get_id(buf)); if (queue_id == probe_point.fields.index) return buf; @@ -1082,7 +1082,7 @@ static struct comp_buffer *ipc4_get_buffer(struct ipc_comp_dev *dev, probe_point case PROBE_TYPE_OUTPUT: list_for_item(sink_list, &dev->cd->bsink_list) { buf = container_of(sink_list, struct comp_buffer, source_list); - queue_id = IPC4_SINK_QUEUE_ID(buf->id); + queue_id = IPC4_SINK_QUEUE_ID(buf_get_id(buf)); if (queue_id == probe_point.fields.index) return buf; diff --git a/src/samples/audio/smart_amp_test_ipc4.c b/src/samples/audio/smart_amp_test_ipc4.c index 616920543197..d09a8f9a9720 100644 --- a/src/samples/audio/smart_amp_test_ipc4.c +++ b/src/samples/audio/smart_amp_test_ipc4.c @@ -289,7 +289,8 @@ static int smart_amp_process(struct processing_module *mod, for (i = 0; i < num_input_buffers; i++) { buf = container_of(input_buffers[i].data, struct comp_buffer, stream); - if (IPC4_SINK_QUEUE_ID(buf->id) == SOF_SMART_AMP_FEEDBACK_QUEUE_ID) { + if (IPC4_SINK_QUEUE_ID(buf_get_id(buf)) == + SOF_SMART_AMP_FEEDBACK_QUEUE_ID) { fb_input = &input_buffers[i]; fb_buf_c = buf; } else { @@ -356,7 +357,8 @@ static int smart_amp_prepare(struct processing_module *mod, source_buffer = container_of(blist, struct comp_buffer, sink_list); audio_stream_init_alignment_constants(1, 1, &source_buffer->stream); - if (IPC4_SINK_QUEUE_ID(source_buffer->id) == SOF_SMART_AMP_FEEDBACK_QUEUE_ID) { + if (IPC4_SINK_QUEUE_ID(buf_get_id(source_buffer)) == + SOF_SMART_AMP_FEEDBACK_QUEUE_ID) { audio_stream_set_channels(&source_buffer->stream, sad->config.feedback_channels); audio_stream_set_rate(&source_buffer->stream, diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c index cdc5e6a8ad32..da4bd94366e7 100644 --- a/src/schedule/zephyr_dp_schedule.c +++ b/src/schedule/zephyr_dp_schedule.c @@ -35,10 +35,12 @@ struct scheduler_dp_data { struct task_dp_pdata { k_tid_t thread_id; /* zephyr thread ID */ - uint32_t period_clock_ticks; /* period the task should be scheduled in Zephyr ticks */ + uint32_t deadline_clock_ticks; /* dp module deadline in Zephyr ticks */ + uint32_t deadline_ll_cycles; /* dp module deadline in LL cycles */ k_thread_stack_t __sparse_cache *p_stack; /* pointer to thread stack */ struct k_sem sem; /* semaphore for task scheduling */ struct processing_module *mod; /* the module to be scheduled */ + uint32_t LL_cycles_to_deadline; /* current number of LL cycles till deadline */ }; /* Single CPU-wide lock @@ -227,11 +229,16 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void * lock_key = scheduler_dp_lock(); 
list_for_item(tlist, &dp_sch->tasks) { curr_task = container_of(tlist, struct task, list); + pdata = curr_task->priv_data; + struct processing_module *mod = pdata->mod; + + if (pdata->LL_cycles_to_deadline) { + pdata->LL_cycles_to_deadline--; + if (!pdata->LL_cycles_to_deadline) + mod->DP_startup_delay = false; + } - /* step 1 - check if the module is ready for processing */ if (curr_task->state == SOF_TASK_STATE_QUEUED) { - pdata = curr_task->priv_data; - struct processing_module *mod = pdata->mod; bool mod_ready; mod_ready = module_is_ready_to_process(mod, mod->sources, @@ -240,11 +247,13 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void * mod->num_of_sinks); if (mod_ready) { /* set a deadline for given num of ticks, starting now */ - k_thread_deadline_set(pdata->thread_id, pdata->period_clock_ticks); + k_thread_deadline_set(pdata->thread_id, + pdata->deadline_clock_ticks); /* trigger the task */ curr_task->state = SOF_TASK_STATE_RUNNING; k_sem_give(&pdata->sem); + pdata->LL_cycles_to_deadline = pdata->deadline_ll_cycles; } } } @@ -352,7 +361,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data; struct task_dp_pdata *pdata = task->priv_data; unsigned int lock_key; - uint64_t period_clock_ticks; + uint64_t deadline_clock_ticks; lock_key = scheduler_dp_lock(); @@ -371,13 +380,16 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta task->state = SOF_TASK_STATE_QUEUED; list_item_prepend(&task->list, &dp_sch->tasks); - period_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC; - /* period is in us - convert to seconds in next step + deadline_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC; + /* period/deadline is in us - convert to seconds in next step * or it always will be zero because of fixed point calculation */ - period_clock_ticks /= 1000000; + deadline_clock_ticks /= 1000000; - pdata->period_clock_ticks = period_clock_ticks; + pdata->deadline_clock_ticks = deadline_clock_ticks; + pdata->deadline_ll_cycles = period / LL_TIMER_PERIOD_US; + pdata->LL_cycles_to_deadline = 0; + pdata->mod->DP_startup_delay = true; scheduler_dp_unlock(lock_key); tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
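
Note on the float conversion introduced in google_rtc_audio_processing.c: the float32 AEC entry points operate on deinterleaved per-channel float buffers, so convert_uint16_to_float_hifi()/convert_float_to_uint16_hifi() scale S16LE samples by 2^15 (the `2 << 14` constant) on the way in and out of that representation; despite the "uint16" in their names they handle signed 16-bit samples. The following is a minimal portable sketch of the same Q15 scaling, for reference only. It is not the XT_MUL_S/XT_TRUNC_S/XT_DIV_S intrinsic path used in the patch, the q15_* names are illustrative, and explicit clamping is added here for clarity.

#include <stdint.h>
#include <stdio.h>

static float q15_to_float(int16_t x)
{
	/* 2 << 14 == 2^15 == 32768; maps the int16 range onto [-1.0, 1.0) */
	return (float)x / 32768.0f;
}

static int16_t q15_from_float(float x)
{
	float scaled = x * 32768.0f;

	/* clamp before truncating; the firmware path relies on XT_TRUNC_S */
	if (scaled > 32767.0f)
		return INT16_MAX;
	if (scaled < -32768.0f)
		return INT16_MIN;
	return (int16_t)scaled;	/* truncation toward zero */
}

int main(void)
{
	/* round-trip a few samples to show the scaling is symmetric */
	const int16_t in[] = { -32768, -1, 0, 1, 16384, 32767 };

	for (unsigned int i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
		float f = q15_to_float(in[i]);

		printf("%6d -> %+.6f -> %6d\n", in[i], f, q15_from_float(f));
	}
	return 0;
}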
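Note on the deadline conversion in scheduler_dp_task_shedule(): the period is given in microseconds, and the code multiplies by CONFIG_SYS_CLOCK_TICKS_PER_SEC before dividing by 1000000 for the reason stated in its comment: dividing first in integer arithmetic would truncate to zero for any period shorter than one second. A small sketch of the same arithmetic, with example values assumed purely for illustration (10 ms period, 10 kHz system tick, 1 ms LL timer period); the EXAMPLE_* names are not the real Kconfig/platform symbols.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_SYS_CLOCK_TICKS_PER_SEC 10000	/* illustrative value only */
#define EXAMPLE_LL_TIMER_PERIOD_US      1000	/* illustrative value only */

int main(void)
{
	uint64_t period_us = 10000;	/* 10 ms DP module period, i.e. its deadline */

	/* multiply first, then divide, as scheduler_dp_task_shedule() does */
	uint64_t deadline_ticks = period_us * EXAMPLE_SYS_CLOCK_TICKS_PER_SEC / 1000000;

	/* dividing first would truncate to zero for any period below 1 s */
	uint64_t wrong_ticks = period_us / 1000000 * EXAMPLE_SYS_CLOCK_TICKS_PER_SEC;

	/* LL cycles until the DP deadline, used to clear DP_startup_delay */
	uint64_t deadline_ll_cycles = period_us / EXAMPLE_LL_TIMER_PERIOD_US;

	printf("deadline_ticks=%llu wrong_ticks=%llu ll_cycles=%llu\n",
	       (unsigned long long)deadline_ticks,
	       (unsigned long long)wrong_ticks,
	       (unsigned long long)deadline_ll_cycles);
	return 0;
}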