diff --git a/src/audio/mixin_mixout/mixin_mixout.c b/src/audio/mixin_mixout/mixin_mixout.c
index 0847007bfa40..b9efa59d0a60 100644
--- a/src/audio/mixin_mixout/mixin_mixout.c
+++ b/src/audio/mixin_mixout/mixin_mixout.c
@@ -75,8 +75,7 @@ struct mixin_sink_config {
 
 /* mixin component private data */
 struct mixin_data {
-	normal_mix_func normal_mix_channel;
-	mute_func mute_channel;
+	mix_func mix;
 	struct mixin_sink_config sink_config[MIXIN_MAX_SINKS];
 };
 
@@ -183,10 +182,10 @@ static int mixout_free(struct processing_module *mod)
 	return 0;
 }
 
-static int mix_and_remap(struct comp_dev *dev, const struct mixin_data *mixin_data,
-			 uint16_t sink_index, struct audio_stream *sink,
-			 uint32_t start_frame, uint32_t mixed_frames,
-			 const struct audio_stream *source, uint32_t frame_count)
+static int mix(struct comp_dev *dev, const struct mixin_data *mixin_data,
+	       uint16_t sink_index, struct audio_stream *sink,
+	       uint32_t start_frame, uint32_t mixed_frames,
+	       const struct audio_stream *source, uint32_t frame_count)
 {
 	const struct mixin_sink_config *sink_config;
 
@@ -198,17 +197,11 @@ static int mix_and_remap(struct comp_dev *dev, const struct mixin_da
 
 	sink_config = &mixin_data->sink_config[sink_index];
 
-	/* Mix streams. mix_channel() is reused here to mix streams, not individual
-	 * channels. To do so, (multichannel) stream is treated as single channel:
-	 * channel count is passed as 1, channel index is 0, frame indices (start_frame
-	 * and mixed_frame) and frame count are multiplied by real stream channel count.
-	 */
-	mixin_data->normal_mix_channel(sink, start_frame * audio_stream_get_channels(sink),
-				       mixed_frames * audio_stream_get_channels(sink),
-				       source,
-				       frame_count * audio_stream_get_channels(sink),
-				       sink_config->gain);
-
+	mixin_data->mix(sink, start_frame * audio_stream_get_channels(sink),
+			mixed_frames * audio_stream_get_channels(sink),
+			source,
+			frame_count * audio_stream_get_channels(sink),
+			sink_config->gain);
 	return 0;
 }
 
@@ -396,9 +389,9 @@ static int mixin_process(struct processing_module *mod,
		 * sink buffer has some data (written by another mixin) mix that data
		 * with source data.
		 */
-		ret = mix_and_remap(dev, mixin_data, sinks_ids[i], &sink->stream,
-				    start_frame, mixout_data->mixed_frames,
-				    input_buffers[0].data, frames_to_copy);
+		ret = mix(dev, mixin_data, sinks_ids[i], &sink->stream,
+			  start_frame, mixout_data->mixed_frames,
+			  input_buffers[0].data, frames_to_copy);
		if (ret < 0) {
			return ret;
		}
@@ -509,8 +502,7 @@ static int mixin_reset(struct processing_module *mod)
 
 	comp_dbg(dev, "mixin_reset()");
 
-	mixin_data->normal_mix_channel = NULL;
-	mixin_data->mute_channel = NULL;
+	mixin_data->mix = NULL;
 
 	return 0;
 }
@@ -635,15 +627,14 @@ static int mixin_prepare(struct processing_module *mod,
	case SOF_IPC_FRAME_S16_LE:
	case SOF_IPC_FRAME_S24_4LE:
	case SOF_IPC_FRAME_S32_LE:
-		md->normal_mix_channel = normal_mix_get_processing_function(fmt);
-		md->mute_channel = mute_mix_get_processing_function(fmt);
+		md->mix = mixin_get_processing_function(fmt);
		break;
	default:
		comp_err(dev, "unsupported data format %d", fmt);
		return -EINVAL;
	}
 
-	if (!md->normal_mix_channel || !md->mute_channel) {
+	if (!md->mix) {
		comp_err(dev, "have not found the suitable processing function");
		return -EINVAL;
	}
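Note on the new call convention above: mix() keeps frame-based bookkeeping in the component and converts to interleaved sample positions only at the edge, by scaling frame indices and counts with the sink channel count. A minimal restatement of that conversion in isolation; the wrapper name and free-standing form are illustrative, not part of the patch:

/* Illustrative only: how frame-based positions map onto the sample-based
 * mix_func interface introduced by this patch.  Mirrors the call made in
 * mix() above; the helper itself is hypothetical.
 */
static inline void mix_frames(struct mixin_data *md, struct audio_stream *sink,
			      const struct audio_stream *source,
			      uint32_t start_frame, uint32_t mixed_frames,
			      uint32_t frame_count, uint16_t gain)
{
	/* One interleaved frame holds one sample per channel, so frame
	 * positions and counts scale by the channel count.
	 */
	uint32_t channels = audio_stream_get_channels(sink);

	md->mix(sink, start_frame * channels, mixed_frames * channels,
		source, frame_count * channels, gain);
}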
diff --git a/src/audio/mixin_mixout/mixin_mixout.h b/src/audio/mixin_mixout/mixin_mixout.h
index 49184e2aa51b..6b097df99b16 100644
--- a/src/audio/mixin_mixout/mixin_mixout.h
+++ b/src/audio/mixin_mixout/mixin_mixout.h
@@ -103,59 +103,35 @@ struct ipc4_mixer_mode_config {
 } __packed __aligned(4);
 
 /**
- * \brief normal mode mixin_mixout processing function interface
+ * \brief mixin processing function interface
  */
-typedef void (*normal_mix_func)(struct audio_stream *sink, int32_t start_frame,
-				int32_t mixed_frames,
-				const struct audio_stream *source,
-				int32_t frame_count, uint16_t gain);
+typedef void (*mix_func)(struct audio_stream *sink, int32_t start_sample,
+			 int32_t mixed_samples,
+			 const struct audio_stream *source,
+			 int32_t sample_count, uint16_t gain);
 
 /**
- * \brief mixin_mixout mute processing function interface
- */
-typedef void (*mute_func) (struct audio_stream *stream, int32_t channel_index,
-			   int32_t start_frame, int32_t mixed_frames, int32_t frame_count);
-
-/**
- * @brief mixin_mixout processing functions map.
+ * @brief mixin processing functions map.
  */
 struct mix_func_map {
-	uint16_t frame_fmt;	/* frame format */
-	normal_mix_func normal_func;	/* normal mode mixin_mixout processing function */
-	mute_func mute_func;	/* mute processing function */
+	uint16_t frame_fmt;	/* frame format */
+	mix_func func;		/* mixin processing function */
 };
 
 extern const struct mix_func_map mix_func_map[];
 extern const size_t mix_count;
 
 /**
- * \brief Retrievies normal mode mixer processing function.
- * \param[in] fmt stream PCM frame format
- */
-static inline normal_mix_func normal_mix_get_processing_function(int fmt)
-{
-	int i;
-
-	/* map the normal mode mixin_mixout function for source and sink buffers */
-	for (i = 0; i < mix_count; i++) {
-		if (fmt == mix_func_map[i].frame_fmt)
-			return mix_func_map[i].normal_func;
-	}
-
-	return NULL;
-}
-
-/**
- * \brief Retrievies normal mode mixer processing function.
+ * \brief Retrieves mixin processing function.
  * \param[in] fmt stream PCM frame format
  */
-static inline mute_func mute_mix_get_processing_function(int fmt)
+static inline mix_func mixin_get_processing_function(int fmt)
 {
	int i;
 
-	/* map the mute function for source and sink buffers */
+	/* map mixin processing function for source and sink buffers */
	for (i = 0; i < mix_count; i++) {
		if (fmt == mix_func_map[i].frame_fmt)
-			return mix_func_map[i].mute_func;
+			return mix_func_map[i].func;
	}
 
	return NULL;
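With mute_func gone, mix_func_map carries exactly one callback per PCM format and mixin_get_processing_function() is the only lookup left. A usage sketch; the sink/source streams, period_samples and gain variables are placeholders for whatever the caller already has and are not defined by this patch:

/* Resolve the mixer once (as mixin_prepare() does), then run it per period.
 * Placeholder context: sink_stream, source_stream, period_samples and gain
 * come from the caller.
 */
mix_func mix = mixin_get_processing_function(SOF_IPC_FRAME_S24_4LE);

if (!mix)
	return -EINVAL;	/* format not built into mix_func_map */

/* start_sample = 0 and mixed_samples = 0: nothing has been mixed into the
 * sink yet, so the whole period goes through the plain-copy path.
 */
mix(sink_stream, 0, 0, source_stream, period_samples, gain);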
diff --git a/src/audio/mixin_mixout/mixin_mixout_generic.c b/src/audio/mixin_mixout/mixin_mixout_generic.c
index 793a7f5d12c7..3c9895d18d45 100644
--- a/src/audio/mixin_mixout/mixin_mixout_generic.c
+++ b/src/audio/mixin_mixout/mixin_mixout_generic.c
@@ -12,35 +12,28 @@
 
 #ifdef MIXIN_MIXOUT_GENERIC
 
 #if CONFIG_FORMAT_S16LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s16(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int32_t frames_to_mix, frames_to_copy, left_frames;
+	int32_t samples_to_mix, samples_to_copy, left_samples;
	int32_t n, nmax, i;
	/* audio_stream_wrap() is required and is done below in a loop */
-	int16_t *dst = (int16_t *)audio_stream_get_wptr(sink) + start_frame;
+	int16_t *dst = (int16_t *)audio_stream_get_wptr(sink) + start_sample;
	int16_t *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = mixed_frames - start_frame;
-	frames_to_mix = MIN(frames_to_mix, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = mixed_samples - start_sample;
+	samples_to_mix = MIN(samples_to_mix, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s16(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s16(sink, dst);
		n = MIN(n, nmax);
		for (i = 0; i < n; i++) {
@@ -49,11 +42,11 @@ static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		nmax = audio_stream_samples_without_wrap_s16(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s16(sink, dst);
		n = MIN(n, nmax);
		memcpy_s(dst, n * sizeof(int16_t), src, n * sizeof(int16_t));
@@ -61,69 +54,30 @@ static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_fram
		src += n;
	}
 }
-
-static void mute_channel_s16(struct audio_stream *stream, int32_t channel_index,
-			     int32_t start_frame, int32_t mixed_frames, int32_t frame_count)
-{
-	int32_t skip_mixed_frames, n, left_frames, i, channel_count, frames, samples;
-	int16_t *ptr;
-
-	assert(mixed_frames >= start_frame);
-	skip_mixed_frames = mixed_frames - start_frame;
-
-	if (frame_count <= skip_mixed_frames)
-		return;
-	frame_count -= skip_mixed_frames;
-	channel_count = audio_stream_get_channels(stream);
-	/* audio_stream_wrap() is needed here and it is just below in a loop */
-	ptr = (int16_t *)audio_stream_get_wptr(stream) +
-		mixed_frames * audio_stream_get_channels(stream) +
-		channel_index;
-
-	for (left_frames = frame_count; left_frames; left_frames -= frames) {
-		ptr = audio_stream_wrap(stream, ptr);
-		n = audio_stream_samples_without_wrap_s16(stream, ptr);
-		samples = left_frames * channel_count;
-		n = MIN(samples, n);
-		frames = 0;
-		for (i = 0; i < n; i += channel_count) {
-			*ptr = 0;
-			ptr += channel_count;
-			frames++;
-		}
-	}
-}
 #endif	/* CONFIG_FORMAT_S16LE */
 
 #if CONFIG_FORMAT_S24LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s24(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int32_t frames_to_mix, frames_to_copy, left_frames;
+	int32_t samples_to_mix, samples_to_copy, left_samples;
	int32_t n, nmax, i;
	/* audio_stream_wrap() is required and is done below in a loop */
-	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_frame;
+	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_sample;
	int32_t *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = mixed_frames - start_frame;
-	frames_to_mix = MIN(frames_to_mix, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = mixed_samples - start_sample;
+	samples_to_mix = MIN(samples_to_mix, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s24(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s24(sink, dst);
		n = MIN(n, nmax);
		for (i = 0; i < n; i++) {
@@ -132,11 +86,11 @@ static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		nmax = audio_stream_samples_without_wrap_s24(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s24(sink, dst);
		n = MIN(n, nmax);
		memcpy_s(dst, n * sizeof(int32_t), src, n * sizeof(int32_t));
@@ -148,33 +102,26 @@ static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_fram
 
 #endif	/* CONFIG_FORMAT_S24LE */
 
 #if CONFIG_FORMAT_S32LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s32(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int32_t frames_to_mix, frames_to_copy, left_frames;
+	int32_t samples_to_mix, samples_to_copy, left_samples;
	int32_t n, nmax, i;
-	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_frame;
+	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_sample;
	int32_t *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = mixed_frames - start_frame;
-	frames_to_mix = MIN(frames_to_mix, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = mixed_samples - start_sample;
+	samples_to_mix = MIN(samples_to_mix, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s32(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s32(sink, dst);
		n = MIN(n, nmax);
		for (i = 0; i < n; i++) {
@@ -183,11 +130,11 @@ static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src);
		dst = audio_stream_wrap(sink, dst);
		nmax = audio_stream_samples_without_wrap_s32(source, src);
-		n = MIN(left_frames, nmax);
+		n = MIN(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s32(sink, dst);
		n = MIN(n, nmax);
		memcpy_s(dst, n * sizeof(int32_t), src, n * sizeof(int32_t));
@@ -198,50 +145,15 @@ static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_fram
 
 #endif	/* CONFIG_FORMAT_S32LE */
 
-#if CONFIG_FORMAT_S32LE || CONFIG_FORMAT_S24LE
-static void mute_channel_s32(struct audio_stream *stream, int32_t channel_index,
-			     int32_t start_frame, int32_t mixed_frames, int32_t frame_count)
-{
-	int32_t skip_mixed_frames, left_frames, n, channel_count, i, frames, samples;
-	int32_t *ptr;
-
-	assert(mixed_frames >= start_frame);
-	skip_mixed_frames = mixed_frames - start_frame;
-
-	if (frame_count <= skip_mixed_frames)
-		return;
-	frame_count -= skip_mixed_frames;
-	channel_count = audio_stream_get_channels(stream);
-
-	ptr = (int32_t *)audio_stream_get_wptr(stream) +
-		mixed_frames * audio_stream_get_channels(stream) +
-		channel_index;
-
-	for (left_frames = frame_count; left_frames > 0; left_frames -= frames) {
-		ptr = audio_stream_wrap(stream, ptr);
-		n = audio_stream_samples_without_wrap_s32(stream, ptr);
-		samples = left_frames * channel_count;
-		n = MIN(samples, n);
-		frames = 0;
-		for (i = 0; i < n; i += channel_count) {
-			*ptr = 0;
-			ptr += channel_count;
-			frames++;
-		}
-	}
-}
-
-#endif
-
 const struct mix_func_map mix_func_map[] = {
 #if CONFIG_FORMAT_S16LE
-	{ SOF_IPC_FRAME_S16_LE, normal_mix_channel_s16, mute_channel_s16},
+	{ SOF_IPC_FRAME_S16_LE, mix_s16 },
 #endif
 #if CONFIG_FORMAT_S24LE
-	{ SOF_IPC_FRAME_S24_4LE, normal_mix_channel_s24, mute_channel_s32},
+	{ SOF_IPC_FRAME_S24_4LE, mix_s24 },
 #endif
 #if CONFIG_FORMAT_S32LE
-	{ SOF_IPC_FRAME_S32_LE, normal_mix_channel_s32, mute_channel_s32}
+	{ SOF_IPC_FRAME_S32_LE, mix_s32 }
 #endif
 };
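All three generic mixers above follow the same pattern: samples in [start_sample, mixed_samples) were already written by another mixin and are accumulated, the remaining samples are plain copies, and both loops re-clamp n at every circular-buffer wrap via audio_stream_samples_without_wrap_*(). The sketch below shows only that split on flat arrays; wrap handling and the gain step of the real mix_s16/s24/s32 (whose inner loop bodies fall outside the hunks above) are omitted, and the saturating add is a local stand-in rather than the firmware helper.

#include <stdint.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

/* Local stand-in for a saturating 16-bit add. */
static int16_t sat_add16(int32_t x)
{
	if (x > INT16_MAX)
		return INT16_MAX;
	if (x < INT16_MIN)
		return INT16_MIN;
	return (int16_t)x;
}

/* Mix-then-copy split on flat buffers (conceptual model of mix_s16 above).
 * Like the real code, this assumes mixed_samples >= start_sample.
 */
static void mix_then_copy_s16(int16_t *dst, const int16_t *src,
			      int32_t start_sample, int32_t mixed_samples,
			      int32_t sample_count)
{
	int32_t samples_to_mix = MIN(mixed_samples - start_sample, sample_count);
	int32_t samples_to_copy = sample_count - samples_to_mix;
	int32_t i;

	/* Region another mixin already wrote: accumulate. */
	for (i = 0; i < samples_to_mix; i++)
		dst[start_sample + i] =
			sat_add16((int32_t)dst[start_sample + i] + src[i]);

	/* Fresh region: plain copy, as the real code does with memcpy_s(). */
	for (i = 0; i < samples_to_copy; i++)
		dst[start_sample + samples_to_mix + i] = src[samples_to_mix + i];
}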
diff --git a/src/audio/mixin_mixout/mixin_mixout_hifi3.c b/src/audio/mixin_mixout/mixin_mixout_hifi3.c
index c5a44251e335..c03e43779b11 100644
--- a/src/audio/mixin_mixout/mixin_mixout_hifi3.c
+++ b/src/audio/mixin_mixout/mixin_mixout_hifi3.c
@@ -11,18 +11,11 @@
 
 #ifdef MIXIN_MIXOUT_HIFI3
 
 #if CONFIG_FORMAT_S16LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s16(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int frames_to_mix, frames_to_copy, left_frames;
+	int samples_to_mix, samples_to_copy, left_samples;
	int n, nmax, i, m, left;
	ae_int16x4 in_sample;
	ae_int16x4 out_sample;
@@ -32,20 +25,20 @@ static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_fram
	ae_valign outu1 = AE_ZALIGN64();
	ae_valign outu2 = AE_ZALIGN64();
	/* audio_stream_wrap() is required and is done below in a loop */
-	ae_int16 *dst = (ae_int16 *)audio_stream_get_wptr(sink) + start_frame;
+	ae_int16 *dst = (ae_int16 *)audio_stream_get_wptr(sink) + start_sample;
	ae_int16 *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = AE_MIN_32_signed(mixed_frames - start_frame, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = AE_MIN_32_signed(mixed_samples - start_sample, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
	n = 0;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s16(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s16(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int16x4 *)src;
@@ -75,12 +68,12 @@ static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s16(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s16(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int16x4 *)src;
@@ -104,49 +97,14 @@ static void normal_mix_channel_s16(struct audio_stream *sink, int32_t start_fram
		}
	}
 }
-
-static void mute_channel_s16(struct audio_stream *stream, int32_t channel_index,
-			     int32_t start_frame, int32_t mixed_frames, int32_t frame_count)
-{
-	int skip_mixed_frames, left_frames;
-	int off = audio_stream_get_channels(stream) * sizeof(ae_int16);
-	ae_int16 *ptr;
-	ae_int16x4 zero = AE_ZERO16();
-
-	assert(mixed_frames >= start_frame);
-	skip_mixed_frames = mixed_frames - start_frame;
-
-	if (frame_count <= skip_mixed_frames)
-		return;
-	frame_count -= skip_mixed_frames;
-
-	AE_SETCBEGIN0(audio_stream_get_addr(stream));
-	AE_SETCEND0(audio_stream_get_end_addr(stream));
-
-	/* audio_stream_wrap() is needed here and it is just below in a loop */
-	ptr = (ae_int16 *)audio_stream_get_wptr(stream) +
-		mixed_frames * audio_stream_get_channels(stream) +
-		channel_index;
-	ptr = audio_stream_wrap(stream, ptr);
-
-	for (left_frames = frame_count ; left_frames; left_frames--)
-		AE_S16_0_XC(zero, ptr, off);
-}
 #endif	/* CONFIG_FORMAT_S16LE */
 
 #if CONFIG_FORMAT_S24LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s24(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int frames_to_mix, frames_to_copy, left_frames;
+	int samples_to_mix, samples_to_copy, left_samples;
	int n, nmax, i, m, left;
	ae_int32x2 in_sample;
	ae_int32x2 out_sample;
@@ -156,20 +114,20 @@ static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_fram
	ae_valign outu1 = AE_ZALIGN64();
	ae_valign outu2 = AE_ZALIGN64();
	/* audio_stream_wrap() is required and is done below in a loop */
-	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_frame;
+	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_sample;
	int32_t *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = AE_MIN_32_signed(mixed_frames - start_frame, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = AE_MIN_32_signed(mixed_samples - start_sample, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
	n = 0;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s24(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s24(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int32x2 *)src;
@@ -197,11 +155,11 @@ static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		nmax = audio_stream_samples_without_wrap_s24(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s24(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int32x2 *)src;
@@ -225,18 +183,11 @@ static void normal_mix_channel_s24(struct audio_stream *sink, int32_t start_fram
	}
 }
 
 #endif	/* CONFIG_FORMAT_S24LE */
 
 #if CONFIG_FORMAT_S32LE
-/* Instead of using audio_stream_get_channels(sink) and audio_stream_get_channels(source),
- * sink_channel_count and source_channel_count are supplied as parameters. This is done to reuse
- * the function to also mix an entire stream. In this case the function is called with fake stream
- * parameters: multichannel stream is treated as single channel and so the entire stream
- * contents is mixed.
- */
-static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_frame,
-				   int32_t mixed_frames,
-				   const struct audio_stream *source,
-				   int32_t frame_count, uint16_t gain)
+static void mix_s32(struct audio_stream *sink, int32_t start_sample, int32_t mixed_samples,
+		    const struct audio_stream *source,
+		    int32_t sample_count, uint16_t gain)
 {
-	int frames_to_mix, frames_to_copy, left_frames;
+	int samples_to_mix, samples_to_copy, left_samples;
	int n, nmax, i, m, left;
	ae_int32x2 in_sample;
	ae_int32x2 out_sample;
@@ -246,20 +197,20 @@ static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_fram
	ae_valign outu1 = AE_ZALIGN64();
	ae_valign outu2 = AE_ZALIGN64();
	/* audio_stream_wrap() is required and is done below in a loop */
-	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_frame;
+	int32_t *dst = (int32_t *)audio_stream_get_wptr(sink) + start_sample;
	int32_t *src = audio_stream_get_rptr(source);
 
-	assert(mixed_frames >= start_frame);
-	frames_to_mix = AE_MIN_32_signed(mixed_frames - start_frame, frame_count);
-	frames_to_copy = frame_count - frames_to_mix;
+	assert(mixed_samples >= start_sample);
+	samples_to_mix = AE_MIN_32_signed(mixed_samples - start_sample, sample_count);
+	samples_to_copy = sample_count - samples_to_mix;
	n = 0;
 
-	for (left_frames = frames_to_mix; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_mix; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s32(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s32(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int32x2 *)src;
@@ -286,12 +237,12 @@ static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_fram
		}
	}
 
-	for (left_frames = frames_to_copy; left_frames > 0; left_frames -= n) {
+	for (left_samples = samples_to_copy; left_samples > 0; left_samples -= n) {
		src = audio_stream_wrap(source, src + n);
		dst = audio_stream_wrap(sink, dst + n);
		/* calculate the remaining samples*/
		nmax = audio_stream_samples_without_wrap_s32(source, src);
-		n = AE_MIN_32_signed(left_frames, nmax);
+		n = AE_MIN_32_signed(left_samples, nmax);
		nmax = audio_stream_samples_without_wrap_s32(sink, dst);
		n = AE_MIN_32_signed(n, nmax);
		in = (ae_int32x2 *)src;
@@ -315,46 +266,15 @@ static void normal_mix_channel_s32(struct audio_stream *sink, int32_t start_fram
		}
	}
 }
 
 #endif	/* CONFIG_FORMAT_S32LE */
 
-#if CONFIG_FORMAT_S32LE || CONFIG_FORMAT_S24LE
-static void mute_channel_s32(struct audio_stream *stream, int32_t channel_index,
-			     int32_t start_frame, int32_t mixed_frames, int32_t frame_count)
-{
-	int skip_mixed_frames, left_frames;
-	ae_int32 *ptr;
-	int off = audio_stream_get_channels(stream) * sizeof(ae_int32);
-	ae_int32x2 zero = AE_ZERO32();
-
-	assert(mixed_frames >= start_frame);
-	skip_mixed_frames = mixed_frames - start_frame;
-
-	if (frame_count <= skip_mixed_frames)
-		return;
-	frame_count -= skip_mixed_frames;
-
-	AE_SETCBEGIN0(audio_stream_get_addr(stream));
-	AE_SETCEND0(audio_stream_get_end_addr(stream));
-
-	/* audio_stream_wrap() is needed here and it is just below in a loop */
-	ptr = (ae_int32 *)audio_stream_get_wptr(stream) +
-		mixed_frames * audio_stream_get_channels(stream) +
-		channel_index;
-	ptr = audio_stream_wrap(stream, ptr);
-
-	for (left_frames = frame_count ; left_frames > 0; left_frames--)
-		AE_S32_L_XC(zero, ptr, off);
-}
-
-#endif
-
 const struct mix_func_map mix_func_map[] = {
 #if CONFIG_FORMAT_S16LE
-	{ SOF_IPC_FRAME_S16_LE, normal_mix_channel_s16, mute_channel_s16},
+	{ SOF_IPC_FRAME_S16_LE, mix_s16 },
 #endif
 #if CONFIG_FORMAT_S24LE
-	{ SOF_IPC_FRAME_S24_4LE, normal_mix_channel_s24, mute_channel_s32},
+	{ SOF_IPC_FRAME_S24_4LE, mix_s24 },
 #endif
 #if CONFIG_FORMAT_S32LE
-	{ SOF_IPC_FRAME_S32_LE, normal_mix_channel_s32, mute_channel_s32}
+	{ SOF_IPC_FRAME_S32_LE, mix_s32 }
 #endif
 };
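After this change both the generic and the HiFi3 tables register a single entry per format, so the header lookup is the only dispatch path left. A quick self-check sketch, not part of the patch; it only uses mix_func_map, mix_count and mixin_get_processing_function() declared in the header above, and assumes -EINVAL is available through the usual SOF includes:

/* Verify every built-in format resolves to its own table entry. */
static int mix_func_map_self_check(void)
{
	size_t i;

	for (i = 0; i < mix_count; i++) {
		mix_func f = mixin_get_processing_function(mix_func_map[i].frame_fmt);

		if (!f || f != mix_func_map[i].func)
			return -EINVAL;
	}

	return 0;
}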