
Commit b84b9e8

DP: provide data to next LL module no earlier than DP deadline
Let's assume a DP module with a 10ms period (a.k.a. a deadline). It starts and finishes early, i.e. in 2ms, providing 10ms of data. LL starts consuming the data in 1ms chunks and will drain the 10ms buffer in 10ms, expecting a new portion of data in the 11th ms.

BUT - the DP module deadline is still 10ms, regardless of whether it finished earlier, and it is completely fine if processing in the next cycle takes the full 10ms - as long as it fits into the deadline. This may lead to underruns:

LL1 (1ms) ---> DP (10ms) ---> LL2 (1ms)

ticks 0..9  - LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting
tick 10     - DP has enough data to run, it starts processing
tick 12     - DP finishes early, LL2 starts consuming, LL1 keeps producing data
ticks 13-19 - LL1 is producing data, LL2 is consuming data (both in 1ms chunks)
tick 20     - DP starts processing a new 10ms portion of data, having 10ms to finish
              !!!! but LL2 has already consumed 8ms !!!!
tick 22     - LL2 is consuming the last 1ms data chunk
tick 23     - DP is still processing, LL2 has no data to process
              !!! UNDERRUN !!!!
tick 29     - DP finishes properly within its deadline

Solution: even if DP finishes before its deadline, the data must be held back until the deadline, so LL2 may start processing no earlier than tick 20.

Signed-off-by: Marcin Szkudlinski <marcin.szkudlinski@intel.com>
1 parent 0405692 commit b84b9e8
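
To make the timing argument easy to check, here is a small standalone C sketch (not part of the commit; all numbers are illustrative, including the assumption that the second DP job legally uses its whole 10ms deadline). It replays the timeline above under both policies and shows the underrun disappearing once DP output is held back until the deadline.

#include <stdbool.h>
#include <stdio.h>

/* LL period = 1ms = 1 tick, DP period/deadline = 10ms, first DP job done at
 * tick 12, second DP job's output visible at tick 30 (its deadline).
 * LL2 consumes 1ms per tick from the moment DP output becomes visible.
 */
static void run(bool hold_until_deadline)
{
	int first_release = hold_until_deadline ? 20 : 12;	/* first 10ms portion visible */
	int second_release = 30;	/* second 10ms portion visible, worst legal case */
	int buffered_ms = 0;		/* data LL2 can still consume */

	printf("%s:\n", hold_until_deadline ?
	       "hold DP output until its deadline" : "release DP output early");

	for (int tick = first_release; tick <= 35; tick++) {
		if (tick == first_release || tick == second_release)
			buffered_ms += 10;
		if (!buffered_ms) {
			printf("  tick %d: LL2 underrun\n", tick);
			return;
		}
		buffered_ms--;	/* LL2 consumes 1ms in this tick */
	}
	printf("  no underrun\n");
}

int main(void)
{
	run(false);	/* prints an underrun a few ticks before the second deadline */
	run(true);	/* no underrun: LL2 starts no earlier than tick 20 */
	return 0;
}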

3 files changed: 68 additions and 27 deletions

src/audio/module_adapter/module_adapter.c

Lines changed: 12 additions & 16 deletions
@@ -1069,32 +1069,28 @@ static int module_adapter_copy_dp_queues(struct comp_dev *dev)
 		dp_queue = dp_queue_get_next_item(dp_queue);
 	}
 
+	if (mod->dp_startup_delay)
+		return 0;
+
 	dp_queue = dp_queue_get_first_item(&mod->dp_queue_dp_to_ll_list);
 	list_for_item(blist, &dev->bsink_list) {
-		/* output - we need to copy data from dp_queue (as source)
-		 * to audio_stream (as sink)
-		 *
-		 * a trick is needed there as a workaround
-		 * DP may produce a huge chunk of output data (i.e. 10 LL
-		 * cycles), and the following module should be able to consume it in 1 cycle chunks
-		 *
-		 * unfortunately some modules are not prepared to work when there's more than
-		 * 1 data portion available in the buffer and are draining buffers with data loss
-		 *
-		 * a workaround: copy only the following module's IBS in each LL cycle
-		 *
-		 * required fix: all modules using sink/src interface must be aware to
-		 * process only data they need, not forcefully draining a buffer
-		 */
+		/* output - we need to copy data from dp_queue (as source)
+		 * to audio_stream (as sink)
+		 */
 		assert(dp_queue);
 		struct comp_buffer *buffer =
 			container_of(blist, struct comp_buffer, source_list);
 		struct sof_sink *data_sink = audio_stream_get_sink(&buffer->stream);
 		struct sof_source *following_mod_data_source =
			audio_stream_get_source(&buffer->stream);
 		struct sof_source *data_src = dp_queue_get_source(dp_queue);
+		size_t dp_data_available = source_get_data_available(data_src);
+
+		if (!dp_data_available)
+			comp_err(dev, "!!!! no data available from DP");
+
 		uint32_t to_copy = MIN(source_get_min_available(following_mod_data_source),
-				       source_get_data_available(data_src));
+				       dp_data_available);
 
 		err = source_to_sink_copy(data_src, data_sink, true, to_copy);
 		if (err)
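
As a side note on the to_copy line above: it caps each LL-cycle copy at whatever the following module reports it needs, so a large DP output portion is drained in per-cycle chunks. A minimal sketch with assumed stream parameters (48 kHz, mono, 16-bit, so 1ms is 96 bytes); the variable names only echo the diff, this is not SOF code:

#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	size_t following_mod_min = 96;	/* 1ms chunk the next LL module asks for */
	size_t dp_data_available = 960;	/* full 10ms portion produced by the DP module */
	size_t to_copy = MIN(following_mod_min, dp_data_available);

	printf("copy %zu bytes this LL cycle\n", to_copy);	/* 96 bytes, i.e. 1ms */
	return 0;
}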

src/include/sof/audio/module_adapter/module/generic.h

Lines changed: 29 additions & 0 deletions
@@ -225,6 +225,35 @@ struct processing_module {
 	/* module-specific flags for comp_verify_params() */
 	uint32_t verify_params_flags;
 
+	/* indicates that this DP module has not yet reached its first deadline and
+	 * no data should be passed yet to the next LL module
+	 *
+	 * why: let's assume a DP module with a 10ms period (a.k.a. a deadline). It starts and
+	 * finishes early, i.e. in 2ms, providing 10ms of data. LL starts consuming the data in
+	 * 1ms chunks and will drain the 10ms buffer in 10ms, expecting a new portion in the 11th ms
+	 * BUT - the DP module deadline is still 10ms, regardless of whether it finished earlier,
+	 * and it is completely fine if processing in the next cycle takes the full 10ms - as long
+	 * as it fits into the deadline.
+	 * This may lead to underruns:
+	 *
+	 * LL1 (1ms) ---> DP (10ms) ---> LL2 (1ms)
+	 *
+	 * ticks 0..9  - LL1 is producing 1ms data portions, DP is waiting, LL2 is waiting
+	 * tick 10     - DP has enough data to run, it starts processing
+	 * tick 12     - DP finishes early, LL2 starts consuming, LL1 keeps producing data
+	 * ticks 13-19 - LL1 is producing data, LL2 is consuming data (both in 1ms chunks)
+	 * tick 20     - DP starts processing a new 10ms portion of data, having 10ms to finish
+	 *               !!!! but LL2 has already consumed 8ms !!!!
+	 * tick 22     - LL2 is consuming the last 1ms data chunk
+	 * tick 23     - DP is still processing, LL2 has no data to process
+	 *               !!! UNDERRUN !!!!
+	 * tick 29     - DP finishes properly within its deadline
+	 *
+	 * Solution: even if DP finishes before its deadline, the data must be held back till
+	 * the deadline, so LL2 may start processing no earlier than tick 20
+	 */
+	bool dp_startup_delay;
+
 	/* flag to indicate module does not pause */
 	bool no_pause;

src/schedule/zephyr_dp_schedule.c

Lines changed: 27 additions & 11 deletions
@@ -35,10 +35,12 @@ struct scheduler_dp_data {
 
 struct task_dp_pdata {
 	k_tid_t thread_id;		/* zephyr thread ID */
-	uint32_t period_clock_ticks;	/* period the task should be scheduled in Zephyr ticks */
+	uint32_t deadline_clock_ticks;	/* dp module deadline in Zephyr ticks */
+	uint32_t deadline_ll_cycles;	/* dp module deadline in LL cycles */
 	k_thread_stack_t __sparse_cache *p_stack;	/* pointer to thread stack */
 	struct k_sem sem;		/* semaphore for task scheduling */
 	struct processing_module *mod;	/* the module to be scheduled */
+	uint32_t ll_cycles_to_deadline;	/* current number of LL cycles till deadline */
 };
 
 /* Single CPU-wide lock
@@ -227,11 +229,20 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
 	lock_key = scheduler_dp_lock();
 	list_for_item(tlist, &dp_sch->tasks) {
 		curr_task = container_of(tlist, struct task, list);
+		pdata = curr_task->priv_data;
+		struct processing_module *mod = pdata->mod;
+
+		/* decrease number of LL ticks/cycles left till the module reaches its deadline */
+		if (pdata->ll_cycles_to_deadline) {
+			pdata->ll_cycles_to_deadline--;
+			if (!pdata->ll_cycles_to_deadline)
+				/* deadline reached, clear startup delay flag.
+				 * see dp_startup_delay comment for details
+				 */
+				mod->dp_startup_delay = false;
+		}
 
-		/* step 1 - check if the module is ready for processing */
 		if (curr_task->state == SOF_TASK_STATE_QUEUED) {
-			pdata = curr_task->priv_data;
-			struct processing_module *mod = pdata->mod;
 			bool mod_ready;
 
 			mod_ready = module_is_ready_to_process(mod, mod->sources,
@@ -240,11 +251,13 @@ void scheduler_dp_ll_tick(void *receiver_data, enum notify_id event_type, void *
							       mod->num_of_sinks);
 			if (mod_ready) {
 				/* set a deadline for given num of ticks, starting now */
-				k_thread_deadline_set(pdata->thread_id, pdata->period_clock_ticks);
+				k_thread_deadline_set(pdata->thread_id,
+						      pdata->deadline_clock_ticks);
 
 				/* trigger the task */
 				curr_task->state = SOF_TASK_STATE_RUNNING;
 				k_sem_give(&pdata->sem);
+				pdata->ll_cycles_to_deadline = pdata->deadline_ll_cycles;
 			}
 		}
 	}
@@ -352,7 +365,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	struct scheduler_dp_data *dp_sch = (struct scheduler_dp_data *)data;
 	struct task_dp_pdata *pdata = task->priv_data;
 	unsigned int lock_key;
-	uint64_t period_clock_ticks;
+	uint64_t deadline_clock_ticks;
 
 	lock_key = scheduler_dp_lock();
 
@@ -371,13 +384,16 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta
 	task->state = SOF_TASK_STATE_QUEUED;
 	list_item_prepend(&task->list, &dp_sch->tasks);
 
-	period_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
-	/* period is in us - convert to seconds in next step
-	 * or it always will be zero because of fixed point calculation
+	deadline_clock_ticks = period * CONFIG_SYS_CLOCK_TICKS_PER_SEC;
+	/* period/deadline is in us - convert to seconds in next step
+	 * or it always will be zero because of integer calculation
 	 */
-	period_clock_ticks /= 1000000;
+	deadline_clock_ticks /= 1000000;
 
-	pdata->period_clock_ticks = period_clock_ticks;
+	pdata->deadline_clock_ticks = deadline_clock_ticks;
+	pdata->deadline_ll_cycles = period / LL_TIMER_PERIOD_US;
+	pdata->ll_cycles_to_deadline = 0;
+	pdata->mod->dp_startup_delay = true;
 	scheduler_dp_unlock(lock_key);
 
 	tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period);
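
The scheduler-side bookkeeping added above boils down to a per-task countdown driven by the LL tick: arm ll_cycles_to_deadline when the DP task is triggered, decrement it on every LL tick, and clear dp_startup_delay once it reaches zero. A minimal standalone sketch of that idea, assuming a 1ms LL period (LL_TIMER_PERIOD_US is defined locally here, and the struct and helper names are hypothetical, not the SOF ones):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LL_TIMER_PERIOD_US 1000	/* assumed 1ms LL tick */

struct fake_dp_task {
	uint32_t deadline_ll_cycles;	/* DP deadline expressed in LL cycles */
	uint32_t ll_cycles_to_deadline;	/* countdown, armed when the task is triggered */
	bool dp_startup_delay;		/* DP output held back while true */
};

/* called once when the DP task is scheduled, period/deadline in microseconds */
static void dp_schedule(struct fake_dp_task *t, uint32_t period_us)
{
	t->deadline_ll_cycles = period_us / LL_TIMER_PERIOD_US;	/* 10000 / 1000 = 10 */
	t->ll_cycles_to_deadline = 0;
	t->dp_startup_delay = true;
}

/* called when the DP task gets a full input portion and is triggered */
static void dp_trigger(struct fake_dp_task *t)
{
	t->ll_cycles_to_deadline = t->deadline_ll_cycles;
}

/* called on every LL tick */
static void ll_tick(struct fake_dp_task *t)
{
	if (t->ll_cycles_to_deadline && !--t->ll_cycles_to_deadline)
		t->dp_startup_delay = false;	/* first deadline reached */
}

int main(void)
{
	struct fake_dp_task t;

	dp_schedule(&t, 10000);	/* 10ms DP period */
	dp_trigger(&t);		/* first full 10ms input portion is ready */
	for (int i = 1; i <= 10; i++) {
		ll_tick(&t);
		printf("LL tick %2d after trigger: dp_startup_delay = %d\n",
		       i, t.dp_startup_delay);
	}
	return 0;	/* the flag clears on the 10th LL tick after triggering */
}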
