diff --git a/src/arch/CMakeLists.txt b/src/arch/CMakeLists.txt index edaaec70a535..8f415856d6a4 100644 --- a/src/arch/CMakeLists.txt +++ b/src/arch/CMakeLists.txt @@ -1,3 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause add_subdirectory(${ARCH}) + +if(NOT CONFIG_ZEPHYR_SOF_MODULE) + add_subdirectory("xtos-wrapper") +endif() diff --git a/src/arch/xtensa/drivers/CMakeLists.txt b/src/arch/xtensa/drivers/CMakeLists.txt index 5fe79db46060..09565763093a 100644 --- a/src/arch/xtensa/drivers/CMakeLists.txt +++ b/src/arch/xtensa/drivers/CMakeLists.txt @@ -1,3 +1,7 @@ # SPDX-License-Identifier: BSD-3-Clause -add_local_sources(sof interrupt.c timer.c cache_attr.c) +add_local_sources(sof interrupt.c cache_attr.c) + +if(NOT CONFIG_ZEPHYR_SOF_MODULE) + add_local_sources(sof timer.c) +endif() diff --git a/src/arch/xtos-wrapper/CMakeLists.txt b/src/arch/xtos-wrapper/CMakeLists.txt new file mode 100644 index 000000000000..fd819740233b --- /dev/null +++ b/src/arch/xtos-wrapper/CMakeLists.txt @@ -0,0 +1 @@ +target_include_directories(sof_public_headers INTERFACE include) diff --git a/src/include/sof/drivers/timer.h b/src/arch/xtos-wrapper/include/sof/drivers/timer.h similarity index 73% rename from src/include/sof/drivers/timer.h rename to src/arch/xtos-wrapper/include/sof/drivers/timer.h index f016a12822de..7e0b965344f3 100644 --- a/src/include/sof/drivers/timer.h +++ b/src/arch/xtos-wrapper/include/sof/drivers/timer.h @@ -11,6 +11,7 @@ #include #include #include +#include #include struct comp_dev; @@ -76,6 +77,46 @@ static inline uint64_t platform_safe_get_time(struct timer *timer) void platform_timer_start(struct timer *timer); void platform_timer_stop(struct timer *timer); +static inline uint64_t k_ms_to_cyc_ceil64(uint64_t ms) +{ + return clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, ms); +} + +static inline uint64_t k_us_to_cyc_ceil64(uint64_t us) +{ + return clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, us); +} + +static inline uint64_t k_ns_to_cyc_near64(uint64_t ns) +{ + return clock_ns_to_ticks(PLATFORM_DEFAULT_CLOCK, ns); +} + +static inline uint64_t k_cyc_to_ms_near64(uint64_t ticks) +{ + return ticks / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); +} + +static inline uint64_t k_cyc_to_us_near64(uint64_t ticks) +{ + return ticks / clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); +} + +static inline uint64_t k_cycle_get_64(void) +{ + return platform_timer_get(timer_get()); +} + +static inline uint64_t k_cycle_get_64_atomic(void) +{ + return platform_timer_get_atomic(timer_get()); +} + +static inline uint64_t k_cycle_get_64_safe(void) +{ + return platform_safe_get_time(timer_get()); +} + /* get timestamp for host stream DMA position */ void platform_host_timestamp(struct comp_dev *host, struct sof_ipc_stream_posn *posn); diff --git a/src/audio/kpb.c b/src/audio/kpb.c index b3f97d692adc..e8a64c08e995 100644 --- a/src/audio/kpb.c +++ b/src/audio/kpb.c @@ -856,7 +856,6 @@ static int kpb_buffer_data(struct comp_dev *dev, uint64_t current_time; enum kpb_state state_preserved = kpb->state; size_t sample_width = kpb->config.sampling_width; - struct timer *timer = timer_get(); comp_dbg(dev, "kpb_buffer_data()"); @@ -874,8 +873,7 @@ static int kpb_buffer_data(struct comp_dev *dev, kpb_change_state(kpb, KPB_STATE_BUFFERING); - timeout = platform_timer_get(timer) + - clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); + timeout = k_cycle_get_64() + k_ms_to_cyc_ceil64(1); /* Let's store audio stream data in internal history buffer */ while (size_to_copy) { /* Reset was requested, it's time to stop buffering and finish @@ 
-888,12 +886,13 @@ static int kpb_buffer_data(struct comp_dev *dev, } /* Are we stuck in buffering? */ - current_time = platform_timer_get(timer); + current_time = k_cycle_get_64(); if (timeout < current_time) { - if (current_time - timeout <= UINT_MAX) + timeout = k_cyc_to_ms_near64(current_time - timeout); + if (timeout <= UINT_MAX) comp_err(dev, "kpb_buffer_data(): timeout of %u [ms] (current state %d, state log %x)", - (unsigned int)(current_time - timeout), kpb->state, + (unsigned int)(timeout), kpb->state, kpb->state_log); else comp_err(dev, @@ -1156,13 +1155,11 @@ static void kpb_init_draining(struct comp_dev *dev, struct kpb_client *cli) * shall take place. This time will be used to * synchronize us with application interrupts. */ - drain_interval = clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, - host_period_size / bytes_per_ms) / + drain_interval = k_ms_to_cyc_ceil64(host_period_size / bytes_per_ms) / KPB_DRAIN_NUM_OF_PPL_PERIODS_AT_ONCE; period_bytes_limit = host_period_size; comp_info(dev, "kpb_init_draining(): sync_draining_mode selected with interval %u [uS].", - (unsigned int)(drain_interval * 1000 / - clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1))); + (unsigned int)k_cyc_to_us_near64(drain_interval)); } else { /* Unlimited draining */ drain_interval = 0; @@ -1225,8 +1222,7 @@ static enum task_state kpb_draining_task(void *arg) uint64_t current_time; size_t period_bytes = 0; size_t period_bytes_limit = draining_data->pb_limit; - struct timer *timer = timer_get(); - size_t period_copy_start = platform_timer_get(timer); + size_t period_copy_start = k_cycle_get_64(); size_t time_taken; size_t *rt_stream_update = &draining_data->buffered_while_draining; struct comp_data *kpb = comp_get_drvdata(draining_data->dev); @@ -1243,7 +1239,7 @@ static enum task_state kpb_draining_task(void *arg) /* Change KPB internal state to DRAINING */ kpb_change_state(kpb, KPB_STATE_DRAINING); - draining_time_start = platform_timer_get(timer); + draining_time_start = k_cycle_get_64(); while (drain_req > 0) { /* Have we received reset request? */ @@ -1256,12 +1252,12 @@ static enum task_state kpb_draining_task(void *arg) * to read the data already provided? */ if (sync_mode_on && - next_copy_time > platform_timer_get(timer)) { + next_copy_time > k_cycle_get_64()) { period_bytes = 0; - period_copy_start = platform_timer_get(timer); + period_copy_start = k_cycle_get_64(); continue; } else if (next_copy_time == 0) { - period_copy_start = platform_timer_get(timer); + period_copy_start = k_cycle_get_64(); } size_to_read = (uintptr_t)buff->end_addr - (uintptr_t)buff->r_ptr; @@ -1308,7 +1304,7 @@ static enum task_state kpb_draining_task(void *arg) } if (sync_mode_on && period_bytes >= period_bytes_limit) { - current_time = platform_timer_get(timer); + current_time = k_cycle_get_64(); time_taken = current_time - period_copy_start; next_copy_time = current_time + drain_interval - time_taken; @@ -1338,14 +1334,13 @@ static enum task_state kpb_draining_task(void *arg) } out: - draining_time_end = platform_timer_get(timer); + draining_time_end = k_cycle_get_64(); /* Reset host-sink copy mode back to its pre-draining value */ comp_set_attribute(kpb->host_sink->sink, COMP_ATTR_COPY_TYPE, &kpb->draining_task_data.copy_type); - draining_time_ms = (draining_time_end - draining_time_start) - / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); + draining_time_ms = k_cyc_to_ms_near64(draining_time_end - draining_time_start); if (draining_time_ms <= UINT_MAX) comp_cl_info(&comp_kpb, "KPB: kpb_draining_task(), done. 
%u drained in %u ms", drained, (unsigned int)draining_time_ms); diff --git a/src/audio/pipeline/pipeline-stream.c b/src/audio/pipeline/pipeline-stream.c index 22cd6499cf14..88a9b5defe8f 100644 --- a/src/audio/pipeline/pipeline-stream.c +++ b/src/audio/pipeline/pipeline-stream.c @@ -424,8 +424,7 @@ int pipeline_trigger_run(struct pipeline *p, struct comp_dev *host, int cmd) list_init(&walk_ctx.pipelines); if (data.delay_ms) - wait_delay(clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, - data.delay_ms)); + wait_delay_ms(data.delay_ms); ret = walk_ctx.comp_func(host, NULL, &walk_ctx, host->direction); if (ret < 0) diff --git a/src/drivers/dw/dma.c b/src/drivers/dw/dma.c index 1d570256b4f0..cf0c27a5b71e 100644 --- a/src/drivers/dw/dma.c +++ b/src/drivers/dw/dma.c @@ -462,7 +462,7 @@ static int dw_dma_status(struct dma_chan_data *channel, status->state = channel->status; status->r_pos = dma_reg_read(channel->dma, DW_SAR(channel->index)); status->w_pos = dma_reg_read(channel->dma, DW_DAR(channel->index)); - status->timestamp = timer_get_system(timer_get()); + status->timestamp = k_cycle_get_64(); if (status->ipc_posn_data) { uint32_t *llp = (uint32_t *)status->ipc_posn_data; diff --git a/src/drivers/dw/ssi-spi.c b/src/drivers/dw/ssi-spi.c index acad4053d6ae..b0ebdef198c1 100644 --- a/src/drivers/dw/ssi-spi.c +++ b/src/drivers/dw/ssi-spi.c @@ -170,9 +170,7 @@ static void spi_stop(struct spi *spi) static void delay(unsigned int ms) { - uint64_t tick = clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, ms); - - wait_delay(tick); + wait_delay_ms(ms); } static int spi_trigger(struct spi *spi, int cmd, int direction) diff --git a/src/drivers/generic/dummy-dma.c b/src/drivers/generic/dummy-dma.c index a4654d95d1b8..cfec9481a809 100644 --- a/src/drivers/generic/dummy-dma.c +++ b/src/drivers/generic/dummy-dma.c @@ -315,7 +315,7 @@ static int dummy_dma_status(struct dma_chan_data *channel, status->r_pos = ch->r_pos; status->w_pos = ch->w_pos; - status->timestamp = timer_get_system(timer_get()); + status->timestamp = k_cycle_get_64(); return 0; } diff --git a/src/drivers/host/timer.c b/src/drivers/host/timer.c index d4ec2d0095bc..cf475b4024b8 100644 --- a/src/drivers/host/timer.c +++ b/src/drivers/host/timer.c @@ -20,6 +20,7 @@ void platform_dai_timestamp(struct comp_dev *dai, { } +#ifndef __ZEPHYR__ uint64_t platform_timer_get(struct timer *timer) { return 0; @@ -33,3 +34,4 @@ uint64_t platform_timer_get_atomic(struct timer *timer) void platform_timer_stop(struct timer *timer) { } +#endif /* __ZEPHYR__ */ diff --git a/src/drivers/imx/edma.c b/src/drivers/imx/edma.c index d43cd92bd7f5..ecc1a6aa78a2 100644 --- a/src/drivers/imx/edma.c +++ b/src/drivers/imx/edma.c @@ -234,7 +234,7 @@ static int edma_status(struct dma_chan_data *channel, */ status->r_pos = dma_chan_reg_read(channel, EDMA_TCD_SADDR); status->w_pos = dma_chan_reg_read(channel, EDMA_TCD_DADDR); - status->timestamp = timer_get_system(timer_get()); + status->timestamp = k_cycle_get_64(); return 0; } diff --git a/src/drivers/imx/sdma.c b/src/drivers/imx/sdma.c index 994536f8f884..be960d891b19 100644 --- a/src/drivers/imx/sdma.c +++ b/src/drivers/imx/sdma.c @@ -556,7 +556,7 @@ static int sdma_status(struct dma_chan_data *channel, status->flags = 0; status->w_pos = 0; status->r_pos = 0; - status->timestamp = timer_get_system(timer_get()); + status->timestamp = k_cycle_get_64(); bd = (struct sdma_bd *)pdata->ccb->current_bd_paddr; diff --git a/src/drivers/intel/baytrail/timer.c b/src/drivers/intel/baytrail/timer.c index 8e8eb0ac1e73..604a5ec92351 100644 --- 
a/src/drivers/intel/baytrail/timer.c +++ b/src/drivers/intel/baytrail/timer.c @@ -19,6 +19,7 @@ #include #include +#ifndef __ZEPHYR__ static void platform_timer_64_handler(void *arg) { struct timer *timer = arg; @@ -142,6 +143,7 @@ uint64_t platform_timer_get_atomic(struct timer *timer) { return platform_timer_get(timer); } +#endif /* __ZEPHYR__ */ /* get timestamp for host stream DMA position */ void platform_host_timestamp(struct comp_dev *host, @@ -167,7 +169,7 @@ void platform_dai_timestamp(struct comp_dev *dai, posn->flags |= SOF_TIME_DAI_VALID; /* get SSP wallclock - DAI sets this to stream start value */ - posn->wallclock = platform_timer_get(timer_get()) - posn->wallclock; + posn->wallclock = k_cycle_get_64() - posn->wallclock; posn->wallclock_hz = clock_get_freq(PLATFORM_DEFAULT_CLOCK); posn->flags |= SOF_TIME_WALL_VALID | SOF_TIME_WALL_64; } @@ -176,9 +178,10 @@ void platform_dai_timestamp(struct comp_dev *dai, void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { /* only 1 wallclock on BYT */ - *wallclock = platform_timer_get(timer_get()); + *wallclock = k_cycle_get_64(); } +#ifndef __ZEPHYR__ static int platform_timer_register(struct timer *timer, void (*handler)(void *arg), void *arg) { @@ -235,3 +238,4 @@ void timer_disable(struct timer *timer, void *arg, int core) interrupt_disable(timer->irq, arg); } +#endif /* __ZEPHYR__ */ diff --git a/src/drivers/intel/cavs/timer.c b/src/drivers/intel/cavs/timer.c index b6b10de9af03..e71cda57741b 100644 --- a/src/drivers/intel/cavs/timer.c +++ b/src/drivers/intel/cavs/timer.c @@ -20,6 +20,7 @@ /** \brief Minimum number of timer recovery cycles in case of delay. */ #define TIMER_MIN_RECOVER_CYCLES 240 /* ~10us at 24.576MHz */ +#ifndef __ZEPHYR__ void platform_timer_start(struct timer *timer) { /* run timer */ @@ -112,6 +113,7 @@ uint64_t platform_timer_get_atomic(struct timer *timer) return ticks_now; } +#endif /* __ZEPHYR__ */ /* get timestamp for host stream DMA position */ void platform_host_timestamp(struct comp_dev *host, @@ -137,7 +139,7 @@ void platform_dai_timestamp(struct comp_dev *dai, posn->flags |= SOF_TIME_DAI_VALID; /* get SSP wallclock - DAI sets this to stream start value */ - posn->wallclock = shim_read64(SHIM_DSPWC) - posn->wallclock; + posn->wallclock = k_cycle_get_64() - posn->wallclock; posn->wallclock_hz = clock_get_freq(PLATFORM_DEFAULT_CLOCK); posn->flags |= SOF_TIME_WALL_VALID; } @@ -145,9 +147,10 @@ void platform_dai_timestamp(struct comp_dev *dai, /* get current wallclock for componnent */ void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { - *wallclock = shim_read64(SHIM_DSPWC); + *wallclock = k_cycle_get_64(); } +#ifndef __ZEPHYR__ static int platform_timer_register(struct timer *timer, void (*handler)(void *arg), void *arg) { @@ -248,3 +251,4 @@ void timer_disable(struct timer *timer, void *arg, int core) } } +#endif /* __ZEPHYR__ */ diff --git a/src/drivers/intel/haswell/timer.c b/src/drivers/intel/haswell/timer.c index f0676b5d7343..e382bd62fc9e 100644 --- a/src/drivers/intel/haswell/timer.c +++ b/src/drivers/intel/haswell/timer.c @@ -14,6 +14,8 @@ #include #include +#ifndef __ZEPHYR__ + void platform_timer_start(struct timer *timer) { //nothing to do on BDW & HSW for cpu timer @@ -45,6 +47,8 @@ uint64_t platform_timer_get_atomic(struct timer *timer) return arch_timer_get_system(timer); } +#endif /* __ZEPHYR__ */ + /* get timestamp for host stream DMA position */ void platform_host_timestamp(struct comp_dev *host, struct sof_ipc_stream_posn *posn) @@ -69,7 +73,7 @@ 
void platform_dai_timestamp(struct comp_dev *dai, posn->flags |= SOF_TIME_DAI_VALID; /* get SSP wallclock - DAI sets this to stream start value */ - posn->wallclock = timer_get_system(timer_get()) - posn->wallclock; + posn->wallclock = k_cycle_get_64() - posn->wallclock; posn->wallclock_hz = clock_get_freq(PLATFORM_DEFAULT_CLOCK); posn->flags |= SOF_TIME_WALL_VALID | SOF_TIME_WALL_64; } @@ -78,9 +82,11 @@ void platform_dai_timestamp(struct comp_dev *dai, void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { /* only 1 wallclock on HSW */ - *wallclock = timer_get_system(timer_get()); + *wallclock = k_cycle_get_64(); } +#ifndef __ZEPHYR__ + int timer_register(struct timer *timer, void (*handler)(void *arg), void *arg) { int ret; @@ -116,3 +122,5 @@ void timer_disable(struct timer *timer, void *arg, int core) interrupt_disable(timer->irq, arg); } + +#endif /* __ZEPHYR__ */ diff --git a/src/drivers/intel/hda/hda-dma.c b/src/drivers/intel/hda/hda-dma.c index 8aa6461f7de5..7505726f5c9d 100644 --- a/src/drivers/intel/hda/hda-dma.c +++ b/src/drivers/intel/hda/hda-dma.c @@ -320,13 +320,10 @@ static inline int hda_dma_is_buffer_empty(struct dma_chan_data *chan) static int hda_dma_wait_for_buffer_full(struct dma_chan_data *chan) { - struct timer *timer = timer_get(); - uint64_t deadline = platform_timer_get(timer) + - clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1) * - HDA_DMA_TIMEOUT / 1000; + uint64_t deadline = k_cycle_get_64() + k_us_to_cyc_ceil64(HDA_DMA_TIMEOUT); while (!hda_dma_is_buffer_full(chan)) { - if (deadline < platform_timer_get(timer)) { + if (deadline < k_cycle_get_64()) { /* safe check in case we've got preempted after read */ if (hda_dma_is_buffer_full(chan)) return 0; @@ -344,13 +341,10 @@ static int hda_dma_wait_for_buffer_full(struct dma_chan_data *chan) static int hda_dma_wait_for_buffer_empty(struct dma_chan_data *chan) { - struct timer *timer = timer_get(); - uint64_t deadline = platform_timer_get(timer) + - clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1) * - HDA_DMA_TIMEOUT / 1000; + uint64_t deadline = k_cycle_get_64() + k_us_to_cyc_ceil64(HDA_DMA_TIMEOUT); while (!hda_dma_is_buffer_empty(chan)) { - if (deadline < platform_timer_get(timer)) { + if (deadline < k_cycle_get_64()) { /* safe check in case we've got preempted after read */ if (hda_dma_is_buffer_empty(chan)) return 0; @@ -730,7 +724,7 @@ static int hda_dma_status(struct dma_chan_data *channel, status->state = channel->status; status->r_pos = dma_chan_reg_read(channel, DGBRP); status->w_pos = dma_chan_reg_read(channel, DGBWP); - status->timestamp = timer_get_system(timer_get()); + status->timestamp = k_cycle_get_64(); return 0; } diff --git a/src/idc/idc.c b/src/idc/idc.c index 3eaa643537aa..fab7d9baeee8 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -90,18 +90,14 @@ int idc_msg_status_get(uint32_t core) */ int idc_wait_in_blocking_mode(uint32_t target_core, bool (*cond)(int)) { - struct timer *timer = timer_get(); - uint64_t deadline; - - deadline = platform_timer_get(timer) + - clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, IDC_TIMEOUT); + uint64_t deadline = k_cycle_get_64() + k_us_to_cyc_ceil64(IDC_TIMEOUT); while (!cond(target_core)) { /* spin here so other core can access IO and timers freely */ idelay(8192); - if (deadline < platform_timer_get(timer)) + if (deadline < k_cycle_get_64()) break; } diff --git a/src/include/sof/lib/clk.h b/src/include/sof/lib/clk.h index 5b6902ceb8b5..95d7eee966b4 100644 --- a/src/include/sof/lib/clk.h +++ b/src/include/sof/lib/clk.h @@ -56,13 +56,15 @@ void 
clock_set_freq(int clock, uint32_t hz); void clock_low_power_mode(int clock, bool enable); +#ifndef __ZEPHYR__ uint64_t clock_ms_to_ticks(int clock, uint64_t ms); uint64_t clock_us_to_ticks(int clock, uint64_t us); -uint64_t clock_ticks_per_sample(int clock, uint32_t sample_rate); +uint64_t clock_ns_to_ticks(int clock, uint64_t ns); +#endif /* __ZEPHYR__ */ -void platform_timer_set_delta(struct timer *timer, uint64_t ns); +uint64_t clock_ticks_per_sample(int clock, uint32_t sample_rate); static inline struct clock_info *clocks_get(void) { diff --git a/src/include/sof/lib/perf_cnt.h b/src/include/sof/lib/perf_cnt.h index 2a9d5c5027be..a5b94fed8b72 100644 --- a/src/include/sof/lib/perf_cnt.h +++ b/src/include/sof/lib/perf_cnt.h @@ -39,8 +39,8 @@ struct perf_cnt_data { /** \brief Initializes timestamps with current timer values. */ #define perf_cnt_init(pcd) do { \ - (pcd)->plat_ts = platform_timer_get(timer_get()); \ - (pcd)->cpu_ts = timer_get_system(cpu_timer_get()); \ + (pcd)->plat_ts = k_cycle_get_64(); \ + (pcd)->cpu_ts = k_cycle_get_64(); \ } while (0) /* Trace macros that can be used as trace_m argument of the perf_cnt_stamp() @@ -66,23 +66,23 @@ struct perf_cnt_data { * more precise line number is desired in the logs. * \param arg Argument passed to trace_m as arg. */ -#define perf_cnt_stamp(pcd, trace_m, arg) do { \ - uint32_t plat_ts = \ - (uint32_t) platform_timer_get(timer_get()); \ - uint32_t cpu_ts = \ - (uint32_t) arch_timer_get_system(cpu_timer_get());\ - if ((pcd)->plat_ts) { \ - (pcd)->plat_delta_last = plat_ts - (pcd)->plat_ts;\ - (pcd)->cpu_delta_last = cpu_ts - (pcd)->cpu_ts; \ - } \ - (pcd)->plat_ts = plat_ts; \ - (pcd)->cpu_ts = cpu_ts; \ - if ((pcd)->plat_delta_last > (pcd)->plat_delta_peak) \ - (pcd)->plat_delta_peak = (pcd)->plat_delta_last; \ - if ((pcd)->cpu_delta_last > (pcd)->cpu_delta_peak) { \ - (pcd)->cpu_delta_peak = (pcd)->cpu_delta_last; \ - trace_m(pcd, arg); \ - } \ +#define perf_cnt_stamp(pcd, trace_m, arg) do { \ + uint32_t plat_ts = \ + (uint32_t)k_cycle_get_64(); \ + uint32_t cpu_ts = \ + (uint32_t)k_cycle_get_64(); \ + if ((pcd)->plat_ts) { \ + (pcd)->plat_delta_last = plat_ts - (pcd)->plat_ts; \ + (pcd)->cpu_delta_last = cpu_ts - (pcd)->cpu_ts; \ + } \ + (pcd)->plat_ts = plat_ts; \ + (pcd)->cpu_ts = cpu_ts; \ + if ((pcd)->plat_delta_last > (pcd)->plat_delta_peak) \ + (pcd)->plat_delta_peak = (pcd)->plat_delta_last; \ + if ((pcd)->cpu_delta_last > (pcd)->cpu_delta_peak) { \ + (pcd)->cpu_delta_peak = (pcd)->cpu_delta_last; \ + trace_m(pcd, arg); \ + } \ } while (0) /** diff --git a/src/include/sof/lib/wait.h b/src/include/sof/lib/wait.h index 6a824cea3f86..a6b19650c493 100644 --- a/src/include/sof/lib/wait.h +++ b/src/include/sof/lib/wait.h @@ -38,14 +38,28 @@ static inline void wait_for_interrupt(int level) tr_dbg(&wait_tr, "WFX"); } +#if !CONFIG_LIBRARY /** * \brief Waits at least passed number of clocks. * \param[in] number_of_clks Minimum number of clocks to wait. */ -#if !CONFIG_LIBRARY void wait_delay(uint64_t number_of_clks); + +/** + * \brief Waits at least passed number of milliseconds. + * \param[in] ms Minimum number of milliseconds to wait. + */ +void wait_delay_ms(uint64_t ms); + +/** + * \brief Waits at least passed number of microseconds. + * \param[in] us Minimum number of microseconds to wait. 
+ */ +void wait_delay_us(uint64_t us); #else static inline void wait_delay(uint64_t number_of_clks) {} +static inline void wait_delay_ms(uint64_t ms) {} +static inline void wait_delay_us(uint64_t us) {} #endif int poll_for_register_delay(uint32_t reg, uint32_t mask, diff --git a/src/include/sof/schedule/ll_schedule_domain.h b/src/include/sof/schedule/ll_schedule_domain.h index c2424338be03..cb9254c1c503 100644 --- a/src/include/sof/schedule/ll_schedule_domain.h +++ b/src/include/sof/schedule/ll_schedule_domain.h @@ -84,7 +84,11 @@ static inline struct ll_schedule_domain *domain_init domain->clk = clk; domain->synchronous = synchronous; domain->full_sync = false; +#ifdef __ZEPHYR__ + domain->ticks_per_ms = k_ms_to_cyc_ceil64(1); +#else domain->ticks_per_ms = clock_ms_to_ticks(clk, 1); +#endif domain->ops = ops; /* maximum value means no tick has been set to timer */ domain->next_tick = UINT64_MAX; @@ -188,8 +192,8 @@ static inline bool domain_is_pending(struct ll_schedule_domain *domain, #ifndef __ZEPHYR__ struct ll_schedule_domain *timer_domain_init(struct timer *timer, int clk); #else -struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk); -#define timer_domain_init zephyr_domain_init +struct ll_schedule_domain *zephyr_domain_init(int clk); +#define timer_domain_init(timer, clk) zephyr_domain_init(clk) #endif struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array, diff --git a/src/include/sof/sof.h b/src/include/sof/sof.h index b64f6b4b6b4f..a11b09456fac 100644 --- a/src/include/sof/sof.h +++ b/src/include/sof/sof.h @@ -57,11 +57,13 @@ struct sof { /* platform clock information */ struct clock_info *clocks; +#ifndef __ZEPHYR__ /* default platform timer */ struct timer *platform_timer; /* cpu (arch) timers - 1 per core */ struct timer *cpu_timers; +#endif /* timer domain for driving timer LL scheduler */ struct ll_schedule_domain *platform_timer_domain; diff --git a/src/include/sof/trace/dma-trace.h b/src/include/sof/trace/dma-trace.h index f05c0d4da7cb..8c7dbcd394e4 100644 --- a/src/include/sof/trace/dma-trace.h +++ b/src/include/sof/trace/dma-trace.h @@ -42,11 +42,12 @@ struct dma_trace_data { uint32_t copy_in_progress; uint32_t stream_tag; uint32_t active_stream_tag; - uint32_t dma_copy_align; /**< Minimal chunk of data possible to be - * copied by dma connected to host - */ - uint32_t dropped_entries; /* amount of dropped entries */ - struct k_spinlock lock; /* dma trace lock */ + uint32_t dma_copy_align; /* Minimal chunk of data possible to be + * copied by dma connected to host + */ + uint32_t dropped_entries; /* amount of dropped entries */ + struct k_spinlock lock; /* dma trace lock */ + uint64_t time_delta; /* difference between the host time */ }; int dma_trace_init_early(struct sof *sof); diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index d6638a7c5994..acc7664af381 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -286,11 +286,11 @@ do { \ /* Just like XTOS, only the most urgent messages go to limited * shared memory. */ -#define _log_nodict(atomic, arg_count, lvl, format, ...) \ -do { \ - if ((lvl) <= MTRACE_DUPLICATION_LEVEL) \ - printk("%llu " format "\n", platform_timer_get(NULL), \ - ##__VA_ARGS__); \ +#define _log_nodict(atomic, arg_count, lvl, format, ...) \ +do { \ + if ((lvl) <= MTRACE_DUPLICATION_LEVEL) \ + printk("%llu " format "\n", k_cycle_get_64(), \ + ##__VA_ARGS__); \ } while (0) #else #define _log_nodict(atomic, n_args, lvl, format, ...) 
diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index ad2b70fe7a81..0c0998f0fe3e 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -802,7 +802,6 @@ static int ipc_dma_trace_config(uint32_t header) struct dma_trace_data *dmat = dma_trace_data_get(); struct ipc *ipc = ipc_get(); struct sof_ipc_dma_trace_params_ext params; - struct timer *timer = timer_get(); int err; if (!dmat) { @@ -820,9 +819,9 @@ static int ipc_dma_trace_config(uint32_t header) * "SOF_IPC_TRACE_DMA_PARAMS_EXT" in your particular * kernel version. */ - platform_timer_set_delta(timer, params.timestamp_ns); + dmat->time_delta = k_ns_to_cyc_near64(params.timestamp_ns) - k_cycle_get_64(); else - timer->delta = 0; + dmat->time_delta = 0; #if CONFIG_HOST_PTABLE err = ipc_process_host_buffer(ipc, ¶ms.buffer, diff --git a/src/lib/agent.c b/src/lib/agent.c index 1aac47796897..ad32d35759ef 100644 --- a/src/lib/agent.c +++ b/src/lib/agent.c @@ -63,7 +63,7 @@ static enum task_state validate(void *data) uint64_t current; uint64_t delta; - current = platform_timer_get(timer_get()); + current = k_cycle_get_64(); delta = current - sa->last_check; perf_cnt_stamp(&sa->pcd, perf_sa_trace, 0 /* ignored */); @@ -101,11 +101,7 @@ void sa_init(struct sof *sof, uint64_t timeout) sof->sa = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->sa)); /* set default timeouts */ -#ifdef __ZEPHYR__ ticks = k_us_to_cyc_ceil64(timeout); -#else - ticks = clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, timeout); -#endif /* TODO: change values after minimal drifts will be assured */ sof->sa->panic_timeout = 2 * ticks; /* 100% delay */ @@ -131,7 +127,7 @@ void sa_init(struct sof *sof, uint64_t timeout) schedule_task(&sof->sa->work, 0, timeout); /* set last check time to now to give time for boot completion */ - sof->sa->last_check = platform_timer_get(timer_get()); + sof->sa->last_check = k_cycle_get_64(); } diff --git a/src/lib/clk.c b/src/lib/clk.c index 7eb9199c9016..5e1b5eed9bf4 100644 --- a/src/lib/clk.c +++ b/src/lib/clk.c @@ -99,6 +99,7 @@ void clock_low_power_mode(int clock, bool enable) clk_info->low_power_mode(clock, enable); } +#ifndef __ZEPHYR__ uint64_t clock_ms_to_ticks(int clock, uint64_t ms) { struct clock_info *clk_info = clocks_get() + clock; @@ -119,6 +120,14 @@ uint64_t clock_us_to_ticks(int clock, uint64_t us) return ticks; } +uint64_t clock_ns_to_ticks(int clock, uint64_t ns) +{ + struct clock_info *clk_info = clocks_get() + clock; + + return clk_info->freqs[clk_info->current_freq_idx].ticks_per_msec * ns / 1000000ULL; +} +#endif /* __ZEPHYR__ */ + uint64_t clock_ticks_per_sample(int clock, uint32_t sample_rate) { struct clock_info *clk_info = clocks_get() + clock; @@ -131,15 +140,3 @@ uint64_t clock_ticks_per_sample(int clock, uint32_t sample_rate) return ticks_per_sample; } - -void platform_timer_set_delta(struct timer *timer, uint64_t ns) -{ - struct clock_info *clk_info = clocks_get() + PLATFORM_DEFAULT_CLOCK; - uint32_t ticks_per_msec = - clk_info->freqs[clk_info->current_freq_idx].ticks_per_msec; - uint64_t ticks; - - ticks = ticks_per_msec * ns / 1000000; - timer->delta = ticks - platform_timer_get(timer); - -} diff --git a/src/lib/pm_runtime.c b/src/lib/pm_runtime.c index 0489bbdc44d6..e27149611180 100644 --- a/src/lib/pm_runtime.c +++ b/src/lib/pm_runtime.c @@ -144,7 +144,7 @@ void init_dsp_r_state(enum dsp_r_state r_state) r_counters = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*r_counters)); prd->r_counters = r_counters; - r_counters->ts = 
platform_timer_get(timer_get()); + r_counters->ts = k_cycle_get_64(); r_counters->cur_r_state = r_state; } @@ -159,7 +159,7 @@ void report_dsp_r_state(enum dsp_r_state r_state) if (!r_counters || r_counters->cur_r_state == r_state) return; - ts = platform_timer_get(timer_get()); + ts = k_cycle_get_64(); delta = ts - r_counters->ts; delta += mailbox_sw_reg_read64(SRAM_REG_R_STATE_TRACE_BASE + diff --git a/src/lib/wait.c b/src/lib/wait.c index 89319941ebf9..a45150173e42 100644 --- a/src/lib/wait.c +++ b/src/lib/wait.c @@ -31,7 +31,7 @@ DECLARE_TR_CTX(wait_tr, SOF_UUID(wait_uuid), LOG_LEVEL_INFO); int poll_for_register_delay(uint32_t reg, uint32_t mask, uint32_t val, uint64_t us) { - uint64_t tick = clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, us); + uint64_t tick = k_us_to_cyc_ceil64(us); uint32_t tries = DEFAULT_TRY_TIMES; uint64_t delta = tick / tries; @@ -58,9 +58,18 @@ int poll_for_register_delay(uint32_t reg, uint32_t mask, void wait_delay(uint64_t number_of_clks) { - struct timer *timer = timer_get(); - uint64_t current = platform_timer_get(timer); + uint64_t timeout = k_cycle_get_64() + number_of_clks; - while ((platform_timer_get(timer) - current) < number_of_clks) + while (k_cycle_get_64() < timeout) idelay(PLATFORM_DEFAULT_DELAY); } + +void wait_delay_ms(uint64_t ms) +{ + wait_delay(k_ms_to_cyc_ceil64(ms)); +} + +void wait_delay_us(uint64_t us) +{ + wait_delay(k_us_to_cyc_ceil64(us)); +} diff --git a/src/platform/baytrail/platform.c b/src/platform/baytrail/platform.c index 621cdd587bed..c051eb869eef 100644 --- a/src/platform/baytrail/platform.c +++ b/src/platform/baytrail/platform.c @@ -143,6 +143,7 @@ const struct ext_man_windows xsram_window }, }; +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER3, /* external timer */ .irq = IRQ_NUM_EXT_TIMER, @@ -152,6 +153,7 @@ static SHARED_DATA struct timer arch_timer = { .id = TIMER1, /* internal timer */ .irq = IRQ_NUM_TIMER1, }; +#endif /* __ZEPHYR__ */ int platform_boot_complete(uint32_t boot_message) { @@ -190,8 +192,10 @@ int platform_init(struct sof *sof) #endif int ret; +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &arch_timer; +#endif /* __ZEPHYR__ */ /* clear mailbox for early trace and debug */ trace_point(TRACE_BOOT_PLATFORM_MBOX); diff --git a/src/platform/haswell/platform.c b/src/platform/haswell/platform.c index a2787bf6f9bb..25668c83d72b 100644 --- a/src/platform/haswell/platform.c +++ b/src/platform/haswell/platform.c @@ -129,10 +129,12 @@ const struct ext_man_windows xsram_window }, }; +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER1, /* internal timer */ .irq = IRQ_NUM_TIMER2, }; +#endif int platform_boot_complete(uint32_t boot_message) { @@ -172,8 +174,10 @@ int platform_init(struct sof *sof) struct dai *ssp1; int ret; +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &timer; +#endif /* clear mailbox for early trace and debug */ trace_point(TRACE_BOOT_PLATFORM_MBOX); diff --git a/src/platform/imx8/platform.c b/src/platform/imx8/platform.c index 59695c03d3e6..37f2a2851cbd 100644 --- a/src/platform/imx8/platform.c +++ b/src/platform/imx8/platform.c @@ -128,10 +128,12 @@ const struct ext_man_windows xsram_window } }; +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER0, /* internal timer */ .irq = IRQ_NUM_TIMER0, }; +#endif int platform_boot_complete(uint32_t boot_message) { @@ -153,8 +155,10 @@ int platform_init(struct sof *sof) { int ret; +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &timer; 
+#endif #ifdef __ZEPHYR__ /* initialize cascade interrupts before any usage */ diff --git a/src/platform/imx8m/platform.c b/src/platform/imx8m/platform.c index 239386fa54f9..15485cc86ed5 100644 --- a/src/platform/imx8m/platform.c +++ b/src/platform/imx8m/platform.c @@ -127,10 +127,12 @@ const struct ext_man_windows xsram_window }, }; +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER0, /* internal timer */ .irq = IRQ_NUM_TIMER0, }; +#endif int platform_boot_complete(uint32_t boot_message) { @@ -152,8 +154,10 @@ int platform_init(struct sof *sof) { int ret; +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &timer; +#endif #ifdef __ZEPHYR__ /* initialize cascade interrupts before any usage */ diff --git a/src/platform/imx8ulp/platform.c b/src/platform/imx8ulp/platform.c index f88d2a1da337..953f8f161711 100644 --- a/src/platform/imx8ulp/platform.c +++ b/src/platform/imx8ulp/platform.c @@ -124,10 +124,12 @@ const struct ext_man_windows xsram_window } }; +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER0, /* internal timer */ .irq = IRQ_NUM_TIMER0, }; +#endif int platform_boot_complete(uint32_t boot_message) { @@ -149,8 +151,10 @@ int platform_init(struct sof *sof) { int ret; +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &timer; +#endif platform_interrupt_init(); platform_clock_init(sof); diff --git a/src/platform/intel/cavs/platform.c b/src/platform/intel/cavs/platform.c index 9001cf1a158a..ab12dec9940b 100644 --- a/src/platform/intel/cavs/platform.c +++ b/src/platform/intel/cavs/platform.c @@ -268,6 +268,7 @@ const int n_iomux = ARRAY_SIZE(iomux_data); #endif +#ifndef __ZEPHYR__ static SHARED_DATA struct timer timer = { .id = TIMER3, /* external timer */ .irq = IRQ_EXT_TSTAMP0_LVL2, @@ -275,6 +276,7 @@ static SHARED_DATA struct timer timer = { }; static SHARED_DATA struct timer arch_timers[CONFIG_CORE_COUNT]; +#endif #if CONFIG_DW_SPI @@ -356,6 +358,7 @@ int platform_init(struct sof *sof) int ret; int i; +#ifndef __ZEPHYR__ sof->platform_timer = cache_to_uncache(&timer); sof->cpu_timers = (struct timer *)cache_to_uncache(&arch_timers); @@ -364,6 +367,7 @@ int platform_init(struct sof *sof) .id = TIMER1, /* internal timer */ .irq = IRQ_NUM_TIMER2, }; +#endif /* Turn off memory for all unused cores */ for (i = 0; i < CONFIG_CORE_COUNT; i++) @@ -401,8 +405,7 @@ int platform_init(struct sof *sof) scheduler_init_edf(); /* init low latency timer domain and scheduler */ - sof->platform_timer_domain = - timer_domain_init(sof->platform_timer, PLATFORM_DEFAULT_CLOCK); + sof->platform_timer_domain = timer_domain_init(sof->platform_timer, PLATFORM_DEFAULT_CLOCK); scheduler_init_ll(sof->platform_timer_domain); /* init the system agent */ diff --git a/src/platform/library/lib/clk.c b/src/platform/library/lib/clk.c index 90419118b045..bd758356604b 100644 --- a/src/platform/library/lib/clk.c +++ b/src/platform/library/lib/clk.c @@ -2,6 +2,7 @@ #include +#ifndef __ZEPHYR__ uint64_t clock_ms_to_ticks(int clock, uint64_t ms) { return 0; @@ -12,4 +13,8 @@ uint64_t clock_us_to_ticks(int clock, uint64_t us) return 0; } -void platform_timer_set_delta(struct timer *timer, uint64_t ns) {} +uint64_t clock_ns_to_ticks(int clock, uint64_t ns) +{ + return 0; +} +#endif /* __ZEPHYR__ */ diff --git a/src/platform/library/platform.c b/src/platform/library/platform.c index 61301c46b980..0b79c01af69b 100644 --- a/src/platform/library/platform.c +++ b/src/platform/library/platform.c @@ -13,7 +13,9 @@ #include #include +#ifndef __ZEPHYR__ 
static SHARED_DATA struct timer timer = {}; +#endif /* __ZEPHYR__ */ static uint8_t mailbox[MAILBOX_DSPBOX_SIZE + MAILBOX_HOSTBOX_SIZE + @@ -36,8 +38,10 @@ int dmac_init(struct sof *sof) int platform_init(struct sof *sof) { +#ifndef __ZEPHYR__ sof->platform_timer = &timer; sof->cpu_timers = &timer; +#endif platform_clock_init(sof); diff --git a/src/platform/mt8195/lib/clk.c b/src/platform/mt8195/lib/clk.c index 3a548417491e..078c17c8f988 100644 --- a/src/platform/mt8195/lib/clk.c +++ b/src/platform/mt8195/lib/clk.c @@ -82,11 +82,11 @@ static void clk_dsppll_enable(void) io_reg_update_bits(AUDIODSP_CK_CG, 0x1 << RG_AUDIODSP_SW_CG, 0x0); clk_setl(DSPPLL_CON4, PLL_PWR_ON); - wait_delay(clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1)); + wait_delay_us(1); clk_clrl(DSPPLL_CON4, PLL_ISO_EN); - wait_delay(clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1)); + wait_delay_us(1); clk_setl(DSPPLL_CON0, PLL_EN); - wait_delay(clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 20)); + wait_delay_us(20); dsppll_enable = 1; } @@ -95,9 +95,9 @@ static void clk_dsppll_disable(void) tr_dbg(&clkdrv_tr, "clk_dsppll_disable\n"); clk_clrl(DSPPLL_CON0, PLL_EN); - wait_delay(clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1)); + wait_delay_us(1); clk_setl(DSPPLL_CON4, PLL_ISO_EN); - wait_delay(clock_us_to_ticks(PLATFORM_DEFAULT_CLOCK, 1)); + wait_delay_us(1); clk_clrl(DSPPLL_CON4, PLL_PWR_ON); dsppll_enable = 0; } diff --git a/src/probe/probe.c b/src/probe/probe.c index 0719493a783d..bbc6a91345b1 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -613,7 +613,7 @@ static int probe_gen_header(struct comp_buffer *buffer, uint32_t size, uint32_t crc; header = &_probe->header; - timestamp = platform_timer_get(timer_get()); + timestamp = k_cycle_get_64(); header->sync_word = PROBE_EXTRACT_SYNC_WORD; header->buffer_id = buffer->id; diff --git a/src/samples/audio/kwd_nn_detect_test.c b/src/samples/audio/kwd_nn_detect_test.c index e19ad55b9a3f..25cdb1efdeb1 100644 --- a/src/samples/audio/kwd_nn_detect_test.c +++ b/src/samples/audio/kwd_nn_detect_test.c @@ -62,7 +62,6 @@ void kwd_nn_detect_test(struct comp_dev *dev, if (test_keyword_get_input_size(dev) > one_sec_samples) { uint64_t time_start; uint64_t time_stop; - struct timer *timer = timer_get(); int result; int i, j; @@ -78,15 +77,14 @@ void kwd_nn_detect_test(struct comp_dev *dev, test_keyword_get_input_byte(dev, 6), test_keyword_get_input_byte(dev, 7) ); - time_start = platform_timer_get(timer); + time_start = k_cycle_get_64(); kwd_nn_preprocess_1s(test_keyword_get_input(dev), preprocessed_data); kwd_nn_process_data(preprocessed_data, confidences); result = kwd_nn_detect_postprocess(confidences); - time_stop = platform_timer_get(timer); + time_stop = k_cycle_get_64(); comp_dbg(dev, "KWD: kwd_nn_detect_test_copy() inference done in %u ms", - (unsigned int)((time_stop - time_start) - / clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1))); + (unsigned int)k_cyc_to_ms_near64(time_stop - time_start)); switch (result) { case KWD_NN_YES_KEYWORD: case KWD_NN_NO_KEYWORD: diff --git a/src/schedule/dma_multi_chan_domain.c b/src/schedule/dma_multi_chan_domain.c index 4883945aedbb..5e18652b918b 100644 --- a/src/schedule/dma_multi_chan_domain.c +++ b/src/schedule/dma_multi_chan_domain.c @@ -316,7 +316,7 @@ static bool dma_multi_chan_domain_is_pending(struct ll_schedule_domain *domain, /* it's too soon for this task */ if (!pipe_task->registrable && pipe_task->task.start > - platform_timer_get_atomic(timer_get())) + k_cycle_get_64_atomic()) continue; } diff --git a/src/schedule/dma_single_chan_domain.c 
b/src/schedule/dma_single_chan_domain.c index 8e3c33b30561..528baec505bb 100644 --- a/src/schedule/dma_single_chan_domain.c +++ b/src/schedule/dma_single_chan_domain.c @@ -450,7 +450,7 @@ static void dma_single_chan_domain_set(struct ll_schedule_domain *domain, return; if (dma_domain->channel_changed) { - domain->next_tick = platform_timer_get_atomic(timer_get()); + domain->next_tick = k_cycle_get_64_atomic(); dma_domain->channel_changed = false; } else { @@ -487,7 +487,7 @@ static void dma_single_chan_domain_clear(struct ll_schedule_domain *domain) static bool dma_single_chan_domain_is_pending(struct ll_schedule_domain *domain, struct task *task, struct comp_dev **comp) { - return task->start <= platform_timer_get_atomic(timer_get()); + return task->start <= k_cycle_get_64_atomic(); } /** diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index c6cf0ccfcd0a..dad692a9b710 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -247,7 +247,7 @@ static void schedule_ll_tasks_run(void *data) tr_dbg(&ll_tr, "timer interrupt on core %d, at %u, previous next_tick %u", core, - (unsigned int)platform_timer_get_atomic(timer_get()), + (unsigned int)k_cycle_get_64_atomic(), (unsigned int)domain->next_tick); irq_local_disable(flags); @@ -277,7 +277,7 @@ static void schedule_ll_tasks_run(void *data) key = k_spin_lock(&domain->lock); /* reset the new_target_tick for the first core */ - if (domain->new_target_tick < platform_timer_get_atomic(timer_get())) + if (domain->new_target_tick < k_cycle_get_64_atomic()) domain->new_target_tick = UINT64_MAX; /* update the new_target_tick according to tasks on current core */ @@ -327,7 +327,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, task_start_us = period ? period : start; task_start_ticks = domain->ticks_per_ms * task_start_us / 1000; - task_start = task_start_ticks + platform_timer_get_atomic(timer_get()); + task_start = task_start_ticks + k_cycle_get_64_atomic(); if (reference) { task->start = reference->start; @@ -362,7 +362,7 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, tr_info(&ll_tr, "new added task->start %u at %u", (unsigned int)task->start, - (unsigned int)platform_timer_get_atomic(timer_get())); + (unsigned int)k_cycle_get_64_atomic()); tr_info(&ll_tr, "num_tasks %ld total_num_tasks %ld", atomic_read(&sch->num_tasks), atomic_read(&domain->total_num_tasks)); @@ -645,7 +645,7 @@ static int reschedule_ll_task(void *data, struct task *task, uint64_t start) time = sch->domain->ticks_per_ms * start / 1000; - time += platform_timer_get_atomic(timer_get()); + time += k_cycle_get_64_atomic(); irq_local_disable(flags); @@ -688,7 +688,7 @@ static void scheduler_free_ll(void *data, uint32_t flags) static void ll_scheduler_recalculate_tasks(struct ll_schedule_data *sch, struct clock_notify_data *clk_data) { - uint64_t current = platform_timer_get_atomic(timer_get()); + uint64_t current = k_cycle_get_64_atomic(); struct list_item *tlist; struct task *task; uint64_t delta_ms; @@ -714,8 +714,7 @@ static void ll_scheduler_notify(void *arg, enum notify_id type, void *data) /* we need to recalculate tasks when clock frequency changes */ if (clk_data->message == CLOCK_NOTIFY_POST) { - sch->domain->ticks_per_ms = clock_ms_to_ticks(sch->domain->clk, - 1); + sch->domain->ticks_per_ms = k_ms_to_cyc_ceil64(1); ll_scheduler_recalculate_tasks(sch, clk_data); } diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 6b9960268f3e..397916a34d4d 100644 --- 
a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -47,7 +47,6 @@ struct zephyr_domain_thread { struct zephyr_domain { struct k_timer timer; - struct timer *ll_timer; struct zephyr_domain_thread domain_thread[CONFIG_CORE_COUNT]; struct ll_schedule_domain *ll_domain; }; @@ -229,7 +228,7 @@ static const struct ll_schedule_domain_ops zephyr_domain_ops = { .domain_is_pending = zephyr_domain_is_pending }; -struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk) +struct ll_schedule_domain *zephyr_domain_init(int clk) { struct ll_schedule_domain *domain; struct zephyr_domain *zephyr_domain; @@ -240,7 +239,6 @@ struct ll_schedule_domain *zephyr_domain_init(struct timer *timer, int clk) zephyr_domain = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*zephyr_domain)); - zephyr_domain->ll_timer = timer; zephyr_domain->ll_domain = domain; ll_sch_domain_set_pdata(domain, zephyr_domain); diff --git a/src/trace/trace.c b/src/trace/trace.c index bd948d30e734..a2ad20d12c59 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -74,9 +74,9 @@ static void put_header(void *dst, const struct sof_uuid_entry *uid, uint32_t id_1, uint32_t id_2, uint32_t entry, uint64_t timestamp) { - struct timer *timer = timer_get(); + struct dma_trace_data *trace_data = dma_trace_data_get(); /* Support very early tracing */ - uint64_t delta = timer ? timer->delta : 0; + uint64_t delta = dma_trace_initialized(trace_data) ? trace_data->time_delta : 0; struct log_entry_header header; int ret; @@ -89,7 +89,6 @@ static void put_header(void *dst, const struct sof_uuid_entry *uid, ret = memcpy_s(dst, sizeof(header), &header, sizeof(header)); assert(!ret); - } #ifndef __ZEPHYR__ @@ -244,8 +243,7 @@ static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ int i; /* fill log content. arg_count is in the dictionary. 
*/ - put_header(data, ctx->uuid_p, id_1, id_2, log_entry, - platform_safe_get_time(timer_get())); + put_header(data, ctx->uuid_p, id_1, id_2, log_entry, k_cycle_get_64_safe()); for (i = 0; i < arg_count; ++i) data[PAYLOAD_OFFSET(i)] = va_arg(vargs, uint32_t); @@ -286,7 +284,7 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr #if CONFIG_TRACE_FILTERING_ADAPTIVE if (!trace->user_filter_override) { - const uint64_t current_ts = platform_safe_get_time(timer_get()); + const uint64_t current_ts = k_cycle_get_64_safe(); emit_recent_entries(current_ts); @@ -524,7 +522,7 @@ static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_addres int i; char packet[MESSAGE_SIZE(_TRACE_EVENT_MAX_ARGUMENT_COUNT)]; uint32_t *args = (uint32_t *)&packet[MESSAGE_SIZE(0)]; - const uint64_t tstamp = platform_safe_get_time(timer_get()); + const uint64_t tstamp = k_cycle_get_64_safe(); put_header(packet, dt_tr.uuid_p, _TRACE_INV_ID, _TRACE_INV_ID, dict_entry_address, tstamp); diff --git a/test/cmocka/CMakeLists.txt b/test/cmocka/CMakeLists.txt index b5928708af7a..be08c851375e 100644 --- a/test/cmocka/CMakeLists.txt +++ b/test/cmocka/CMakeLists.txt @@ -60,6 +60,7 @@ endif() if(CONFIG_CAVS) target_include_directories(sof_options INTERFACE ${PROJECT_SOURCE_DIR}/src/platform/intel/cavs/include) endif() +target_include_directories(sof_options INTERFACE ${PROJECT_SOURCE_DIR}/src/arch/xtos-wrapper/include) # linker script, just for log entries set(memory_mock_lds_in ${PROJECT_SOURCE_DIR}/test/cmocka/memory_mock.x.in) @@ -107,7 +108,7 @@ function(cmocka_test test_name) # Cmocka requires this define for stdint.h that defines uintptr target_compile_definitions(${test_name} PRIVATE -D_UINTPTR_T_DEFINED) - # Enable features those would be disabled in some platforms + # Enable features those would be disabled in some platforms target_compile_definitions(${test_name} PRIVATE -DCONFIG_NUMBERS_GCD -DCONFIG_NUMBERS_NORM -DCONFIG_NUMBERS_VECTOR_FIND) # Skip running alloc test on HOST until it's fixed (it passes and is run diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c index bf5c55a6d7b9..315f3f10807d 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -32,7 +32,9 @@ /* global contexts */ WEAK struct ipc *_ipc; +#ifndef __ZEPHYR__ WEAK struct timer *platform_timer; +#endif WEAK struct schedulers *schedulers; WEAK struct sof sof; WEAK struct tr_ctx buffer_tr; @@ -228,6 +230,14 @@ void WEAK wait_delay(uint64_t number_of_clks) { } +void WEAK wait_delay_ms(uint64_t ms) +{ +} + +void WEAK wait_delay_us(uint64_t us) +{ +} + void WEAK xthal_icache_region_invalidate(void *addr, unsigned size) { } @@ -322,6 +332,7 @@ int WEAK comp_set_state(struct comp_dev *dev, int cmd) return 0; } +#ifndef __ZEPHYR__ uint64_t WEAK clock_ms_to_ticks(int clock, uint64_t ms) { (void)clock; @@ -338,6 +349,15 @@ uint64_t WEAK clock_us_to_ticks(int clock, uint64_t us) return 0; } +uint64_t WEAK clock_ns_to_ticks(int clock, uint64_t ns) +{ + (void)clock; + (void)ns; + + return 0; +} +#endif /* __ZEPHYR__ */ + #if CONFIG_MULTICORE && !CONFIG_LIBRARY int WEAK idc_send_msg(struct idc_msg *msg, uint32_t mode) diff --git a/zephyr/include/sof/drivers/timer.h b/zephyr/include/sof/drivers/timer.h new file mode 100644 index 000000000000..818464838cfd --- /dev/null +++ b/zephyr/include/sof/drivers/timer.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2022 Intel Corporation. All rights reserved. 
+ */ + +#ifndef __SOF_DRIVERS_TIMER1_H__ +#define __SOF_DRIVERS_TIMER1_H__ + +#include +#include +#include +#include + +struct comp_dev; +struct sof_ipc_stream_posn; + +#define k_cycle_get_64_safe() k_cycle_get_64() +#define k_cycle_get_64_atomic() k_cycle_get_64() +#define platform_timer_stop(x) + +/* get timestamp for host stream DMA position */ +void platform_host_timestamp(struct comp_dev *host, + struct sof_ipc_stream_posn *posn); + +/* get timestamp for DAI stream DMA position */ +void platform_dai_timestamp(struct comp_dev *dai, + struct sof_ipc_stream_posn *posn); + +/* get current wallclock for component */ +void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock); + +#endif /* __SOF_DRIVERS_TIMER1_H__ */ diff --git a/zephyr/include/sof/trace/trace.h b/zephyr/include/sof/trace/trace.h index 64888cff22d6..caeeada6eab5 100644 --- a/zephyr/include/sof/trace/trace.h +++ b/zephyr/include/sof/trace/trace.h @@ -32,10 +32,10 @@ uint64_t platform_timer_get(struct timer *timer); #undef mtrace_printf #if USE_PRINTK -#define mtrace_printf(level, format, ...) \ - do { \ - if ((level) <= SOF_ZEPHYR_TRACE_LEVEL) \ - printk("%llu: " format "\n", platform_timer_get(NULL), \ +#define mtrace_printf(level, format, ...) \ + do { \ + if ((level) <= SOF_ZEPHYR_TRACE_LEVEL) \ + printk("%llu: " format "\n", k_cycle_get_64(), \ ##__VA_ARGS__); \ } while (0) #else diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index 76ed4760d2fd..e12c656299ef 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -363,92 +363,6 @@ void platform_interrupt_clear(uint32_t irq, uint32_t mask) } #endif -/* - * Timers. - * - * Mostly mapped. TODO: align with 64bit Zephyr timers when they are upstream. - */ - -#if !CONFIG_LIBRARY -uint64_t arch_timer_get_system(struct timer *timer) -{ - return platform_timer_get(timer); -} -#endif - -uint64_t platform_timer_get(struct timer *timer) -{ -#if CONFIG_SOC_SERIES_INTEL_ADSP_BAYTRAIL - uint32_t low; - uint32_t high; - uint64_t time; - - do { - /* TODO: check and see whether 32bit IRQ is pending for timer */ - high = timer->hitime; - /* read low 32 bits */ - low = shim_read(SHIM_EXT_TIMER_STAT); - } while (high != timer->hitime); - - time = ((uint64_t)high << 32) | low; - - return time; -#elif CONFIG_SOC_SERIES_INTEL_ADSP_BROADWELL || CONFIG_LIBRARY - // FIXME! - return 0; -#elif CONFIG_IMX - /* For i.MX use Xtensa timer, as we do now with SOF */ - uint64_t time = 0; - uint32_t low; - uint32_t high; - uint32_t ccompare; - - if (!timer || timer->id >= ARCH_TIMER_COUNT) - goto out; - - ccompare = xthal_get_ccompare(timer->id); - - /* read low 32 bits */ - low = xthal_get_ccount(); - - /* check and see whether 32bit IRQ is pending for timer */ - if (arch_interrupt_get_status() & (1 << timer->irq) && ccompare == 1) { - /* yes, overflow has occurred but handler has not run */ - high = timer->hitime + 1; - } else { - /* no overflow */ - high = timer->hitime; - } - - time = ((uint64_t)high << 32) | low; - -out: - - return time; -#elif CONFIG_SOF_ZEPHYR - return k_cycle_get_64(); -#else - /* CAVS versions */ - return shim_read64(SHIM_DSPWC); -#endif -} - -void platform_timer_stop(struct timer *timer) -{ -} - -uint64_t platform_timer_get_atomic(struct timer *timer) -{ - uint32_t flags; - uint64_t ticks_now; - - irq_local_disable(flags); - ticks_now = platform_timer_get(timer); - irq_local_enable(flags); - - return ticks_now; -} - /* * Notifier. 
* @@ -709,7 +623,7 @@ void platform_dai_timestamp(struct comp_dev *dai, posn->flags |= SOF_TIME_DAI_VALID; /* get SSP wallclock - DAI sets this to stream start value */ - posn->wallclock = platform_timer_get(NULL) - posn->wallclock; + posn->wallclock = k_cycle_get_64() - posn->wallclock; posn->wallclock_hz = clock_get_freq(PLATFORM_DEFAULT_CLOCK); posn->flags |= SOF_TIME_WALL_VALID; } @@ -717,7 +631,7 @@ void platform_dai_timestamp(struct comp_dev *dai, /* get current wallclock for componnent */ void platform_dai_wallclock(struct comp_dev *dai, uint64_t *wallclock) { - *wallclock = platform_timer_get(NULL); + *wallclock = k_cycle_get_64(); } /*