diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h
index 155964698c56..1d759a6bb95c 100644
--- a/src/include/sof/trace/trace.h
+++ b/src/include/sof/trace/trace.h
@@ -97,9 +97,6 @@ struct trace_filter {
 
 #if CONFIG_TRACE
 
-#include <stdarg.h>
-#include <user/trace.h> /* LOG_LEVEL_... */
-
 /*
  * trace_event macro definition
  *
@@ -147,13 +144,10 @@ void trace_off(void);
 void trace_init(struct sof *sof);
 
 /* All tracing macros in this file end up calling these functions in the end. */
-typedef void (*log_func_t)(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			   uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
-
 void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
+			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...);
 void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args);
+			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...);
 struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem,
 						    struct sof_ipc_trace_filter_elem *end,
 						    struct trace_filter *filter);
@@ -204,24 +198,6 @@ void mtrace_event(const char *complete_packet, uint32_t length);
 		format						\
 	}
 
-#ifdef CONFIG_TRACEM /* Send everything to shared memory too */
-# ifdef __ZEPHYR__
-/* We don't use Zephyr's dictionary yet so there's not enough space for
- * DEBUG messages
- */
-#  define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_INFO
-# else
-#  define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_DEBUG
-# endif
-#else /* copy only ERRORS */
-# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_ERROR
-#endif /* CONFIG_TRACEM */
-
-/* This function is _not_ passed the format string to save space */
-void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
-		  const struct tr_ctx *ctx, const uint32_t lvl,
-		  uint32_t id_1, uint32_t id_2, int arg_count, ...);
-
 /* _log_message() */
 
 #ifdef CONFIG_LIBRARY
@@ -261,8 +237,7 @@ _thrown_from_macro_BASE_LOG_in_trace_h
 
 /** _log_message is where the memory-saving dictionary magic described
  * above happens: the "format" string argument is moved to a special
- * linker section and replaced by a &log_entry pointer to it. This must
- * be a macro for the source location to be meaningful.
+ * linker section and replaced by a &log_entry pointer to it.
 */
 #define _log_message(log_func, atomic, lvl, comp_class, ctx, id_1, id_2, format, ...)	\
 do {											\
@@ -273,26 +248,10 @@ do {										\
 		META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__),				\
 		BASE_LOG_ASSERT_FAIL_MSG						\
 	);										\
-	_log_sofdict(log_func, atomic, &log_entry, ctx, lvl, id_1, id_2,		\
-		     META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__);	\
-	_log_nodict(atomic, META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__),		\
-		    lvl, format, ##__VA_ARGS__);					\
+	log_func(atomic, &log_entry, ctx, lvl, id_1, id_2,				\
+		 META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__);		\
 } while (0)
 
-#ifdef __ZEPHYR__
-/* Just like XTOS, only the most urgent messages go to limited
- * shared memory.
- */
-#define _log_nodict(atomic, arg_count, lvl, format, ...)			\
-do {										\
-	if ((lvl) <= MTRACE_DUPLICATION_LEVEL)					\
-		printk("%llu " format "\n", platform_timer_get(NULL),		\
-		       ##__VA_ARGS__);						\
-} while (0)
-#else
-#define _log_nodict(atomic, n_args, lvl, format, ...)
-#endif
-
 #endif /* CONFIG_LIBRARY */
 
 #else /* CONFIG_TRACE */
diff --git a/src/trace/trace.c b/src/trace/trace.c
index f311d6021950..f31fb3a1c3e8 100644
--- a/src/trace/trace.c
+++ b/src/trace/trace.c
@@ -92,7 +92,6 @@ static void put_header(void *dst, const struct sof_uuid_entry *uid,
 
 }
 
-#ifndef __ZEPHYR__
 /** Ring buffer for the mailbox trace */
 void mtrace_event(const char *data, uint32_t length)
 {
@@ -111,7 +110,6 @@ void mtrace_event(const char *data, uint32_t length)
 	dcache_writeback_region(t + trace->pos, length);
 	trace->pos += length;
 }
-#endif /* __ZEPHYR__ */
 
 #if CONFIG_TRACE_FILTERING_VERBOSITY
 /**
@@ -224,15 +222,21 @@ static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t mess
 
 /** Implementation shared and invoked by both adaptive filtering and
  * not. Serializes events into trace messages and passes them to
- * dtrace_event()
+ * dtrace_event() or to mtrace_event() or to both, depending on the log
+ * level and the Kconfig configuration.
  */
-static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
-			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
+static void vatrace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx,
+			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs)
 {
 	uint32_t data[MESSAGE_SIZE_DWORDS(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
 	const int message_size = MESSAGE_SIZE(arg_count);
 	int i;
 
+#if CONFIG_TRACEM
+	unsigned long flags;
+	struct trace *trace = trace_get();
+#endif /* CONFIG_TRACEM */
+
 	/* fill log content. arg_count is in the dictionary. */
 	put_header(data, ctx->uuid_p, id_1, id_2, log_entry,
 		   platform_safe_get_time(timer_get()));
@@ -246,25 +250,42 @@ static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_
 	else
 		dtrace_event((const char *)data, message_size);
 
+#if CONFIG_TRACEM
+	/* send event by mail box too. */
+	if (send_atomic) {
+		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
+	} else {
+		spin_lock_irq(&trace->lock, flags);
+		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
+		spin_unlock_irq(&trace->lock, flags);
+	}
+#else
+	/* send event by mail box if level is LOG_LEVEL_CRITICAL. */
+	if (lvl == LOG_LEVEL_CRITICAL)
+		mtrace_event((const char *)data, MESSAGE_SIZE(arg_count));
+#endif /* CONFIG_TRACEM */
 }
 
 void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl)
+			  uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
 {
 	struct trace *trace = trace_get();
+	va_list vl;
 
 	if (!trace->enable) {
 		return;
 	}
 
-	dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
+	va_start(vl, arg_count);
+	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
+	va_end(vl);
 }
 
 void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl)
+			uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
 {
 	struct trace *trace = trace_get();
-
+	va_list vl;
 #if CONFIG_TRACE_FILTERING_ADAPTIVE
 	uint64_t current_ts;
 #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
@@ -289,7 +310,9 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr
 	}
 #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */
 
-	dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
+	va_start(vl, arg_count);
+	vatrace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl);
+	va_end(vl);
 }
 
 struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem,
@@ -509,9 +532,9 @@ void trace_init(struct sof *sof)
 	dma_trace_init_early(sof);
 }
 
-static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_address,
-				 int n_args, va_list ap)
+void mtrace_dict_entry(bool atomic_context, uint32_t dict_entry_address, int n_args, ...)
 {
+	va_list ap;
 	int i;
 	char packet[MESSAGE_SIZE(_TRACE_EVENT_MAX_ARGUMENT_COUNT)];
 	uint32_t *args = (uint32_t *)&packet[MESSAGE_SIZE(0)];
@@ -520,8 +543,10 @@ static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_addres
 	put_header(packet, dt_tr.uuid_p, _TRACE_INV_ID, _TRACE_INV_ID,
 		   dict_entry_address, tstamp);
 
+	va_start(ap, n_args);
 	for (i = 0; i < MIN(n_args, _TRACE_EVENT_MAX_ARGUMENT_COUNT); i++)
 		args[i] = va_arg(ap, uint32_t);
+	va_end(ap);
 
 	if (atomic_context) {
 		mtrace_event(packet, MESSAGE_SIZE(n_args));
@@ -534,31 +559,3 @@ static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_addres
 		spin_unlock_irq(&trace->lock, saved_flags);
 	}
 }
-
-void mtrace_dict_entry(bool atomic_context, uint32_t dict_entry_address, int n_args, ...)
-{
-	va_list ap;
-
-	va_start(ap, n_args);
-	mtrace_dict_entry_vl(atomic_context, dict_entry_address, n_args, ap);
-	va_end(ap);
-}
-
-void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
-		  const struct tr_ctx *ctx, const uint32_t lvl,
-		  uint32_t id_1, uint32_t id_2, int arg_count, ...)
-{
-	va_list ap;
-
-#ifndef __ZEPHYR__ /* for Zephyr see _log_nodict() in trace.h */
-	if (lvl <= MTRACE_DUPLICATION_LEVEL) {
-		va_start(ap, arg_count);
-		mtrace_dict_entry_vl(atomic, (uint32_t)log_entry, arg_count, ap);
-		va_end(ap);
-	}
-#endif
-
-	va_start(ap, arg_count);
-	sofdict_logf(atomic, log_entry, ctx, lvl, id_1, id_2, arg_count, ap);
-	va_end(ap);
-}
diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c
index ddabdad353d6..e362f4b953bd 100644
--- a/test/cmocka/src/common_mocks.c
+++ b/test/cmocka/src/common_mocks.c
@@ -102,8 +102,7 @@ void WEAK __panic(uint32_t p, char *filename, uint32_t linenum)
 
 #if CONFIG_TRACE
 void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx,
-			     uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count,
-			     va_list args)
+			     uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...)
 {
 	(void) send_atomic;
 	(void) log_entry;
@@ -112,13 +111,6 @@ void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const stru
 	(void) id_1;
 	(void) id_2;
 	(void) arg_count;
-	(void) args;
-}
-
-void WEAK _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry,
-		       const struct tr_ctx *ctx, const uint32_t lvl,
-		       uint32_t id_1, uint32_t id_2, int arg_count, ...)
-{
 }
 
 void WEAK trace_flush_dma_to_mbox(void)
diff --git a/zephyr/include/sof/trace/trace.h b/zephyr/include/sof/trace/trace.h
index 64888cff22d6..7d24708c8845 100644
--- a/zephyr/include/sof/trace/trace.h
+++ b/zephyr/include/sof/trace/trace.h
@@ -25,10 +25,10 @@ struct timer;
 uint64_t platform_timer_get(struct timer *timer);
 
 /*
- * Override SOF mtrace_printf() macro for now to support Zephyr's shared
- * memory logger. Note the DMA trace can be copied to the shared memory
- * too thanks to CONFIG_TRACEM.
+ * Override SOF dictionary macros for now and let Zephyr take care of
+ * the physical log IO.
 */
+#undef _log_message
 #undef mtrace_printf
 
 #if USE_PRINTK
@@ -38,8 +38,18 @@ uint64_t platform_timer_get(struct timer *timer);
 		printk("%llu: " format "\n", platform_timer_get(NULL),	\
 		       ##__VA_ARGS__);					\
 	} while (0)
-#else
-#error "TODO: Z_LOG()"
+#define _log_message(log_func, atomic, level, comp_class, ctx, id1, id2, format, ...)	\
+	do {										\
+		if ((level) <= SOF_ZEPHYR_TRACE_LEVEL)					\
+			printk("%llu: " format "\n", platform_timer_get(NULL),		\
+			       ##__VA_ARGS__);						\
+	} while (0)
+#else /* not tested */
+#define _log_message(log_func, atomic, level, comp_class, ctx, id1, id2, format, ...)	\
+	do {										\
+		Z_LOG(level, "%u: " format, (uint32_t)platform_timer_get(NULL),		\
+		      ##__VA_ARGS__);							\
+	} while (0)
 #endif
 
 #endif /* __INCLUDE_TRACE__ */
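
Note (illustrative, not part of the patch): the calling convention restored above, a public variadic entry point that owns va_start()/va_end() plus an internal helper that consumes the va_list, can be seen in isolation in the minimal, self-contained C sketch below. The names demo_log(), demo_vlog() and DEMO_MAX_ARGS are hypothetical and exist only for this example; the pair mirrors the trace_log_filtered()/vatrace_log() split but does not reproduce SOF's dictionary headers or message packing.

/* Illustrative sketch only, not part of the patch. */
#include <inttypes.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_MAX_ARGS 4

/* Internal helper: consumes an already-started va_list, like vatrace_log(). */
static void demo_vlog(uint32_t log_entry, int arg_count, va_list ap)
{
	uint32_t args[DEMO_MAX_ARGS];
	int i;

	/* Pack the 32-bit arguments once; the caller owns va_start()/va_end(). */
	for (i = 0; i < arg_count && i < DEMO_MAX_ARGS; i++)
		args[i] = va_arg(ap, uint32_t);

	printf("entry %#" PRIx32 ":", log_entry);
	for (i = 0; i < arg_count && i < DEMO_MAX_ARGS; i++)
		printf(" %" PRIu32, args[i]);
	printf("\n");
}

/* Public entry point: variadic and starts/ends the va_list itself, the shape
 * trace_log_filtered()/trace_log_unfiltered() return to in this patch.
 */
static void demo_log(uint32_t log_entry, int arg_count, ...)
{
	va_list ap;

	va_start(ap, arg_count);
	demo_vlog(log_entry, arg_count, ap);
	va_end(ap);
}

int main(void)
{
	demo_log(0x1234, 2, 7u, 42u);	/* prints: entry 0x1234: 7 42 */
	return 0;
}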