From 58c527cceaf985e1e38b05d2b5ac5fea394cad29 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Mon, 29 Jan 2024 14:18:28 +0800 Subject: [PATCH 01/13] dai-legacy: remove unused logging interface Dai class level logging is not used by dai legacy module, remove it. Signed-off-by: Baofeng Tian --- src/include/sof/lib/dai-legacy.h | 35 -------------------------------- 1 file changed, 35 deletions(-) diff --git a/src/include/sof/lib/dai-legacy.h b/src/include/sof/lib/dai-legacy.h index 328ac156dc05..7b914307c679 100644 --- a/src/include/sof/lib/dai-legacy.h +++ b/src/include/sof/lib/dai-legacy.h @@ -232,15 +232,6 @@ struct dai_type_info { #define trace_dai_get_subid(dai_p) ((dai_p)->index) #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) -/* driver level tracing */ -#define dai_cl_err(drv_p, __e, ...) LOG_ERR(__e, ##__VA_ARGS__) - -#define dai_cl_warn(drv_p, __e, ...) LOG_WRN(__e, ##__VA_ARGS__) - -#define dai_cl_info(drv_p, __e, ...) LOG_INF(__e, ##__VA_ARGS__) - -#define dai_cl_dbg(drv_p, __e, ...) LOG_DBG(__e, ##__VA_ARGS__) - /* device level tracing */ #define dai_err(dai_p, __e, ...) LOG_ERR(__e, ##__VA_ARGS__) @@ -251,32 +242,6 @@ struct dai_type_info { #define dai_dbg(dai_p, __e, ...) LOG_DBG(__e, ##__VA_ARGS__) #else -/* class (driver) level (no device object) tracing */ - -#define dai_cl_err(drv_p, __e, ...) \ - trace_dev_err(trace_dai_drv_get_tr_ctx, \ - trace_dai_drv_get_id, \ - trace_dai_drv_get_subid, \ - drv_p, __e, ##__VA_ARGS__) - -#define dai_cl_warn(drv_p, __e, ...) \ - trace_dev_warn(trace_dai_drv_get_tr_ctx,\ - trace_dai_drv_get_id, \ - trace_dai_drv_get_subid, \ - drv_p, __e, ##__VA_ARGS__) - -#define dai_cl_info(drv_p, __e, ...) \ - trace_dev_info(trace_dai_drv_get_tr_ctx,\ - trace_dai_drv_get_id, \ - trace_dai_drv_get_subid, \ - drv_p, __e, ##__VA_ARGS__) - -#define dai_cl_dbg(drv_p, __e, ...) 
\ - trace_dev_dbg(trace_dai_drv_get_tr_ctx, \ - trace_dai_drv_get_id, \ - trace_dai_drv_get_subid, \ - drv_p, __e, ##__VA_ARGS__) - /* device tracing */ #define dai_err(dai_p, __e, ...) \ From bd2a09b8516b49bf4828ec767268678188b4ad8e Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Mon, 29 Jan 2024 14:27:30 +0800 Subject: [PATCH 02/13] Trace: remove trace verbosity config This is part of job for remove ipc3 logging ctx dependency. Signed-off-by: Baofeng Tian --- src/trace/Kconfig | 8 -------- src/trace/trace.c | 20 -------------------- 2 files changed, 28 deletions(-) diff --git a/src/trace/Kconfig b/src/trace/Kconfig index e691bc9e3cd3..74152ee17717 100644 --- a/src/trace/Kconfig +++ b/src/trace/Kconfig @@ -39,14 +39,6 @@ config TRACE_FILTERING help Filtering of trace messages based on their verbosity level and/or frequency. -config TRACE_FILTERING_VERBOSITY - bool "Filter by verbosity" - depends on TRACE_FILTERING - default y - help - Filtering by log verbosity level, where maximum verbosity allowed is specified for each - context and may be adjusted in runtime. Most basic feature found in every logger. - config TRACE_FILTERING_ADAPTIVE bool "Adaptive rate limiting" depends on TRACE_FILTERING diff --git a/src/trace/trace.c b/src/trace/trace.c index dc0eaff0e5d6..9b452fe021df 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -115,21 +115,6 @@ void mtrace_event(const char *data, uint32_t length) } #endif /* __ZEPHYR__ */ -#if CONFIG_TRACE_FILTERING_VERBOSITY -/** - * \brief Runtime trace filtering based on verbosity level - * \param lvl log level (LOG_LEVEL_ ERROR, INFO, DEBUG ...) 
- * \param uuid uuid address - * \return false when trace is filtered out, otherwise true - */ -static inline bool trace_filter_verbosity(uint32_t lvl, const struct tr_ctx *ctx) -{ - STATIC_ASSERT(LOG_LEVEL_CRITICAL < LOG_LEVEL_VERBOSE, - LOG_LEVEL_CRITICAL_MUST_HAVE_LOWEST_VALUE); - return lvl <= ctx->level; -} -#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */ - #if CONFIG_TRACE_FILTERING_ADAPTIVE /** Report how many times an entry was suppressed and clear it. */ static void emit_suppressed_entry(struct recent_log_entry *entry) @@ -280,11 +265,6 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr return; } -#if CONFIG_TRACE_FILTERING_VERBOSITY - if (!trace_filter_verbosity(lvl, ctx)) - return; -#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */ - #if CONFIG_TRACE_FILTERING_ADAPTIVE if (!trace->user_filter_override) { const uint64_t current_ts = sof_cycle_get_64_safe(); From dee1fc2df30435c0f16ae04e108aad7427f09022 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Mon, 29 Jan 2024 14:43:34 +0800 Subject: [PATCH 03/13] Trace: remove uid usage in trace This is part of remove ipc3 logging context dependency. Signed-off-by: Baofeng Tian --- src/include/user/trace.h | 1 - src/trace/trace.c | 7 +++---- tools/logger/convert.c | 4 ++-- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/src/include/user/trace.h b/src/include/user/trace.h index 6550a91ba6ee..fa7ed78d3ee7 100644 --- a/src/include/user/trace.h +++ b/src/include/user/trace.h @@ -33,7 +33,6 @@ * Number of arguments is specified by the params_num field of log_entry */ struct log_entry_header { - uint32_t uid; uint32_t id_0 : TRACE_ID_LENGTH; /* e.g. Pipeline ID */ uint32_t id_1 : TRACE_ID_LENGTH; /* e.g. 
Component ID */ uint32_t core_id : 8; /* Reporting core's id */ diff --git a/src/trace/trace.c b/src/trace/trace.c index 9b452fe021df..d19109bc4ad6 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -72,7 +72,7 @@ struct trace { #define TRACE_ID_MASK ((1 << TRACE_ID_LENGTH) - 1) -static void put_header(void *dst, const struct sof_uuid_entry *uid, +static void put_header(void *dst, uint32_t id_1, uint32_t id_2, uint32_t entry, uint64_t timestamp) { @@ -82,7 +82,6 @@ static void put_header(void *dst, const struct sof_uuid_entry *uid, struct log_entry_header header; int ret; - header.uid = (uintptr_t)uid; header.id_0 = id_1 & TRACE_ID_MASK; header.id_1 = id_2 & TRACE_ID_MASK; header.core_id = cpu_get_id(); @@ -231,7 +230,7 @@ static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ int i; /* fill log content. arg_count is in the dictionary. */ - put_header(data, ctx->uuid_p, id_1, id_2, log_entry, sof_cycle_get_64_safe()); + put_header(data, id_1, id_2, log_entry, sof_cycle_get_64_safe()); for (i = 0; i < arg_count; ++i) data[PAYLOAD_OFFSET(i)] = va_arg(vargs, uint32_t); @@ -507,7 +506,7 @@ static void mtrace_dict_entry_vl(bool atomic_context, uint32_t dict_entry_addres uint32_t *args = (uint32_t *)&packet[MESSAGE_SIZE(0)]; const uint64_t tstamp = sof_cycle_get_64_safe(); - put_header(packet, dt_tr.uuid_p, _TRACE_INV_ID, _TRACE_INV_ID, + put_header(packet, _TRACE_INV_ID, _TRACE_INV_ID, dict_entry_address, tstamp); for (i = 0; i < MIN(n_args, _TRACE_EVENT_MAX_ARGUMENT_COUNT); i++) diff --git a/tools/logger/convert.c b/tools/logger/convert.c index 4e0a6cdc59ae..5571706439c4 100644 --- a/tools/logger/convert.c +++ b/tools/logger/convert.c @@ -517,7 +517,7 @@ static void print_entry_params(const struct log_entry_header *dma_log, (LOG_LEVEL_CRITICAL ? 
KRED : KNRM) : "", dma_log->core_id, entry->header.level, - get_component_name(entry->header.component_class, dma_log->uid), + get_component_name(entry->header.component_class, 0), raw_output && strlen(ids) ? "-" : "", ids); @@ -548,7 +548,7 @@ static void print_entry_params(const struct log_entry_header *dma_log, /* component name and id */ fprintf(out_fd, "%s%-12s %-5s%s ", use_colors ? KYEL : "", - get_component_name(entry->header.component_class, dma_log->uid), + get_component_name(entry->header.component_class, 0), ids, use_colors ? KNRM : ""); From 54043bf88f06f42d0a5a12589d56486fa50faae5 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 13:36:19 +0800 Subject: [PATCH 04/13] Trace: add nonzephyr logging interface for sof This interface will be used to replace existing interface in trace header file, compared between this and existing interace, the difference is this interace does not have context dependency. Signed-off-by: Baofeng Tian --- src/include/sof/trace/trace.h | 44 +--- src/include/sof/trace/trace_nonzephyr.h | 269 ++++++++++++++++++++++++ src/trace/trace.c | 75 +++++++ 3 files changed, 346 insertions(+), 42 deletions(-) create mode 100644 src/include/sof/trace/trace_nonzephyr.h diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index d1beb48d81e2..1848216a3570 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -35,6 +35,8 @@ #include #endif +#include "trace_nonzephyr.h" + struct sof; struct trace; struct tr_ctx; @@ -84,8 +86,6 @@ struct tr_ctx; #define TRACE_BOOT_PLATFORM_SPI (TRACE_BOOT_PLATFORM + 0x200) #define TRACE_BOOT_PLATFORM_DMA_TRACE (TRACE_BOOT_PLATFORM + 0x210) -#define _TRACE_EVENT_MAX_ARGUMENT_COUNT 4 - static inline struct trace *trace_get(void) { return sof_get()->trace; @@ -211,19 +211,6 @@ void mtrace_dict_entry(bool atomic_context, uint32_t log_entry_pointer, int n_ar /** Posts a fully prepared log header + log entry */ void mtrace_event(const char 
*complete_packet, uint32_t length); -#ifdef CONFIG_TRACEM /* Send everything to shared memory too */ -# ifdef __ZEPHYR__ -/* We don't use Zephyr's dictionary yet so there's not enough space for - * DEBUG messages - */ -# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_INFO -# else -# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_DEBUG -# endif -#else /* copy only ERRORS */ -# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_ERROR -#endif /* CONFIG_TRACEM */ - /* This function is _not_ passed the format string to save space */ void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, const struct tr_ctx *ctx, const uint32_t lvl, @@ -261,19 +248,6 @@ do { \ #define trace_point(x) platform_trace_point(x) -#define BASE_LOG_ASSERT_FAIL_MSG \ -unsupported_amount_of_params_in_trace_event\ -_thrown_from_macro_BASE_LOG_in_trace_h - -#define CT_ASSERT(COND, MESSAGE) \ - ((void)sizeof(char[1 - 2 * !(COND)])) - -#define trace_check_size_uint32(a) \ - CT_ASSERT(sizeof(a) <= sizeof(uint32_t), "error: trace argument is bigger than a uint32_t"); - -#define STATIC_ASSERT_ARG_SIZE(...) \ - META_MAP(1, trace_check_size_uint32, __VA_ARGS__) - /** _log_message is where the memory-saving dictionary magic described * above happens: the "format" string argument is moved to a special * linker section and replaced by a &log_entry pointer to it. This must @@ -294,20 +268,6 @@ do { \ lvl, format, ##__VA_ARGS__); \ } while (0) -#ifdef __ZEPHYR__ -/* Just like XTOS, only the most urgent messages go to limited - * shared memory. - */ -#define _log_nodict(atomic, arg_count, lvl, format, ...) \ -do { \ - if ((lvl) <= MTRACE_DUPLICATION_LEVEL) \ - printk("%llu " format "\n", k_cycle_get_64(), \ - ##__VA_ARGS__); \ -} while (0) -#else -#define _log_nodict(atomic, n_args, lvl, format, ...) 
-#endif - #endif /* CONFIG_LIBRARY */ #else /* CONFIG_TRACE */ diff --git a/src/include/sof/trace/trace_nonzephyr.h b/src/include/sof/trace/trace_nonzephyr.h new file mode 100644 index 000000000000..1dc9c31667b4 --- /dev/null +++ b/src/include/sof/trace/trace_nonzephyr.h @@ -0,0 +1,269 @@ +/* SPDX-License-Identifier: BSD-3-Clause + * + * Copyright(c) 2024 Intel Corporation. All rights reserved. + * + * Author: Baofeng Tian + */ + +#ifndef __SOF_TRACE_NONZEPHYR_H__ +#define __SOF_TRACE_NONZEPHYR_H__ + +#include + +#define _TRACE_EVENT_MAX_ARGUMENT_COUNT 4 + +/* Silences compiler warnings about unused variables */ +#define trace_unused_nonzephyr(class, id_1, id_2, format, ...) \ + SOF_TRACE_UNUSED(id_1, id_2, ##__VA_ARGS__) + +#if CONFIG_TRACE + +#include +#include /* LOG_LEVEL_... */ + +/* + * trace_event macro definition + * + * trace_event() macro is used for logging events that occur at runtime. + * It comes in 2 main flavours, atomic and non-atomic. Depending of definitions + * above, it might also propagate log messages to mbox if desired. + * + * First argument is always class of event being logged, as defined in + * user/trace.h - TRACE_CLASS_* (deprecated - do not use). + * Second argument is string literal in printf format, followed by up to 4 + * parameters (uint32_t), that are used to expand into string fromat when + * parsing log data. + * + * All compile-time accessible data (verbosity, class, source file name, line + * index and string literal) are linked into .static_log_entries section + * of binary and then extracted by smex, so they do not contribute to loadable + * image size. This way more elaborate log messages are possible and encouraged, + * for better debugging experience, without worrying about runtime performance. + */ + +/* Map the different trace_xxxx_with_ids(... ) levels to the + * _trace_event_with_ids(level_xxxx, ...) macro shared across log + * levels. + */ +#define trace_event_with_ids_nonzephyr(class, id_1, id_2, format, ...) 
\ + _trace_event_with_ids_nonzephyr(LOG_LEVEL_INFO, class, id_1, id_2, \ + format, ##__VA_ARGS__) + +#define trace_event_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _trace_event_atomic_with_ids_nonzephyr(LOG_LEVEL_INFO, class, id_1, id_2, \ + format, ##__VA_ARGS__) + +#define trace_warn_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _trace_event_with_ids_nonzephyr(LOG_LEVEL_WARNING, class, id_1, id_2, \ + format, ##__VA_ARGS__) + +#define trace_warn_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _trace_event_atomic_with_ids_nonzephyr(LOG_LEVEL_WARNING, class, \ + id_1, id_2, \ + format, ##__VA_ARGS__) + +/* All tracing macros in this file end up calling these functions in the end. */ +typedef void (*log_func_t_nonzephyr)(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list args); + +void trace_log_filtered_nonzephyr(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list args); +void trace_log_unfiltered_nonzephyr(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list args); + +#define _trace_event_with_ids_nonzephyr(lvl, class, id_1, id_2, format, ...) \ + _log_message_nonzephyr(trace_log_filtered_nonzephyr, false, lvl, class, id_1, \ + id_2, format, ##__VA_ARGS__) + +#define _trace_event_atomic_with_ids_nonzephyr(lvl, class, id_1, id_2, format, ...) 
\ + _log_message_nonzephyr(trace_log_filtered_nonzephyr, true, lvl, class, id_1, \ + id_2, format, ##__VA_ARGS__) + +/* This function is _not_ passed the format string to save space */ +void _log_sofdict_nonzephyr(log_func_t_nonzephyr sofdict_logf, bool atomic, const void *log_entry, + const uint32_t lvl, + uint32_t id_1, uint32_t id_2, int arg_count, ...); + +#ifdef CONFIG_TRACEM /* Send everything to shared memory too */ +# ifdef __ZEPHYR__ +/* We don't use Zephyr's dictionary yet so there's not enough space for + * DEBUG messages + */ +# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_INFO +# else +# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_DEBUG +# endif +#else /* copy only ERRORS */ +# define MTRACE_DUPLICATION_LEVEL LOG_LEVEL_ERROR +#endif /* CONFIG_TRACEM */ + +/* _log_message() */ + +#ifdef CONFIG_LIBRARY + +#include + +/* trace level used on host configurations */ +extern int host_trace_level; + +#define _log_message_nonzephyr(ignored_log_func, atomic, level, comp_class, id_1, id_2, \ + format, ...) \ +do { \ + (void)id_1; \ + (void)id_2; \ + struct timeval tv; \ + char *msg = "(%s:%d) " format; \ + if (level >= host_trace_level) { \ + gettimeofday(&tv, NULL); \ + fprintf(stderr, "%ld.%6.6ld:", tv.tv_sec, tv.tv_usec); \ + fprintf(stderr, msg, strrchr(__FILE__, '/') + 1, \ + __LINE__, ##__VA_ARGS__); \ + fprintf(stderr, "\n"); \ + } \ +} while (0) + +#else /* CONFIG_LIBRARY */ + +#define BASE_LOG_ASSERT_FAIL_MSG \ +unsupported_amount_of_params_in_trace_event\ +_thrown_from_macro_BASE_LOG_in_trace_h + +#define CT_ASSERT(COND, MESSAGE) \ + ((void)sizeof(char[1 - 2 * !(COND)])) + +#define trace_check_size_uint32(a) \ + CT_ASSERT(sizeof(a) <= sizeof(uint32_t), "error: trace argument is bigger than a uint32_t"); + +#define STATIC_ASSERT_ARG_SIZE(...) \ + META_MAP(1, trace_check_size_uint32, __VA_ARGS__) + +#ifdef __ZEPHYR__ +/* Just like XTOS, only the most urgent messages go to limited + * shared memory. + */ +#define _log_nodict(atomic, arg_count, lvl, format, ...) 
\ +do { \ + if ((lvl) <= MTRACE_DUPLICATION_LEVEL) \ + printk("%llu " format "\n", k_cycle_get_64(), \ + ##__VA_ARGS__); \ +} while (0) +#else +#define _log_nodict(atomic, n_args, lvl, format, ...) +#endif + +/** _log_message is where the memory-saving dictionary magic described + * above happens: the "format" string argument is moved to a special + * linker section and replaced by a &log_entry pointer to it. This must + * be a macro for the source location to be meaningful. + */ +#define _log_message_nonzephyr(log_func, atomic, lvl, comp_class, id_1, id_2, format, ...) \ +do { \ + _DECLARE_LOG_ENTRY(lvl, format, comp_class, \ + META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__)); \ + STATIC_ASSERT_ARG_SIZE(__VA_ARGS__); \ + STATIC_ASSERT(_TRACE_EVENT_MAX_ARGUMENT_COUNT >= \ + META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \ + BASE_LOG_ASSERT_FAIL_MSG \ + ); \ + _log_sofdict_nonzephyr(log_func, atomic, &log_entry, lvl, id_1, id_2, \ + META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__); \ + _log_nodict(atomic, META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \ + lvl, format, ##__VA_ARGS__); \ +} while (0) + +#endif /* CONFIG_LIBRARY */ + +#else /* CONFIG_TRACE */ + +#define trace_event_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) +#define trace_event_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) + +#define trace_warn_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) +#define trace_warn_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) + +#endif /* CONFIG_TRACE */ + +#if CONFIG_TRACEV +/* Enable tr_dbg() statements by defining tracev_...() */ +#define tracev_event_with_ids_nonzephyr(class, id_1, id_2, format, ...) 
\ + _trace_event_with_ids_nonzephyr(LOG_LEVEL_VERBOSE, class, \ + id_1, id_2, \ + format, ##__VA_ARGS__) + +#define tracev_event_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _trace_event_atomic_with_ids_nonzephyr(LOG_LEVEL_VERBOSE, class, \ + id_1, id_2, \ + format, ##__VA_ARGS__) + +#else /* CONFIG_TRACEV */ +#define tracev_event_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) +#define tracev_event_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) + +#endif /* CONFIG_TRACEV */ + +/* The _error_ level has 2, 1 or 0 backends depending on Kconfig */ +#if CONFIG_TRACEE +/* LOG_LEVEL_CRITICAL messages are duplicated to the mail box */ +#define _trace_error_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _log_message_nonzephyr(trace_log_filtered_nonzephyr, true, LOG_LEVEL_CRITICAL, class, \ + id_1, id_2, format, ##__VA_ARGS__) +#define trace_error_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + _trace_error_with_ids_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) +#define trace_error_atomic_with_ids_nonzephyr(...) trace_error_with_ids_nonzephyr(__VA_ARGS__) + +#elif CONFIG_TRACE +/* Goes to trace_log_filtered() too but with a downgraded, LOG_INFO level */ +#define trace_error_with_ids_nonzephyr(...) trace_event_with_ids_nonzephyr(__VA_ARGS__) +#define trace_error_atomic_with_ids_nonzephyr(...) \ + trace_event_atomic_with_ids_nonzephyr(__VA_ARGS__) + +#else /* CONFIG_TRACEE, CONFIG_TRACE */ +#define trace_error_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) +#define trace_error_atomic_with_ids_nonzephyr(class, id_1, id_2, format, ...) \ + trace_unused_nonzephyr(class, id_1, id_2, format, ##__VA_ARGS__) + +#endif /* CONFIG_TRACEE, CONFIG_TRACE */ + +/* tracing from device (component, pipeline, dai, ...) 
*/ + +/** \brief Trace from a device on err level. + * + * @param get_id_m Macro that can retrieve device's id0 from the dev + * @param get_subid_m Macro that can retrieve device's id1 from the dev + * @param dev Device + * @param fmt Format followed by parameters + * @param ... Parameters + */ +#define trace_dev_err_nonzephyr(get_id_m, get_subid_m, dev, fmt, ...) \ + trace_error_with_ids_nonzephyr(_TRACE_INV_CLASS, \ + get_id_m(dev), get_subid_m(dev), \ + fmt, ##__VA_ARGS__) + +/** \brief Trace from a device on warning level. */ +#define trace_dev_warn_nonzephyr(get_id_m, get_subid_m, dev, fmt, ...) \ + trace_warn_with_ids_nonzephyr(_TRACE_INV_CLASS, \ + get_id_m(dev), get_subid_m(dev), \ + fmt, ##__VA_ARGS__) + +/** \brief Trace from a device on info level. */ +#define trace_dev_info_nonzephyr(get_id_m, get_subid_m, dev, fmt, ...) \ + trace_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ + get_id_m(dev), get_subid_m(dev), \ + fmt, ##__VA_ARGS__) + +/** \brief Trace from a device on dbg level. */ +#define trace_dev_dbg_nonzephyr(get_id_m, get_subid_m, dev, fmt, ...) \ + tracev_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ + get_id_m(dev), \ + get_subid_m(dev), fmt, ##__VA_ARGS__) +#endif /* __SOF_TRACE_NONZEPHYR_H__ */ diff --git a/src/trace/trace.c b/src/trace/trace.c index d19109bc4ad6..4c343690cd8d 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -243,6 +243,27 @@ static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ } +static void dma_trace_log_nonzephyr(bool send_atomic, uint32_t log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list vargs) +{ + uint32_t data[MESSAGE_SIZE_DWORDS(_TRACE_EVENT_MAX_ARGUMENT_COUNT)]; + const int message_size = MESSAGE_SIZE(arg_count); + int i; + + /* fill log content. arg_count is in the dictionary. 
*/ + put_header(data, id_1, id_2, log_entry, sof_cycle_get_64_safe()); + + for (i = 0; i < arg_count; ++i) + data[PAYLOAD_OFFSET(i)] = va_arg(vargs, uint32_t); + + /* send event by */ + if (send_atomic) + dtrace_event_atomic((const char *)data, message_size); + else + dtrace_event((const char *)data, message_size); +} + void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl) { @@ -278,6 +299,41 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl); } +void trace_log_unfiltered_nonzephyr(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list vl) +{ + struct trace *trace = trace_get(); + + if (!trace->enable) + return; + + dma_trace_log_nonzephyr(send_atomic, (uint32_t)log_entry, lvl, id_1, id_2, arg_count, vl); +} + +void trace_log_filtered_nonzephyr(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, + int arg_count, va_list vl) +{ + struct trace *trace = trace_get(); + + if (!trace->enable) + return; + +#if CONFIG_TRACE_FILTERING_ADAPTIVE + if (!trace->user_filter_override) { + const uint64_t current_ts = sof_cycle_get_64_safe(); + + emit_recent_entries(current_ts); + + if (!trace_filter_flood(lvl, (uint32_t)log_entry, current_ts)) + return; + } +#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */ + + dma_trace_log_nonzephyr(send_atomic, (uint32_t)log_entry, lvl, id_1, id_2, arg_count, vl); +} + struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem, struct sof_ipc_trace_filter_elem *end, struct trace_filter *filter) @@ -551,3 +607,22 @@ void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, sofdict_logf(atomic, log_entry, ctx, lvl, id_1, id_2, arg_count, ap); va_end(ap); } + +void 
_log_sofdict_nonzephyr(log_func_t_nonzephyr sofdict_logf, bool atomic, const void *log_entry, + const uint32_t lvl, + uint32_t id_1, uint32_t id_2, int arg_count, ...) +{ + va_list ap; + +#ifndef __ZEPHYR__ /* for Zephyr see _log_nodict() in trace.h */ + if (lvl <= MTRACE_DUPLICATION_LEVEL) { + va_start(ap, arg_count); + mtrace_dict_entry_vl(atomic, (uint32_t)log_entry, arg_count, ap); + va_end(ap); + } +#endif + + va_start(ap, arg_count); + sofdict_logf(atomic, log_entry, lvl, id_1, id_2, arg_count, ap); + va_end(ap); +} From 55476d37cd21af119decf361c1f3eb7c306392ac Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 13:46:00 +0800 Subject: [PATCH 05/13] dai-legacy: replace non-zephyr logging interface Replace it with new defined logging interface without context dependency. Signed-off-by: Baofeng Tian --- src/include/sof/lib/dai-legacy.h | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/include/sof/lib/dai-legacy.h b/src/include/sof/lib/dai-legacy.h index 7b914307c679..a47f2f5bb704 100644 --- a/src/include/sof/lib/dai-legacy.h +++ b/src/include/sof/lib/dai-legacy.h @@ -227,7 +227,6 @@ struct dai_type_info { #define trace_dai_drv_get_id(drv_p) (-1) #define trace_dai_drv_get_subid(drv_p) (-1) -#define trace_dai_get_tr_ctx(dai_p) ((dai_p)->drv->tctx) #define trace_dai_get_id(dai_p) ((dai_p)->drv->type) #define trace_dai_get_subid(dai_p) ((dai_p)->index) @@ -245,24 +244,20 @@ struct dai_type_info { /* device tracing */ #define dai_err(dai_p, __e, ...) \ - trace_dev_err(trace_dai_get_tr_ctx, \ - trace_dai_get_id, \ - trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) + trace_dev_err_nonzephyr(trace_dai_get_id, \ + trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) #define dai_warn(dai_p, __e, ...) 
\ - trace_dev_warn(trace_dai_get_tr_ctx, \ - trace_dai_get_id, \ - trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) + trace_dev_warn_nonzephyr(trace_dai_get_id, \ + trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) #define dai_info(dai_p, __e, ...) \ - trace_dev_info(trace_dai_get_tr_ctx, \ - trace_dai_get_id, \ - trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) + trace_dev_info_nonzephyr(trace_dai_get_id, \ + trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) #define dai_dbg(dai_p, __e, ...) \ - trace_dev_dbg(trace_dai_get_tr_ctx, \ - trace_dai_get_id, \ - trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) + trace_dev_dbg_nonzephyr(trace_dai_get_id, \ + trace_dai_get_subid, dai_p, __e, ##__VA_ARGS__) #endif /* #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) */ From 203fba606cb161413016af2f6823ca86c930b294 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 14:25:11 +0800 Subject: [PATCH 06/13] Buffer: replace non-zephyr logging interface Replace it with new defined logging interface without context dependency. Signed-off-by: Baofeng Tian --- src/include/sof/audio/buffer.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index e636771f9f93..8140da7ad0f7 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -72,34 +72,34 @@ extern struct tr_ctx buffer_tr; #else /** \brief Trace error message from buffer */ #define buf_err(buf_ptr, __e, ...) \ - trace_dev_err(trace_buf_get_tr_ctx, trace_buf_get_id, \ - buf_get_id, \ - (__sparse_force const struct comp_buffer *)buf_ptr, \ - __e, ##__VA_ARGS__) + trace_dev_err_nonzephyr(trace_buf_get_id, \ + buf_get_id, \ + (__sparse_force const struct comp_buffer *)buf_ptr, \ + __e, ##__VA_ARGS__) /** \brief Trace warning message from buffer */ #define buf_warn(buf_ptr, __e, ...) 
\ - trace_dev_warn(trace_buf_get_tr_ctx, trace_buf_get_id, \ - buf_get_id, \ - (__sparse_force const struct comp_buffer *)buf_ptr, \ - __e, ##__VA_ARGS__) + trace_dev_warn_nonzephyr(trace_buf_get_id, \ + buf_get_id, \ + (__sparse_force const struct comp_buffer *)buf_ptr, \ + __e, ##__VA_ARGS__) /** \brief Trace info message from buffer */ #define buf_info(buf_ptr, __e, ...) \ - trace_dev_info(trace_buf_get_tr_ctx, trace_buf_get_id, \ - buf_get_id, \ - (__sparse_force const struct comp_buffer *)buf_ptr, \ - __e, ##__VA_ARGS__) + trace_dev_info_nonzephyr(trace_buf_get_id, \ + buf_get_id, \ + (__sparse_force const struct comp_buffer *)buf_ptr, \ + __e, ##__VA_ARGS__) /** \brief Trace debug message from buffer */ #if defined(CONFIG_LIBRARY) #define buf_dbg(buf_ptr, __e, ...) #else #define buf_dbg(buf_ptr, __e, ...) \ - trace_dev_dbg(trace_buf_get_tr_ctx, trace_buf_get_id, \ - buf_get_id, \ - (__sparse_force const struct comp_buffer *)buf_ptr, \ - __e, ##__VA_ARGS__) + trace_dev_dbg_nonzephyr(trace_buf_get_id, \ + buf_get_id, \ + (__sparse_force const struct comp_buffer *)buf_ptr, \ + __e, ##__VA_ARGS__) #endif #endif /* #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) */ From e4a97b06cf0bb5cd7c5dcc06faf5f14136544bc6 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 14:50:01 +0800 Subject: [PATCH 07/13] Pipeline: replace non-zephyr logging interface Replace it with new defined logging interface without context dependency. Signed-off-by: Baofeng Tian --- src/include/sof/audio/pipeline-trace.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/include/sof/audio/pipeline-trace.h b/src/include/sof/audio/pipeline-trace.h index d1f89583ad45..0a5af14450e7 100644 --- a/src/include/sof/audio/pipeline-trace.h +++ b/src/include/sof/audio/pipeline-trace.h @@ -60,20 +60,20 @@ extern struct tr_ctx pipe_tr; #else #define pipe_err(pipe_p, __e, ...) 
\ - trace_dev_err(trace_pipe_get_tr_ctx, trace_pipe_get_id, \ - trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) + trace_dev_err_nonzephyr(trace_pipe_get_id, \ + trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) #define pipe_warn(pipe_p, __e, ...) \ - trace_dev_warn(trace_pipe_get_tr_ctx, trace_pipe_get_id, \ - trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) + trace_dev_warn_nonzephyr(trace_pipe_get_id, \ + trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) #define pipe_info(pipe_p, __e, ...) \ - trace_dev_info(trace_pipe_get_tr_ctx, trace_pipe_get_id, \ - trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) + trace_dev_info_nonzephyr(trace_pipe_get_id, \ + trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) #define pipe_dbg(pipe_p, __e, ...) \ - trace_dev_dbg(trace_pipe_get_tr_ctx, trace_pipe_get_id, \ - trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) + trace_dev_dbg_nonzephyr(trace_pipe_get_id, \ + trace_pipe_get_subid, pipe_p, __e, ##__VA_ARGS__) #endif /* #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) */ From bf22c179b25a24c0374499918b1054d3f02b32a9 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 14:59:24 +0800 Subject: [PATCH 08/13] Component: replace non-zephyr logging interface Replace it with new defined logging interface without context dependency. Signed-off-by: Baofeng Tian --- src/audio/rtnr/rtnr.c | 12 +++---- src/include/sof/audio/component.h | 52 ++++++++++++++----------------- 2 files changed, 29 insertions(+), 35 deletions(-) diff --git a/src/audio/rtnr/rtnr.c b/src/audio/rtnr/rtnr.c index 8476467f1a44..cbff4e3f09c6 100644 --- a/src/audio/rtnr/rtnr.c +++ b/src/audio/rtnr/rtnr.c @@ -51,8 +51,6 @@ /* ID for RTNR data */ #define RTNR_DATA_ID_PRESET 12345678 -static const struct comp_driver comp_rtnr; - /** \brief RTNR processing functions map item. 
*/ struct rtnr_func_map { enum sof_ipc_frame fmt; /**< source frame format */ @@ -78,27 +76,27 @@ void rtnr_printf(int a, int b, int c, int d, int e) { switch (a) { case 0xa: - comp_cl_info(&comp_rtnr, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", + comp_cl_info(NULL, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", b, c, d, e); break; case 0xb: - comp_cl_info(&comp_rtnr, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", + comp_cl_info(NULL, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", b, c, d, e); break; case 0xc: - comp_cl_warn(&comp_rtnr, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", + comp_cl_warn(NULL, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", b, c, d, e); break; case 0xd: - comp_cl_dbg(&comp_rtnr, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", + comp_cl_dbg(NULL, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", b, c, d, e); break; case 0xe: - comp_cl_err(&comp_rtnr, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", + comp_cl_err(NULL, "rtnr_printf 1st=%08x, 2nd=%08x, 3rd=%08x, 4st=%08x", b, c, d, e); break; diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index 9f427cccb6ed..92be3b9042f4 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -176,57 +176,53 @@ enum { /** \brief Trace error message from component driver (no comp instance) */ #define comp_cl_err(drv_p, __e, ...) \ - trace_dev_err(trace_comp_drv_get_tr_ctx, \ - trace_comp_drv_get_id, \ - trace_comp_drv_get_subid, \ - drv_p, \ - __e, ##__VA_ARGS__) + trace_dev_err_nonzephyr(trace_comp_drv_get_id, \ + trace_comp_drv_get_subid, \ + drv_p, \ + __e, ##__VA_ARGS__) /** \brief Trace warning message from component driver (no comp instance) */ #define comp_cl_warn(drv_p, __e, ...) 
\ - trace_dev_warn(trace_comp_drv_get_tr_ctx, \ - trace_comp_drv_get_id, \ - trace_comp_drv_get_subid, \ - drv_p, \ - __e, ##__VA_ARGS__) + trace_dev_warn_nonzephyr(trace_comp_drv_get_id, \ + trace_comp_drv_get_subid, \ + drv_p, \ + __e, ##__VA_ARGS__) /** \brief Trace info message from component driver (no comp instance) */ #define comp_cl_info(drv_p, __e, ...) \ - trace_dev_info(trace_comp_drv_get_tr_ctx, \ - trace_comp_drv_get_id, \ - trace_comp_drv_get_subid, \ - drv_p, \ - __e, ##__VA_ARGS__) + trace_dev_info_nonzephyr(trace_comp_drv_get_id, \ + trace_comp_drv_get_subid, \ + drv_p, \ + __e, ##__VA_ARGS__) /** \brief Trace debug message from component driver (no comp instance) */ #define comp_cl_dbg(drv_p, __e, ...) \ - trace_dev_dbg(trace_comp_drv_get_tr_ctx, \ - trace_comp_drv_get_id, \ - trace_comp_drv_get_subid, \ - drv_p, \ - __e, ##__VA_ARGS__) + trace_dev_dbg_nonzephyr(trace_comp_drv_get_id, \ + trace_comp_drv_get_subid, \ + drv_p, \ + __e, ##__VA_ARGS__) /* device tracing */ /** \brief Trace error message from component device */ #define comp_err(comp_p, __e, ...) \ - trace_dev_err(trace_comp_get_tr_ctx, trace_comp_get_id, \ - trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) + trace_dev_err_nonzephyr(trace_comp_get_id, \ + trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) /** \brief Trace warning message from component device */ #define comp_warn(comp_p, __e, ...) \ - trace_dev_warn(trace_comp_get_tr_ctx, trace_comp_get_id, \ - trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) + trace_dev_warn_nonzephyr(trace_comp_get_id, \ + trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) /** \brief Trace info message from component device */ #define comp_info(comp_p, __e, ...) 
\ - trace_dev_info(trace_comp_get_tr_ctx, trace_comp_get_id, \ - trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) + trace_dev_info_nonzephyr(trace_comp_get_id, \ + trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) /** \brief Trace debug message from component device */ #define comp_dbg(comp_p, __e, ...) \ - trace_dev_dbg(trace_comp_get_tr_ctx, trace_comp_get_id, \ - trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) + trace_dev_dbg_nonzephyr(trace_comp_get_id, \ + trace_comp_get_subid, comp_p, __e, ##__VA_ARGS__) #endif /* #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) */ From 31841db995dfd9370dc215eaed4a1a83e83235b9 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 15:06:39 +0800 Subject: [PATCH 09/13] Trace: replace non-zephyr logging interface Replace it with new defined logging interface without context dependency. Signed-off-by: Baofeng Tian --- src/include/sof/trace/trace.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index 1848216a3570..fc86f2d2f443 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -408,22 +408,22 @@ struct tr_ctx { /* tracing from infrastructure part */ #define tr_err_atomic(ctx, fmt, ...) \ - trace_error_atomic_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_error_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) #define tr_warn_atomic(ctx, fmt, ...) \ - trace_warn_atomic_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_warn_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) #define tr_info_atomic(ctx, fmt, ...) \ - trace_event_atomic_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_event_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) #define tr_dbg_atomic(ctx, fmt, ...) 
\ - tracev_event_atomic_with_ids(_TRACE_INV_CLASS, ctx, \ + tracev_event_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) @@ -448,20 +448,20 @@ struct tr_ctx { #endif #define tr_err(ctx, fmt, ...) \ - trace_error_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_error_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) #define tr_warn(ctx, fmt, ...) \ - trace_warn_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_warn_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) #define tr_info(ctx, fmt, ...) \ - trace_event_with_ids(_TRACE_INV_CLASS, ctx, \ + trace_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) /* tracev_ output depends on CONFIG_TRACEV=y */ #define tr_dbg(ctx, fmt, ...) \ - tracev_event_with_ids(_TRACE_INV_CLASS, ctx, \ + tracev_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) #endif From bea69f109f5bf1459f144ac4131028cf725d93a7 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 20:37:37 +0800 Subject: [PATCH 10/13] Trace: remove unused trace definition Remove unused trace definition, since it is already replaced by non-zephyr trace definition. Signed-off-by: Baofeng Tian --- src/include/sof/trace/trace.h | 187 --------------------------------- src/trace/trace.c | 84 +-------------- test/cmocka/src/common_mocks.c | 13 ++- 3 files changed, 11 insertions(+), 273 deletions(-) diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index fc86f2d2f443..ca0f47f94308 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -91,10 +91,6 @@ static inline struct trace *trace_get(void) return sof_get()->trace; } -/* Silences compiler warnings about unused variables */ -#define trace_unused(class, ctx, id_1, id_2, format, ...) 
\ - SOF_TRACE_UNUSED(ctx, id_1, id_2, ##__VA_ARGS__) - struct trace_filter { uint32_t uuid_id; /**< type id, or 0 when not important */ int32_t comp_id; /**< component id or -1 when not important */ @@ -132,71 +128,16 @@ struct trace_filter { #include #include /* LOG_LEVEL_... */ -/* - * trace_event macro definition - * - * trace_event() macro is used for logging events that occur at runtime. - * It comes in 2 main flavours, atomic and non-atomic. Depending of definitions - * above, it might also propagate log messages to mbox if desired. - * - * First argument is always class of event being logged, as defined in - * user/trace.h - TRACE_CLASS_* (deprecated - do not use). - * Second argument is string literal in printf format, followed by up to 4 - * parameters (uint32_t), that are used to expand into string fromat when - * parsing log data. - * - * All compile-time accessible data (verbosity, class, source file name, line - * index and string literal) are linked into .static_log_entries section - * of binary and then extracted by smex, so they do not contribute to loadable - * image size. This way more elaborate log messages are possible and encouraged, - * for better debugging experience, without worrying about runtime performance. - */ - -/* Map the different trace_xxxx_with_ids(... ) levels to the - * _trace_event_with_ids(level_xxxx, ...) macro shared across log - * levels. - */ -#define trace_event_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_event_with_ids(LOG_LEVEL_INFO, class, ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - -#define trace_event_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_event_atomic_with_ids(LOG_LEVEL_INFO, class, ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - -#define trace_warn_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_event_with_ids(LOG_LEVEL_WARNING, class, ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - -#define trace_warn_atomic_with_ids(class, ctx, id_1, id_2, format, ...) 
\ - _trace_event_atomic_with_ids(LOG_LEVEL_WARNING, class, \ - ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - void trace_flush_dma_to_mbox(void); void trace_on(void); void trace_off(void); void trace_init(struct sof *sof); -/* All tracing macros in this file end up calling these functions in the end. */ -typedef void (*log_func_t)(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args); - -void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args); -void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list args); struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_elem *elem, struct sof_ipc_trace_filter_elem *end, struct trace_filter *filter); int trace_filter_update(const struct trace_filter *elem); -#define _trace_event_with_ids(lvl, class, ctx, id_1, id_2, format, ...) \ - _log_message(trace_log_filtered, false, lvl, class, ctx, id_1, id_2, format, ##__VA_ARGS__) - -#define _trace_event_atomic_with_ids(lvl, class, ctx, id_1, id_2, format, ...) \ - _log_message(trace_log_filtered, true, lvl, class, ctx, id_1, id_2, format, ##__VA_ARGS__) - /** * Appends one SOF dictionary entry and log statement to the ring buffer * implementing the 'etrace' in shared memory. 
@@ -211,11 +152,6 @@ void mtrace_dict_entry(bool atomic_context, uint32_t log_entry_pointer, int n_ar /** Posts a fully prepared log header + log entry */ void mtrace_event(const char *complete_packet, uint32_t length); -/* This function is _not_ passed the format string to save space */ -void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, - const struct tr_ctx *ctx, const uint32_t lvl, - uint32_t id_1, uint32_t id_2, int arg_count, ...); - /* _log_message() */ #ifdef CONFIG_LIBRARY @@ -226,21 +162,6 @@ void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, extern int host_trace_level; char *get_trace_class(uint32_t trace_class); -#define _log_message(ignored_log_func, atomic, level, comp_class, ctx, id_1, id_2, format, ...) \ -do { \ - (void)ctx; \ - (void)id_1; \ - (void)id_2; \ - struct timeval tv; \ - char *msg = "(%s:%d) " format; \ - if (level >= host_trace_level) { \ - gettimeofday(&tv, NULL); \ - fprintf(stderr, "%ld.%6.6ld:", tv.tv_sec, tv.tv_usec); \ - fprintf(stderr, msg, strrchr(__FILE__, '/') + 1, \ - __LINE__, ##__VA_ARGS__); \ - fprintf(stderr, "\n"); \ - } \ -} while (0) #define trace_point(x) do {} while (0) @@ -248,40 +169,10 @@ do { \ #define trace_point(x) platform_trace_point(x) -/** _log_message is where the memory-saving dictionary magic described - * above happens: the "format" string argument is moved to a special - * linker section and replaced by a &log_entry pointer to it. This must - * be a macro for the source location to be meaningful. - */ -#define _log_message(log_func, atomic, lvl, comp_class, ctx, id_1, id_2, format, ...) 
\ -do { \ - _DECLARE_LOG_ENTRY(lvl, format, comp_class, \ - META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__)); \ - STATIC_ASSERT_ARG_SIZE(__VA_ARGS__); \ - STATIC_ASSERT(_TRACE_EVENT_MAX_ARGUMENT_COUNT >= \ - META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \ - BASE_LOG_ASSERT_FAIL_MSG \ - ); \ - _log_sofdict(log_func, atomic, &log_entry, ctx, lvl, id_1, id_2, \ - META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), ##__VA_ARGS__); \ - _log_nodict(atomic, META_COUNT_VARAGS_BEFORE_COMPILE(__VA_ARGS__), \ - lvl, format, ##__VA_ARGS__); \ -} while (0) - #endif /* CONFIG_LIBRARY */ #else /* CONFIG_TRACE */ -#define trace_event_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) -#define trace_event_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) - -#define trace_warn_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) -#define trace_warn_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) - #define trace_point(x) do {} while (0) static inline void trace_flush_dma_to_mbox(void) { } @@ -293,50 +184,6 @@ static inline int trace_filter_update(const struct trace_filter *filter) #endif /* CONFIG_TRACE */ -#if CONFIG_TRACEV -/* Enable tr_dbg() statements by defining tracev_...() */ -#define tracev_event_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_event_with_ids(LOG_LEVEL_VERBOSE, class, \ - ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - -#define tracev_event_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_event_atomic_with_ids(LOG_LEVEL_VERBOSE, class, \ - ctx, id_1, id_2, \ - format, ##__VA_ARGS__) - -#else /* CONFIG_TRACEV */ -#define tracev_event_with_ids(class, ctx, id_1, id_2, format, ...) 
\ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) -#define tracev_event_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) - -#endif /* CONFIG_TRACEV */ - -/* The _error_ level has 2, 1 or 0 backends depending on Kconfig */ -#if CONFIG_TRACEE -/* LOG_LEVEL_CRITICAL messages are duplicated to the mail box */ -#define _trace_error_with_ids(class, ctx, id_1, id_2, format, ...) \ - _log_message(trace_log_filtered, true, LOG_LEVEL_CRITICAL, class, ctx, id_1, \ - id_2, format, ##__VA_ARGS__) -#define trace_error_with_ids(class, ctx, id_1, id_2, format, ...) \ - _trace_error_with_ids(class, ctx, id_1, id_2, format, ##__VA_ARGS__) -#define trace_error_atomic_with_ids(...) trace_error_with_ids(__VA_ARGS__) - -#elif CONFIG_TRACE -/* Goes to trace_log_filtered() too but with a downgraded, LOG_INFO level */ -#define trace_error_with_ids(...) trace_event_with_ids(__VA_ARGS__) -#define trace_error_atomic_with_ids(...) \ - trace_event_atomic_with_ids(__VA_ARGS__) - -#else /* CONFIG_TRACEE, CONFIG_TRACE */ -#define trace_error_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) -#define trace_error_atomic_with_ids(class, ctx, id_1, id_2, format, ...) \ - trace_unused(class, ctx, id_1, id_2, format, ##__VA_ARGS__) - -#endif /* CONFIG_TRACEE, CONFIG_TRACE */ - /** Default value when there is no specific pipeline, dev, dai, etc. */ #define _TRACE_INV_ID -1 @@ -371,40 +218,6 @@ struct tr_ctx { .level = default_log_level, \ } -/* tracing from device (component, pipeline, dai, ...) */ - -/** \brief Trace from a device on err level. - * - * @param get_ctx_m Macro that can retrieve trace context from dev - * @param get_id_m Macro that can retrieve device's id0 from the dev - * @param get_subid_m Macro that can retrieve device's id1 from the dev - * @param dev Device - * @param fmt Format followed by parameters - * @param ... 
Parameters - */ -#define trace_dev_err(get_ctx_m, get_id_m, get_subid_m, dev, fmt, ...) \ - trace_error_with_ids(_TRACE_INV_CLASS, get_ctx_m(dev), \ - get_id_m(dev), get_subid_m(dev), \ - fmt, ##__VA_ARGS__) - -/** \brief Trace from a device on warning level. */ -#define trace_dev_warn(get_ctx_m, get_id_m, get_subid_m, dev, fmt, ...) \ - trace_warn_with_ids(_TRACE_INV_CLASS, get_ctx_m(dev), \ - get_id_m(dev), get_subid_m(dev), \ - fmt, ##__VA_ARGS__) - -/** \brief Trace from a device on info level. */ -#define trace_dev_info(get_ctx_m, get_id_m, get_subid_m, dev, fmt, ...) \ - trace_event_with_ids(_TRACE_INV_CLASS, get_ctx_m(dev), \ - get_id_m(dev), get_subid_m(dev), \ - fmt, ##__VA_ARGS__) - -/** \brief Trace from a device on dbg level. */ -#define trace_dev_dbg(get_ctx_m, get_id_m, get_subid_m, dev, fmt, ...) \ - tracev_event_with_ids(_TRACE_INV_CLASS, \ - get_ctx_m(dev), get_id_m(dev), \ - get_subid_m(dev), fmt, ##__VA_ARGS__) - /* tracing from infrastructure part */ #define tr_err_atomic(ctx, fmt, ...) \ diff --git a/src/trace/trace.c b/src/trace/trace.c index 4c343690cd8d..ff48bd21e057 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -118,10 +118,11 @@ void mtrace_event(const char *data, uint32_t length) /** Report how many times an entry was suppressed and clear it. 
*/ static void emit_suppressed_entry(struct recent_log_entry *entry) { - _log_message(trace_log_unfiltered, false, LOG_LEVEL_INFO, _TRACE_INV_CLASS, &dt_tr, - _TRACE_INV_ID, _TRACE_INV_ID, "Suppressed %u similar messages: %pQ", - entry->trigger_count - CONFIG_TRACE_BURST_COUNT, - (void *)entry->entry_id); + _log_message_nonzephyr(trace_log_unfiltered_nonzephyr, false, LOG_LEVEL_INFO, + _TRACE_INV_CLASS, _TRACE_INV_ID, _TRACE_INV_ID, + "Suppressed %u similar messages: %pQ", + entry->trigger_count - CONFIG_TRACE_BURST_COUNT, + (void *)entry->entry_id); memset(entry, 0, sizeof(*entry)); } @@ -222,27 +223,6 @@ static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t mess * not. Serializes events into trace messages and passes them to * dtrace_event() */ -static void dma_trace_log(bool send_atomic, uint32_t log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs) -{ - uint32_t data[MESSAGE_SIZE_DWORDS(_TRACE_EVENT_MAX_ARGUMENT_COUNT)]; - const int message_size = MESSAGE_SIZE(arg_count); - int i; - - /* fill log content. arg_count is in the dictionary. 
*/ - put_header(data, id_1, id_2, log_entry, sof_cycle_get_64_safe()); - - for (i = 0; i < arg_count; ++i) - data[PAYLOAD_OFFSET(i)] = va_arg(vargs, uint32_t); - - /* send event by */ - if (send_atomic) - dtrace_event_atomic((const char *)data, message_size); - else - dtrace_event((const char *)data, message_size); - -} - static void dma_trace_log_nonzephyr(bool send_atomic, uint32_t log_entry, uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vargs) @@ -264,41 +244,6 @@ static void dma_trace_log_nonzephyr(bool send_atomic, uint32_t log_entry, dtrace_event((const char *)data, message_size); } -void trace_log_unfiltered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl) -{ - struct trace *trace = trace_get(); - - if (!trace->enable) { - return; - } - - dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl); -} - -void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl) -{ - struct trace *trace = trace_get(); - - if (!trace->enable) { - return; - } - -#if CONFIG_TRACE_FILTERING_ADAPTIVE - if (!trace->user_filter_override) { - const uint64_t current_ts = sof_cycle_get_64_safe(); - - emit_recent_entries(current_ts); - - if (!trace_filter_flood(lvl, (uint32_t)log_entry, current_ts)) - return; - } -#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */ - - dma_trace_log(send_atomic, (uint32_t)log_entry, ctx, lvl, id_1, id_2, arg_count, vl); -} - void trace_log_unfiltered_nonzephyr(bool send_atomic, const void *log_entry, uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, va_list vl) @@ -589,25 +534,6 @@ void mtrace_dict_entry(bool atomic_context, uint32_t dict_entry_address, int n_a va_end(ap); } -void _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, - const struct tr_ctx *ctx, const uint32_t lvl, - uint32_t 
id_1, uint32_t id_2, int arg_count, ...) -{ - va_list ap; - -#ifndef __ZEPHYR__ /* for Zephyr see _log_nodict() in trace.h */ - if (lvl <= MTRACE_DUPLICATION_LEVEL) { - va_start(ap, arg_count); - mtrace_dict_entry_vl(atomic, (uint32_t)log_entry, arg_count, ap); - va_end(ap); - } -#endif - - va_start(ap, arg_count); - sofdict_logf(atomic, log_entry, ctx, lvl, id_1, id_2, arg_count, ap); - va_end(ap); -} - void _log_sofdict_nonzephyr(log_func_t_nonzephyr sofdict_logf, bool atomic, const void *log_entry, const uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, ...) diff --git a/test/cmocka/src/common_mocks.c b/test/cmocka/src/common_mocks.c index 92ba58031680..05de395c83d1 100644 --- a/test/cmocka/src/common_mocks.c +++ b/test/cmocka/src/common_mocks.c @@ -119,13 +119,12 @@ void WEAK __panic(uint32_t p, const char *filename, uint32_t linenum) } #if CONFIG_TRACE -void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr_ctx *ctx, - uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, - va_list args) +void WEAK trace_log_filtered_nonzephyr(bool send_atomic, const void *log_entry, + uint32_t lvl, uint32_t id_1, uint32_t id_2, int arg_count, + va_list args) { (void) send_atomic; (void) log_entry; - (void) ctx; (void) lvl; (void) id_1; (void) id_2; @@ -133,9 +132,9 @@ void WEAK trace_log_filtered(bool send_atomic, const void *log_entry, const stru (void) args; } -void WEAK _log_sofdict(log_func_t sofdict_logf, bool atomic, const void *log_entry, - const struct tr_ctx *ctx, const uint32_t lvl, - uint32_t id_1, uint32_t id_2, int arg_count, ...) +void WEAK _log_sofdict_nonzephyr(log_func_t_nonzephyr sofdict_logf, bool atomic, + const void *log_entry, const uint32_t lvl, + uint32_t id_1, uint32_t id_2, int arg_count, ...) 
{ } From b68e8adf2e2b48165830f616d4f5ef2ee9cf6b37 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 20:48:25 +0800 Subject: [PATCH 11/13] Trace: remove atomic trace context dependency Remove context dependency from four atomic trace definition. Signed-off-by: Baofeng Tian --- posix/include/rtos/spinlock.h | 14 +++++++------- posix/include/rtos/wait.h | 2 +- src/include/sof/trace/trace.h | 8 ++++---- src/trace/dma-trace.c | 2 +- xtos/include/rtos/spinlock.h | 14 +++++++------- xtos/include/rtos/wait.h | 2 +- 6 files changed, 21 insertions(+), 21 deletions(-) diff --git a/posix/include/rtos/spinlock.h b/posix/include/rtos/spinlock.h index beb20ab6e847..545a7ef53947 100644 --- a/posix/include/rtos/spinlock.h +++ b/posix/include/rtos/spinlock.h @@ -88,9 +88,9 @@ extern struct tr_ctx sl_tr; break; /* lock acquired */ \ } \ if (__tries == 0) { \ - tr_err_atomic(&sl_tr, "DED"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "user: %d", (lock)->user); \ + tr_err_atomic("DED"); \ + tr_err_atomic("line: %d", line); \ + tr_err_atomic("user: %d", (lock)->user); \ panic(SOF_IPC_PANIC_DEADLOCK); /* lock not acquired */ \ } \ } while (0) @@ -102,11 +102,11 @@ extern struct tr_ctx sl_tr; int __i = 0; \ int __count = lock_dbg_atomic >= DBG_LOCK_USERS \ ? 
DBG_LOCK_USERS : lock_dbg_atomic; \ - tr_err_atomic(&sl_tr, "eal"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "dbg_atomic: %d", lock_dbg_atomic); \ + tr_err_atomic("eal"); \ + tr_err_atomic("line: %d", line); \ + tr_err_atomic("dbg_atomic: %d", lock_dbg_atomic); \ for (__i = 0; __i < __count; __i++) { \ - tr_err_atomic(&sl_tr, "value: %d", \ + tr_err_atomic("value: %d", \ (lock_dbg_atomic << 24) | \ lock_dbg_user[__i]); \ } \ diff --git a/posix/include/rtos/wait.h b/posix/include/rtos/wait.h index b6874888a763..6531cf4e5599 100644 --- a/posix/include/rtos/wait.h +++ b/posix/include/rtos/wait.h @@ -32,7 +32,7 @@ static inline void wait_for_interrupt(int level) tr_dbg(&wait_tr, "WFE"); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic) - tr_err_atomic(&wait_tr, "atm"); + tr_err_atomic("atm"); #endif platform_wait_for_interrupt(level); tr_dbg(&wait_tr, "WFX"); diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index ca0f47f94308..0cc9bb340651 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -220,22 +220,22 @@ struct tr_ctx { /* tracing from infrastructure part */ -#define tr_err_atomic(ctx, fmt, ...) \ +#define tr_err_atomic(fmt, ...) \ trace_error_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) -#define tr_warn_atomic(ctx, fmt, ...) \ +#define tr_warn_atomic(fmt, ...) \ trace_warn_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) -#define tr_info_atomic(ctx, fmt, ...) \ +#define tr_info_atomic(fmt, ...) \ trace_event_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) -#define tr_dbg_atomic(ctx, fmt, ...) \ +#define tr_dbg_atomic(fmt, ...) 
\ tracev_event_atomic_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, \ fmt, ##__VA_ARGS__) diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 4eb245a283aa..3155c9f72f1c 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -491,7 +491,7 @@ int dma_trace_enable(struct dma_trace_data *d) /* validate DMA context */ if (!d->dc.dmac || !d->dc.chan) { - tr_err_atomic(&dt_tr, "dma_trace_enable(): not valid"); + tr_err_atomic("dma_trace_enable(): not valid"); err = -ENODEV; goto out; } diff --git a/xtos/include/rtos/spinlock.h b/xtos/include/rtos/spinlock.h index 2c5c7d94fdd8..71faa503ccb1 100644 --- a/xtos/include/rtos/spinlock.h +++ b/xtos/include/rtos/spinlock.h @@ -88,9 +88,9 @@ extern struct tr_ctx sl_tr; break; /* lock acquired */ \ } \ if (__tries == 0) { \ - tr_err_atomic(&sl_tr, "DED"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "user: %d", (lock)->user); \ + tr_err_atomic("DED"); \ + tr_err_atomic("line: %d", line); \ + tr_err_atomic("user: %d", (lock)->user); \ panic(SOF_IPC_PANIC_DEADLOCK); /* lock not acquired */ \ } \ } while (0) @@ -102,11 +102,11 @@ extern struct tr_ctx sl_tr; int __i = 0; \ int __count = lock_dbg_atomic >= DBG_LOCK_USERS \ ? 
DBG_LOCK_USERS : lock_dbg_atomic; \ - tr_err_atomic(&sl_tr, "eal"); \ - tr_err_atomic(&sl_tr, "line: %d", line); \ - tr_err_atomic(&sl_tr, "dbg_atomic: %d", lock_dbg_atomic); \ + tr_err_atomic("eal"); \ + tr_err_atomic("line: %d", line); \ + tr_err_atomic("dbg_atomic: %d", lock_dbg_atomic); \ for (__i = 0; __i < __count; __i++) { \ - tr_err_atomic(&sl_tr, "value: %d", \ + tr_err_atomic("value: %d", \ (lock_dbg_atomic << 24) | \ lock_dbg_user[__i]); \ } \ diff --git a/xtos/include/rtos/wait.h b/xtos/include/rtos/wait.h index 03bbcadd506d..d222bd5ca7bd 100644 --- a/xtos/include/rtos/wait.h +++ b/xtos/include/rtos/wait.h @@ -32,7 +32,7 @@ static inline void wait_for_interrupt(int level) tr_dbg(&wait_tr, "WFE"); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic) - tr_err_atomic(&wait_tr, "atm"); + tr_err_atomic("atm"); #endif platform_wait_for_interrupt(level); tr_dbg(&wait_tr, "WFX"); From 427c2b5b9f3b590b2444c68f6babdea6b362ce7c Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Thu, 1 Feb 2024 21:32:34 +0800 Subject: [PATCH 12/13] Trace: remove trace context dependency Remove it and change source file accordingly. 
Signed-off-by: Baofeng Tian --- posix/include/rtos/spinlock.h | 10 +- posix/include/rtos/wait.h | 4 +- posix/include/sof/lib/perf_cnt.h | 4 +- src/audio/base_fw.c | 8 +- src/audio/buffer.c | 8 +- src/audio/chain_dma.c | 26 ++- src/audio/channel_map.c | 2 +- src/audio/dp_queue.c | 4 +- src/drivers/amd/common/acp_dma.c | 20 +-- src/drivers/amd/common/acp_dmic_dma.c | 18 +- src/drivers/amd/common/acp_sp_dma.c | 10 +- src/drivers/amd/rembrandt/acp_bt_dma.c | 22 +-- src/drivers/amd/rembrandt/acp_dmic_dma.c | 10 +- src/drivers/amd/rembrandt/acp_hs_dma.c | 22 +-- src/drivers/amd/rembrandt/acp_sp_dma.c | 12 +- src/drivers/amd/rembrandt/acp_sw_audio_dma.c | 22 +-- src/drivers/amd/rembrandt/interrupt.c | 14 +- src/drivers/amd/rembrandt/ipc.c | 2 +- src/drivers/amd/renoir/acp_bt_dma.c | 22 +-- src/drivers/amd/renoir/acp_dmic_dma.c | 13 +- src/drivers/amd/renoir/acp_sp_dma.c | 12 +- src/drivers/amd/renoir/interrupt.c | 10 +- src/drivers/amd/renoir/ipc.c | 4 +- src/drivers/amd/vangogh/acp_bt_dma.c | 22 +-- src/drivers/amd/vangogh/acp_dmic_dma.c | 10 +- src/drivers/amd/vangogh/acp_hs_dma.c | 24 +-- src/drivers/amd/vangogh/acp_sp_dma.c | 12 +- src/drivers/amd/vangogh/interrupt.c | 12 +- src/drivers/amd/vangogh/ipc.c | 2 +- src/drivers/dw/dma.c | 62 +++---- src/drivers/dw/ssi-spi.c | 6 +- src/drivers/generic/dummy-dma.c | 16 +- src/drivers/imx/edma.c | 34 ++-- src/drivers/imx/interrupt-irqsteer.c | 4 +- src/drivers/imx/ipc.c | 12 +- src/drivers/imx/sdma.c | 88 ++++----- src/drivers/interrupt.c | 8 +- src/drivers/mediatek/afe/afe-drv.c | 26 +-- src/drivers/mediatek/afe/afe-memif.c | 46 ++--- src/drivers/mediatek/afe/mt8186/afe-sgen.c | 10 +- src/drivers/mediatek/afe/mt8188/afe-sgen.c | 10 +- src/drivers/mediatek/afe/mt8195/afe-sgen.c | 10 +- src/drivers/mediatek/mt818x/interrupt.c | 8 +- src/drivers/mediatek/mt818x/ipc.c | 10 +- src/drivers/mediatek/mt8195/interrupt.c | 12 +- src/drivers/mediatek/mt8195/ipc.c | 2 +- src/idc/idc.c | 12 +- src/idc/zephyr_idc.c | 2 +- 
src/include/sof/audio/buffer.h | 3 - src/include/sof/audio/component.h | 3 - src/include/sof/audio/pipeline-trace.h | 9 +- src/include/sof/ipc/common.h | 2 +- src/include/sof/lib/dai-legacy.h | 1 - src/include/sof/trace/trace.h | 16 +- src/ipc/dma-copy.c | 8 +- src/ipc/ipc-common.c | 4 +- src/ipc/ipc-helper.c | 22 +-- src/ipc/ipc-zephyr.c | 9 +- src/ipc/ipc3/dai.c | 4 +- src/ipc/ipc3/handler.c | 178 +++++++++---------- src/ipc/ipc3/helper.c | 51 +++--- src/ipc/ipc3/host-page-table.c | 16 +- src/ipc/ipc4/handler.c | 128 +++++++------ src/ipc/ipc4/helper.c | 55 +++--- src/lib/agent.c | 18 +- src/lib/alloc.c | 47 +++-- src/lib/ams.c | 12 +- src/lib/clk.c | 2 +- src/lib/dai.c | 18 +- src/lib/dma.c | 36 ++-- src/lib/notifier.c | 2 +- src/lib/pm_runtime.c | 14 +- src/lib/wait.c | 2 +- src/library_manager/lib_manager.c | 63 +++---- src/library_manager/lib_notification.c | 2 +- src/library_manager/llext_manager.c | 18 +- src/math/power.c | 2 +- src/platform/amd/acp_6_3/lib/clk.c | 15 +- src/platform/intel/ace/lib/watchdog.c | 4 +- src/platform/library/schedule/edf_schedule.c | 2 +- src/platform/library/schedule/ll_schedule.c | 2 +- src/platform/mt8186/lib/clk.c | 14 +- src/platform/mt8188/lib/clk.c | 12 +- src/platform/mt8195/lib/clk.c | 14 +- src/probe/probe.c | 120 ++++++------- src/schedule/dma_multi_chan_domain.c | 10 +- src/schedule/dma_single_chan_domain.c | 26 ++- src/schedule/edf_schedule.c | 18 +- src/schedule/ll_schedule.c | 46 +++-- src/schedule/schedule.c | 2 +- src/schedule/timer_domain.c | 14 +- src/schedule/zephyr_dma_domain.c | 16 +- src/schedule/zephyr_domain.c | 14 +- src/schedule/zephyr_dp_schedule.c | 12 +- src/schedule/zephyr_ll.c | 22 ++- src/trace/dma-trace.c | 9 +- src/trace/trace.c | 10 +- xtos/include/rtos/spinlock.h | 10 +- xtos/include/rtos/wait.h | 4 +- xtos/include/sof/lib/perf_cnt.h | 4 +- zephyr/include/rtos/interrupt.h | 3 +- zephyr/lib/alloc.c | 18 +- zephyr/lib/cpu.c | 12 +- zephyr/lib/pm_runtime.c | 6 +- zephyr/wrapper.c | 3 +- 105 files 
changed, 931 insertions(+), 995 deletions(-) diff --git a/posix/include/rtos/spinlock.h b/posix/include/rtos/spinlock.h index 545a7ef53947..4090f8778efd 100644 --- a/posix/include/rtos/spinlock.h +++ b/posix/include/rtos/spinlock.h @@ -77,8 +77,6 @@ typedef uint32_t k_spinlock_key_t; extern uint32_t lock_dbg_atomic; extern uint32_t lock_dbg_user[DBG_LOCK_USERS]; -extern struct tr_ctx sl_tr; - /* panic on deadlock */ #define spin_try_lock_dbg(lock, line) \ do { \ @@ -115,14 +113,14 @@ extern struct tr_ctx sl_tr; #define spin_lock_dbg(line) \ do { \ - tr_info(&sl_tr, "LcE"); \ - tr_info(&sl_tr, "line: %d", line); \ + tr_info("LcE"); \ + tr_info("line: %d", line); \ } while (0) #define spin_unlock_dbg(line) \ do { \ - tr_info(&sl_tr, "LcX"); \ - tr_info(&sl_tr, "line: %d", line); \ + tr_info("LcX"); \ + tr_info("line: %d", line); \ } while (0) #else /* CONFIG_DEBUG_LOCKS_VERBOSE */ diff --git a/posix/include/rtos/wait.h b/posix/include/rtos/wait.h index 6531cf4e5599..99a50e899abd 100644 --- a/posix/include/rtos/wait.h +++ b/posix/include/rtos/wait.h @@ -29,13 +29,13 @@ static inline void wait_for_interrupt(int level) { LOG_MODULE_DECLARE(wait, CONFIG_SOF_LOG_LEVEL); - tr_dbg(&wait_tr, "WFE"); + tr_dbg("WFE"); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic) tr_err_atomic("atm"); #endif platform_wait_for_interrupt(level); - tr_dbg(&wait_tr, "WFX"); + tr_dbg("WFX"); } /** diff --git a/posix/include/sof/lib/perf_cnt.h b/posix/include/sof/lib/perf_cnt.h index bddda9c8e06b..d0342053a578 100644 --- a/posix/include/sof/lib/perf_cnt.h +++ b/posix/include/sof/lib/perf_cnt.h @@ -30,7 +30,7 @@ struct perf_cnt_data { #if CONFIG_PERFORMANCE_COUNTERS #define perf_cnt_trace(ctx, pcd) \ - tr_info(ctx, "perf plat last %u peak %u cpu last %u, peak %u", \ + tr_info("perf plat last %u peak %u cpu last %u, peak %u", \ (uint32_t)((pcd)->plat_delta_last), \ (uint32_t)((pcd)->plat_delta_peak), \ (uint32_t)((pcd)->cpu_delta_last), \ @@ -78,7 +78,7 @@ struct perf_cnt_data { /* perf measurement 
windows size 2^x */ #define PERF_CNT_CHECK_WINDOW_SIZE 10 #define task_perf_avg_info(pcd, task_p, class) \ - tr_info(task_p, "perf_cycle task %p, %pU cpu avg %u peak %u",\ + tr_info("perf_cycle task %p, %pU cpu avg %u peak %u",\ class, (class)->uid, \ (uint32_t)((pcd)->cpu_delta_sum), \ (uint32_t)((pcd)->cpu_delta_peak)) diff --git a/src/audio/base_fw.c b/src/audio/base_fw.c index cd9b3b00dece..55bfcf0d87bb 100644 --- a/src/audio/base_fw.c +++ b/src/audio/base_fw.c @@ -425,10 +425,10 @@ static int fw_config_set_force_l1_exit(const struct sof_tlv *tlv) const uint32_t force = tlv->value[0]; if (force) { - tr_info(&basefw_comp_tr, "FW config set force dmi l0 state"); + tr_info("FW config set force dmi l0 state"); intel_adsp_force_dmi_l0_state(); } else { - tr_info(&basefw_comp_tr, "FW config set allow dmi l1 state"); + tr_info("FW config set allow dmi l1 state"); intel_adsp_allow_dmi_l1_state(); } @@ -451,7 +451,7 @@ static int basefw_set_fw_config(bool first_block, default: break; } - tr_warn(&basefw_comp_tr, "returning success for Set FW_CONFIG without handling it"); + tr_warn("returning success for Set FW_CONFIG without handling it"); return 0; } @@ -485,7 +485,7 @@ static int basefw_get_large_config(struct comp_dev *dev, case IPC4_EXTENDED_SYSTEM_TIME: ret = basefw_get_ext_system_time(data_offset, data); if (ret == IPC4_UNAVAILABLE) { - tr_warn(&basefw_comp_tr, "returning success for get host EXTENDED_SYSTEM_TIME without handling it"); + tr_warn("returning success for get host EXTENDED_SYSTEM_TIME without handling it"); return 0; } else { return ret; diff --git a/src/audio/buffer.c b/src/audio/buffer.c index 02f6c9da0777..21eb823516cf 100644 --- a/src/audio/buffer.c +++ b/src/audio/buffer.c @@ -33,11 +33,11 @@ struct comp_buffer *buffer_alloc(uint32_t size, uint32_t caps, uint32_t flags, u struct comp_buffer *buffer; void *stream_addr; - tr_dbg(&buffer_tr, "buffer_alloc()"); + tr_dbg("buffer_alloc()"); /* validate request */ if (size == 0) { - tr_err(&buffer_tr, 
"buffer_alloc(): new size = %u is invalid", + tr_err("buffer_alloc(): new size = %u is invalid", size); return NULL; } @@ -48,7 +48,7 @@ struct comp_buffer *buffer_alloc(uint32_t size, uint32_t caps, uint32_t flags, u buffer = rzalloc(zone, 0, SOF_MEM_CAPS_RAM, sizeof(*buffer)); if (!buffer) { - tr_err(&buffer_tr, "buffer_alloc(): could not alloc structure"); + tr_err("buffer_alloc(): could not alloc structure"); return NULL; } @@ -58,7 +58,7 @@ struct comp_buffer *buffer_alloc(uint32_t size, uint32_t caps, uint32_t flags, u stream_addr = rballoc_align(0, caps, size, align); if (!stream_addr) { rfree(buffer); - tr_err(&buffer_tr, "buffer_alloc(): could not alloc size = %u bytes of type = %u", + tr_err("buffer_alloc(): could not alloc size = %u bytes of type = %u", size, caps); return NULL; } diff --git a/src/audio/chain_dma.c b/src/audio/chain_dma.c index 696973146404..61902e59452c 100644 --- a/src/audio/chain_dma.c +++ b/src/audio/chain_dma.c @@ -147,14 +147,14 @@ static void handle_xrun(struct chain_dma_data *cd) if (cd->link_connector_node_id.f.dma_type == ipc4_hda_link_output_class && !cd->xrun_notification_sent) { - tr_warn(&chain_dma_tr, "handle_xrun(): underrun detected"); + tr_warn("handle_xrun(): underrun detected"); xrun_notif_msg_init(cd->msg_xrun, cd->link_connector_node_id.dw, SOF_IPC4_GATEWAY_UNDERRUN_DETECTED); ipc_msg_send(cd->msg_xrun, NULL, true); cd->xrun_notification_sent = true; } else if (cd->link_connector_node_id.f.dma_type == ipc4_hda_link_input_class && !cd->xrun_notification_sent) { - tr_warn(&chain_dma_tr, "handle_xrun(): overrun detected"); + tr_warn("handle_xrun(): overrun detected"); xrun_notif_msg_init(cd->msg_xrun, cd->link_connector_node_id.dw, SOF_IPC4_GATEWAY_OVERRUN_DETECTED); ipc_msg_send(cd->msg_xrun, NULL, true); @@ -185,14 +185,14 @@ static enum task_state chain_task_run(void *data) case 0: break; case -EPIPE: - tr_warn(&chain_dma_tr, "chain_task_run(): dma_get_status() link xrun occurred," + tr_warn("chain_task_run(): 
dma_get_status() link xrun occurred," " ret = %u", ret); #if CONFIG_XRUN_NOTIFICATIONS_ENABLE handle_xrun(cd); #endif break; default: - tr_err(&chain_dma_tr, "chain_task_run(): dma_get_status() error, ret = %u", ret); + tr_err("chain_task_run(): dma_get_status() error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } @@ -203,7 +203,7 @@ static enum task_state chain_task_run(void *data) /* Host DMA does not report xruns. All error values will be treated as critical. */ ret = dma_get_status(cd->chan_host->dma->z_dev, cd->chan_host->index, &stat); if (ret < 0) { - tr_err(&chain_dma_tr, "chain_task_run(): dma_get_status() error, ret = %u", ret); + tr_err("chain_task_run(): dma_get_status() error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } @@ -221,15 +221,13 @@ static enum task_state chain_task_run(void *data) ret = dma_reload(cd->chan_host->dma->z_dev, cd->chan_host->index, 0, 0, increment); if (ret < 0) { - tr_err(&chain_dma_tr, - "chain_task_run(): dma_reload() host error, ret = %u", ret); + tr_err("chain_task_run(): dma_reload() host error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } ret = dma_reload(cd->chan_link->dma->z_dev, cd->chan_link->index, 0, 0, increment); if (ret < 0) { - tr_err(&chain_dma_tr, - "chain_task_run(): dma_reload() link error, ret = %u", ret); + tr_err("chain_task_run(): dma_reload() link error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } } else { @@ -246,9 +244,8 @@ static enum task_state chain_task_run(void *data) cd->chan_link->index, 0, 0, half_buff_size); if (ret < 0) { - tr_err(&chain_dma_tr, - "chain_task_run(): dma_reload() link error, ret = %u", - ret); + tr_err("chain_task_run(): dma_reload() link error, ret = %u", + ret); return SOF_TASK_STATE_COMPLETED; } cd->first_data_received = true; @@ -262,8 +259,7 @@ static enum task_state chain_task_run(void *data) ret = dma_reload(cd->chan_host->dma->z_dev, cd->chan_host->index, 0, 0, transferred); if (ret < 0) { - tr_err(&chain_dma_tr, - "chain_task_run(): 
dma_reload() host error, ret = %u", ret); + tr_err("chain_task_run(): dma_reload() host error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } @@ -272,7 +268,7 @@ static enum task_state chain_task_run(void *data) ret = dma_reload(cd->chan_link->dma->z_dev, cd->chan_link->index, 0, 0, half_buff_size); if (ret < 0) { - tr_err(&chain_dma_tr, "chain_task_run(): dma_reload() " + tr_err("chain_task_run(): dma_reload() " "link error, ret = %u", ret); return SOF_TASK_STATE_COMPLETED; } diff --git a/src/audio/channel_map.c b/src/audio/channel_map.c index 4c116d55cfa9..f9d5b3bf2d9d 100644 --- a/src/audio/channel_map.c +++ b/src/audio/channel_map.c @@ -30,7 +30,7 @@ struct sof_ipc_channel_map *chmap_get(struct sof_ipc_stream_map *smap, uint32_t byte = 0; if (index >= smap->num_ch_map) { - tr_err(&chmap_tr, "chmap_get(): index %d out of bounds %d", + tr_err("chmap_get(): index %d out of bounds %d", index, smap->num_ch_map); return NULL; diff --git a/src/audio/dp_queue.c b/src/audio/dp_queue.c index a201dde82130..23e632d71616 100644 --- a/src/audio/dp_queue.c +++ b/src/audio/dp_queue.c @@ -288,14 +288,14 @@ struct dp_queue *dp_queue_create(size_t min_available, size_t min_free_space, ui goto err; dp_queue->audio_stream_params.id = id; - tr_info(&dp_queue_tr, "DpQueue created, id: %u shared: %u min_available: %u min_free_space %u, size %u", + tr_info("DpQueue created, id: %u shared: %u min_available: %u min_free_space %u, size %u", id, dp_queue_is_shared(dp_queue), min_available, min_free_space, dp_queue->data_buffer_size); /* return a pointer to allocated structure */ return dp_queue; err: - tr_err(&dp_queue_tr, "DpQueue creation failure"); + tr_err("DpQueue creation failure"); rfree(dp_queue); return NULL; } diff --git a/src/drivers/amd/common/acp_dma.c b/src/drivers/amd/common/acp_dma.c index f85b7b3ad684..7bb09da952d0 100644 --- a/src/drivers/amd/common/acp_dma.c +++ b/src/drivers/amd/common/acp_dma.c @@ -60,13 +60,13 @@ static struct dma_chan_data 
*acp_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acpdma_tr, "DMA: Channel %d not in range", req_chan); + tr_err("DMA: Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acpdma_tr, "DMA: channel already in use %d", req_chan); + tr_err("DMA: channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -199,14 +199,14 @@ static int acp_dma_start(struct dma_chan_data *channel) return 0; } while (platform_timer_get(timer) <= deadline); - tr_err(&acpdma_tr, "acp-dma: timed out for dma start"); + tr_err("acp-dma: timed out for dma start"); return -ETIME; } static int acp_dma_release(struct dma_chan_data *channel) { - tr_info(&acpdma_tr, "DMA: release(%d)", channel->index); + tr_info("DMA: release(%d)", channel->index); if (channel->status != COMP_STATE_PAUSED) return -EINVAL; channel->status = COMP_STATE_ACTIVE; @@ -215,7 +215,7 @@ static int acp_dma_release(struct dma_chan_data *channel) static int acp_dma_pause(struct dma_chan_data *channel) { - tr_info(&acpdma_tr, "h/w pause is not supported, changing the status of(%d) channel", + tr_info("h/w pause is not supported, changing the status of(%d) channel", channel->index); if (channel->status != COMP_STATE_ACTIVE) return -EINVAL; @@ -285,14 +285,14 @@ static int acp_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acpdma_tr, "DMA: Already probe"); + tr_err("DMA: Already probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acpdma_tr, "DMA: unable to allocate channel context"); + tr_err("DMA: unable to allocate channel context"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -304,7 +304,7 @@ 
static int acp_dma_probe(struct dma *dma) sizeof(struct acp_dma_chan_data)); if (!acp_dma_chan) { rfree(dma->chan); - tr_err(&acpdma_tr, "acp-dma: %d channel %d private data alloc failed", + tr_err("acp-dma: %d channel %d private data alloc failed", dma->plat_data.id, channel); return -ENOMEM; } @@ -318,7 +318,7 @@ static int acp_dma_remove(struct dma *dma) int channel; if (!dma->chan) { - tr_err(&acpdma_tr, "DMA: Invalid remove call"); + tr_err("DMA: Invalid remove call"); return 0; } for (channel = 0; channel < dma->plat_data.channels; channel++) @@ -379,7 +379,7 @@ static int acp_dma_get_data_size(struct dma_chan_data *channel, *free = ABS(data_size) / 2; break; default: - tr_err(&acpdma_tr, "dma_get_data_size() Invalid direction %d", + tr_err("dma_get_data_size() Invalid direction %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/amd/common/acp_dmic_dma.c b/src/drivers/amd/common/acp_dmic_dma.c index 449456c911eb..8cb1f9e5085f 100644 --- a/src/drivers/amd/common/acp_dmic_dma.c +++ b/src/drivers/amd/common/acp_dmic_dma.c @@ -51,14 +51,14 @@ static struct dma_chan_data *acp_dmic_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_dmic_dma_tr, "Channel %d out of range", + tr_err("Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_dmic_dma_tr, "Cannot reuse channel %d", + tr_err("Cannot reuse channel %d", req_chan); return NULL; } @@ -82,14 +82,14 @@ static void acp_dmic_dma_channel_put(struct dma_chan_data *channel) static int acp_dmic_dma_release(struct dma_chan_data *channel) { /* nothing to do on rembrandt */ - tr_dbg(&acp_dmic_dma_tr, "dmic dma release()"); + tr_dbg("dmic dma release()"); return 0; } static int acp_dmic_dma_pause(struct dma_chan_data *channel) { /* nothing to do on rembrandt */ - tr_dbg(&acp_dmic_dma_tr, 
"dmic dma pause()"); + tr_dbg("dmic dma pause()"); return 0; } @@ -151,14 +151,14 @@ static int acp_dmic_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_dmic_dma_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_dmic_dma_tr, "unable to allocate channel descriptors"); + tr_err("unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -173,7 +173,7 @@ static int acp_dmic_dma_probe(struct dma *dma) static int acp_dmic_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_dmic_dma_tr, "remove called without probe"); + tr_err("remove called without probe"); return 0; } rfree(dma->chan); @@ -188,11 +188,11 @@ static int acp_dmic_dma_get_data_size(struct dma_chan_data *channel, *avail = dmic_rngbuff_size >> 1; *free = dmic_rngbuff_size >> 1; } else { - tr_err(&acp_dmic_dma_tr, "Channel direction Not defined %d", + tr_err("Channel direction Not defined %d", channel->direction); } - tr_info(&acp_dmic_dma_tr, "avail %d and free %d", + tr_info("avail %d and free %d", avail[0], free[0]); return 0; } diff --git a/src/drivers/amd/common/acp_sp_dma.c b/src/drivers/amd/common/acp_sp_dma.c index 04f3e3c1a7fa..6b871c5b8c50 100644 --- a/src/drivers/amd/common/acp_sp_dma.c +++ b/src/drivers/amd/common/acp_sp_dma.c @@ -48,13 +48,13 @@ static struct dma_chan_data *acp_dai_sp_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_sp_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_sp_tr, "channel already in use %d", req_chan); + tr_err("channel 
already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -112,14 +112,14 @@ static int acp_dai_sp_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_sp_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_sp_tr, "Probe failure,unable to allocate channel descriptors"); + tr_err("Probe failure,unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -134,7 +134,7 @@ static int acp_dai_sp_dma_probe(struct dma *dma) static int acp_dai_sp_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_sp_tr, "remove called without probe,it's a no-op"); + tr_err("remove called without probe,it's a no-op"); return 0; } diff --git a/src/drivers/amd/rembrandt/acp_bt_dma.c b/src/drivers/amd/rembrandt/acp_bt_dma.c index 93c9dc5b0427..38090c318806 100644 --- a/src/drivers/amd/rembrandt/acp_bt_dma.c +++ b/src/drivers/amd/rembrandt/acp_bt_dma.c @@ -61,13 +61,13 @@ static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -131,7 +131,7 @@ static int acp_dai_bt_dma_start(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_BTTDM_IRER), bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, "Start direction not defined %d", 
channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } return 0; @@ -176,7 +176,7 @@ static int acp_dai_bt_dma_stop(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_en = 0; io_reg_write(PU_REGISTER_BASE + ACP_BTTDM_IRER, bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, "direction not defined %d", channel->direction); + tr_err("direction not defined %d", channel->direction); return -EINVAL; } @@ -216,11 +216,11 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, uint32_t bt_fifo_addr; if (!config->cyclic) { - tr_err(&acp_bt_dma_tr, "cyclic configurations only supported"); + tr_err("cyclic configurations only supported"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_bt_dma_tr, "scatter enabled, that is not supported for now"); + tr_err("scatter enabled, that is not supported for now"); return -EINVAL; } @@ -272,7 +272,7 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, (bt_buff_size >> 1)); break; default: - tr_err(&acp_bt_dma_tr, "unsupported config direction"); + tr_err("unsupported config direction"); return -EINVAL; } @@ -296,14 +296,14 @@ static int acp_dai_bt_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_bt_dma_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_bt_dma_tr, "Probe failure, unable to allocate channel descriptors"); + tr_err("Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -318,7 +318,7 @@ static int acp_dai_bt_dma_probe(struct dma *dma) static int acp_dai_bt_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_bt_dma_tr, "remove called without probe, it's a no-op"); + tr_err("remove called without probe, it's a no-op"); return 0; } @@ 
-365,7 +365,7 @@ static int acp_dai_bt_dma_get_data_size(struct dma_chan_data *channel, *avail = bt_buff_size >> 1; #endif } else { - tr_err(&acp_bt_dma_tr, "Channel direction Not defined %d", + tr_err("Channel direction Not defined %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/amd/rembrandt/acp_dmic_dma.c b/src/drivers/amd/rembrandt/acp_dmic_dma.c index 5f51305897d7..2b13242d0bc6 100644 --- a/src/drivers/amd/rembrandt/acp_dmic_dma.c +++ b/src/drivers/amd/rembrandt/acp_dmic_dma.c @@ -88,7 +88,7 @@ int acp_dmic_dma_start(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_rmb_tr, "timed out for dma start"); + tr_err("timed out for dma start"); return -ETIME; } pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -129,7 +129,7 @@ int acp_dmic_dma_stop(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_rmb_tr, "timed out for dma stop"); + tr_err("timed out for dma stop"); return -ETIME; } pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -191,15 +191,15 @@ int acp_dmic_dma_set_config(struct dma_chan_data *channel, break; default: - tr_err(&acp_dmic_dma_rmb_tr, "unsupported config direction"); + tr_err("unsupported config direction"); return -EINVAL; } if (!config->cyclic) { - tr_err(&acp_dmic_dma_rmb_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_dmic_dma_rmb_tr, "scatter enabled, not supported for now!"); + tr_err("scatter enabled, not supported for now!"); return -EINVAL; } return 0; diff --git a/src/drivers/amd/rembrandt/acp_hs_dma.c b/src/drivers/amd/rembrandt/acp_hs_dma.c index cf2bbe3d2689..114101ef7c77 100644 --- a/src/drivers/amd/rembrandt/acp_hs_dma.c +++ b/src/drivers/amd/rembrandt/acp_hs_dma.c @@ 
-56,13 +56,13 @@ static struct dma_chan_data *acp_dai_hs_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_hs_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_hs_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -126,7 +126,7 @@ static int acp_dai_hs_dma_start(struct dma_chan_data *channel) hs_irer.bits.hstdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_HSTDM_IRER), hs_irer.u32all); } else { - tr_err(&acp_hs_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -173,7 +173,7 @@ static int acp_dai_hs_dma_stop(struct dma_chan_data *channel) hs_irer.bits.hstdm_rx_en = 0; io_reg_write((PU_REGISTER_BASE + ACP_HSTDM_IRER), hs_irer.u32all); } else { - tr_err(&acp_hs_tr, "Stop direction not defined %d", channel->direction); + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } hs_iter = (acp_hstdm_iter_t)io_reg_read((PU_REGISTER_BASE + ACP_HSTDM_ITER)); @@ -211,11 +211,11 @@ static int acp_dai_hs_dma_set_config(struct dma_chan_data *channel, uint32_t hs_fifo_addr; if (!config->cyclic) { - tr_err(&acp_hs_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_hs_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -269,7 +269,7 @@ static int acp_dai_hs_dma_set_config(struct dma_chan_data *channel, (hs_buff_size_capture >> 1)); } else { - tr_err(&acp_hs_tr, "Config channel direction undefined %d", 
channel->direction); + tr_err("Config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -293,14 +293,14 @@ static int acp_dai_hs_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_hs_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_hs_tr, "Probe failure,unable to allocate channel descriptors"); + tr_err("Probe failure,unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -315,7 +315,7 @@ static int acp_dai_hs_dma_probe(struct dma *dma) static int acp_dai_hs_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_hs_tr, "remove called without probe,it's a no-op"); + tr_err("remove called without probe,it's a no-op"); return 0; } @@ -362,7 +362,7 @@ static int acp_dai_hs_dma_get_data_size(struct dma_chan_data *channel, *avail = (hs_buff_size_capture >> 1); #endif } else { - tr_err(&acp_hs_tr, "Channel direction not defined %d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } return 0; diff --git a/src/drivers/amd/rembrandt/acp_sp_dma.c b/src/drivers/amd/rembrandt/acp_sp_dma.c index 008a8e41f736..533ec1d5f3fb 100644 --- a/src/drivers/amd/rembrandt/acp_sp_dma.c +++ b/src/drivers/amd/rembrandt/acp_sp_dma.c @@ -85,7 +85,7 @@ int acp_dai_sp_dma_start(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), sp_irer.u32all); } else { - tr_err(&acp_sp_rmb_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -118,7 +118,7 @@ int acp_dai_sp_dma_stop(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_en = 0; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), 
sp_irer.u32all); } else { - tr_err(&acp_sp_rmb_tr, "Stop direction not defined %d", channel->direction); + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } sp_iter = (acp_i2stdm_iter_t)io_reg_read((PU_REGISTER_BASE + ACP_I2STDM_ITER)); @@ -145,11 +145,11 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, uint32_t sp_fifo_addr; if (!config->cyclic) { - tr_err(&acp_sp_rmb_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_sp_rmb_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -201,7 +201,7 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, (sp_buff_size >> 1)); } else { - tr_err(&acp_sp_rmb_tr, "DMA Config channel direction undefined %d", + tr_err("DMA Config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -247,7 +247,7 @@ int acp_dai_sp_dma_get_data_size(struct dma_chan_data *channel, *avail = (sp_buff_size >> 1); #endif } else { - tr_err(&acp_sp_rmb_tr, "Channel direction not defined %d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } return 0; diff --git a/src/drivers/amd/rembrandt/acp_sw_audio_dma.c b/src/drivers/amd/rembrandt/acp_sw_audio_dma.c index 9a652121ea12..133d3ba60775 100644 --- a/src/drivers/amd/rembrandt/acp_sw_audio_dma.c +++ b/src/drivers/amd/rembrandt/acp_sw_audio_dma.c @@ -101,13 +101,13 @@ static struct dma_chan_data *acp_dai_sw_audio_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_sw_audio_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - 
tr_err(&acp_sw_audio_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -157,7 +157,7 @@ static int acp_dai_sw_audio_dma_start(struct dma_chan_data *channel) 0x1, 0x1, 15); break; default: - tr_err(&acp_sw_audio_tr, "Start direction not defined %d", + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -203,7 +203,7 @@ static int acp_dai_sw_audio_dma_stop(struct dma_chan_data *channel) 0x1, 0x0, 15); break; default: - tr_err(&acp_sw_audio_tr, "Stop direction not defined %d", + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } @@ -241,11 +241,11 @@ static int acp_dai_sw_audio_dma_set_config(struct dma_chan_data *channel, uint32_t sw0_audio_fifo_addr; if (!config->cyclic) { - tr_err(&acp_sw_audio_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_sw_audio_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -308,7 +308,7 @@ static int acp_dai_sw_audio_dma_set_config(struct dma_chan_data *channel, sw_audio_buff_size_capture >> 1); break; default: - tr_err(&acp_sw_audio_tr, "Config channel direction undefined %d", + tr_err("Config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -334,14 +334,14 @@ static int acp_dai_sw_audio_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_sw_audio_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_sw_audio_tr, "Probe failure,unable to allocate channel descriptors"); + tr_err("Probe failure,unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < 
dma->plat_data.channels; channel++) { @@ -356,7 +356,7 @@ static int acp_dai_sw_audio_dma_probe(struct dma *dma) static int acp_dai_sw_audio_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_sw_audio_tr, "remove called without probe,it's a no-op"); + tr_err("remove called without probe,it's a no-op"); return 0; } @@ -378,7 +378,7 @@ static int acp_dai_sw_audio_dma_get_data_size(struct dma_chan_data *channel, *avail = sw_audio_buff_size_capture >> 1; } else { - tr_err(&acp_sw_audio_tr, "Channel direction not defined %d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/amd/rembrandt/interrupt.c b/src/drivers/amd/rembrandt/interrupt.c index f183186c2f74..9405ac35d1a1 100644 --- a/src/drivers/amd/rembrandt/interrupt.c +++ b/src/drivers/amd/rembrandt/interrupt.c @@ -69,7 +69,7 @@ static void acp_irq_mask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -84,7 +84,7 @@ static void acp_irq_unmask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -136,7 +136,7 @@ static inline void acp_handle_irq_5(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&acp_irq_tr, "irq_handler(): not handled, bit %d", + tr_err("irq_handler(): not handled, bit %d", bit); acp_irq_mask_int(line_index * IRQS_PER_LINE + bit); } @@ -172,8 +172,8 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&acp_irq_tr, "irq_handler(): not handled, bit %d", - bit); + tr_err("irq_handler(): not handled, bit %d", + bit); acp_irq_mask_int(line_index * IRQS_PER_LINE + bit); } } @@ -194,7 +194,7 @@ static void 
irqhandler_5(void *data) /* Handle current interrupts */ acp_handle_irq_5(cascade, line_index, status); else - tr_err(&acp_irq_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } static inline void irq_handler(void *data, uint32_t line_index) @@ -209,7 +209,7 @@ static inline void irq_handler(void *data, uint32_t line_index) /* Handle current interrupts */ acp_handle_irq(cascade, line_index, status); else - tr_err(&acp_irq_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } #define DEFINE_IRQ_HANDLER(n) \ diff --git a/src/drivers/amd/rembrandt/ipc.c b/src/drivers/amd/rembrandt/ipc.c index f8a17cdedab5..3514780f6289 100644 --- a/src/drivers/amd/rembrandt/ipc.c +++ b/src/drivers/amd/rembrandt/ipc.c @@ -76,7 +76,7 @@ void amd_irq_handler(void *arg) } io_reg_write((PU_REGISTER_BASE + ACP_AXI2DAGB_SEM_0), lock); } else { - tr_err(&ipc_tr, "IPC:interrupt without setting flags host status 0x%x", + tr_err("IPC:interrupt without setting flags host status 0x%x", sof_ipc_host_status()); } } diff --git a/src/drivers/amd/renoir/acp_bt_dma.c b/src/drivers/amd/renoir/acp_bt_dma.c index 0d061fb57210..4e023834a779 100644 --- a/src/drivers/amd/renoir/acp_bt_dma.c +++ b/src/drivers/amd/renoir/acp_bt_dma.c @@ -60,13 +60,13 @@ static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -130,7 +130,7 @@ static int acp_dai_bt_dma_start(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE 
+ ACP_BTTDM_IRER), bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, " ACP:Start direction not defined %d", channel->direction); + tr_err(" ACP:Start direction not defined %d", channel->direction); return -EINVAL; } return 0; @@ -175,7 +175,7 @@ static int acp_dai_bt_dma_stop(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_en = 0; io_reg_write(PU_REGISTER_BASE + ACP_BTTDM_IRER, bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, "direction not defined %d", channel->direction); + tr_err("direction not defined %d", channel->direction); return -EINVAL; } @@ -214,11 +214,11 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, uint32_t fifo_addr; if (!config->cyclic) { - tr_err(&acp_bt_dma_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_bt_dma_tr, "scatter enabled, that is not supported for now"); + tr_err("scatter enabled, that is not supported for now"); return -EINVAL; } @@ -269,7 +269,7 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, (bt_buff_size >> 1)); break; default: - tr_err(&acp_bt_dma_tr, "unsupported config direction"); + tr_err("unsupported config direction"); return -EINVAL; } @@ -293,14 +293,14 @@ static int acp_dai_bt_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_bt_dma_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_bt_dma_tr, "Probe failure, unable to allocate channel descriptors"); + tr_err("Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -315,7 +315,7 @@ static int acp_dai_bt_dma_probe(struct dma *dma) static int acp_dai_bt_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_bt_dma_tr, 
"remove call without probe, it's a no-op"); + tr_err("remove call without probe, it's a no-op"); return 0; } rfree(dma->chan); @@ -361,7 +361,7 @@ static int acp_dai_bt_dma_get_data_size(struct dma_chan_data *channel, *avail = bt_buff_size >> 1; #endif } else { - tr_err(&acp_bt_dma_tr, "Channel direction Not defined %d", + tr_err("Channel direction Not defined %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/amd/renoir/acp_dmic_dma.c b/src/drivers/amd/renoir/acp_dmic_dma.c index 0da4e7993855..2ca9baf7ccf6 100644 --- a/src/drivers/amd/renoir/acp_dmic_dma.c +++ b/src/drivers/amd/renoir/acp_dmic_dma.c @@ -89,7 +89,7 @@ int acp_dmic_dma_start(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_rn_tr, "DMICDMA: timed out for dma start"); + tr_err("DMICDMA: timed out for dma start"); return -ETIME; } pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -130,7 +130,7 @@ int acp_dmic_dma_stop(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_rn_tr, "DMIC-DMA: timed out for dma stop"); + tr_err("DMIC-DMA: timed out for dma stop"); return -ETIME; } pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -193,18 +193,15 @@ int acp_dmic_dma_set_config(struct dma_chan_data *channel, acp_initsilence.numfilterbuffers = DMIC_SMOOTH_TIME_MS / timeperiod_ms; break; default: - tr_err(&acp_dmic_dma_rn_tr, - "dmic dma_set_config() unsupported config direction"); + tr_err("dmic dma_set_config() unsupported config direction"); return -EINVAL; } if (!config->cyclic) { - tr_err(&acp_dmic_dma_rn_tr, - "DMIC DMA: cyclic configurations only supported!"); + tr_err("DMIC DMA: cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_dmic_dma_rn_tr, - "DMIC DMA: scatter enabled, not supported for now!"); + 
tr_err("DMIC DMA: scatter enabled, not supported for now!"); return -EINVAL; } return 0; diff --git a/src/drivers/amd/renoir/acp_sp_dma.c b/src/drivers/amd/renoir/acp_sp_dma.c index 4cddabba2486..01e299685dd9 100644 --- a/src/drivers/amd/renoir/acp_sp_dma.c +++ b/src/drivers/amd/renoir/acp_sp_dma.c @@ -81,7 +81,7 @@ int acp_dai_sp_dma_start(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), sp_irer.u32all); } else { - tr_err(&acp_sp_rn_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -114,7 +114,7 @@ int acp_dai_sp_dma_stop(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_en = 0; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), sp_irer.u32all); } else { - tr_err(&acp_sp_rn_tr, "Stop direction not defined %d", channel->direction); + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } sp_iter = (acp_i2stdm_iter_t)io_reg_read((PU_REGISTER_BASE + ACP_I2STDM_ITER)); @@ -141,11 +141,11 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, uint32_t sp_fifo_addr; if (!config->cyclic) { - tr_err(&acp_sp_rn_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_sp_rn_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -200,7 +200,7 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, (sp_buff_size_capture >> 1)); } else { - tr_err(&acp_sp_rn_tr, "config channel direction undefined %d", channel->direction); + tr_err("config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -245,7 +245,7 @@ int acp_dai_sp_dma_get_data_size(struct dma_chan_data *channel, *free = (sp_buff_size_capture >> 1); #endif } else { - tr_err(&acp_sp_rn_tr, "Channel direction not defined 
%d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } return 0; diff --git a/src/drivers/amd/renoir/interrupt.c b/src/drivers/amd/renoir/interrupt.c index 487ef620f810..08a7f3593ef4 100644 --- a/src/drivers/amd/renoir/interrupt.c +++ b/src/drivers/amd/renoir/interrupt.c @@ -61,7 +61,7 @@ static void acp_irq_mask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -73,7 +73,7 @@ static void acp_irq_unmask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -119,8 +119,8 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&acp_irq_tr, "irq_handler(): not handled, bit %d", - bit); + tr_err("irq_handler(): not handled, bit %d", + bit); acp_irq_mask_int(line_index * IRQS_PER_LINE + bit); } } @@ -139,7 +139,7 @@ static inline void irq_handler(void *data, uint32_t line_index) /* Handle current interrupts */ acp_handle_irq(cascade, line_index, status); else - tr_err(&acp_irq_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } #define DEFINE_IRQ_HANDLER(n) \ diff --git a/src/drivers/amd/renoir/ipc.c b/src/drivers/amd/renoir/ipc.c index b718f57540ba..9e60811bf38f 100644 --- a/src/drivers/amd/renoir/ipc.c +++ b/src/drivers/amd/renoir/ipc.c @@ -62,7 +62,7 @@ void amd_irq_handler(void *arg) delay_cnt--; } if (lock_fail) { - tr_err(&ipc_tr, "ACP fail to acquire the lock"); + tr_err("ACP fail to acquire the lock"); return; } /* Check if it is response from host */ @@ -89,7 +89,7 @@ void amd_irq_handler(void *arg) } io_reg_write((PU_REGISTER_BASE + ACP_AXI2DAGB_SEM_0), lock); } else { - tr_err(&ipc_tr, "IPC:interrupt without setting flags 
host status 0x%x", + tr_err("IPC:interrupt without setting flags host status 0x%x", sof_ipc_host_status()); } } diff --git a/src/drivers/amd/vangogh/acp_bt_dma.c b/src/drivers/amd/vangogh/acp_bt_dma.c index b65c7c4c9ec9..6bfa719ae626 100644 --- a/src/drivers/amd/vangogh/acp_bt_dma.c +++ b/src/drivers/amd/vangogh/acp_bt_dma.c @@ -61,13 +61,13 @@ static struct dma_chan_data *acp_dai_bt_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_bt_dma_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -127,7 +127,7 @@ static int acp_dai_bt_dma_start(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_BTTDM_IRER), bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } return 0; @@ -170,7 +170,7 @@ static int acp_dai_bt_dma_stop(struct dma_chan_data *channel) bt_tdm_irer.bits.bttdm_rx_en = 0; io_reg_write(PU_REGISTER_BASE + ACP_BTTDM_IRER, bt_tdm_irer.u32all); } else { - tr_err(&acp_bt_dma_tr, "direction not defined %d", channel->direction); + tr_err("direction not defined %d", channel->direction); return -EINVAL; } @@ -205,11 +205,11 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, uint32_t bt_fifo_addr; if (!config->cyclic) { - tr_err(&acp_bt_dma_tr, "cyclic configurations only supported"); + tr_err("cyclic configurations only supported"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_bt_dma_tr, "scatter enabled, that is not 
supported for now"); + tr_err("scatter enabled, that is not supported for now"); return -EINVAL; } @@ -267,7 +267,7 @@ static int acp_dai_bt_dma_set_config(struct dma_chan_data *channel, (bt_buff_size >> 1)); break; default: - tr_err(&acp_bt_dma_tr, "unsupported config direction"); + tr_err("unsupported config direction"); return -EINVAL; } @@ -291,14 +291,14 @@ static int acp_dai_bt_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_bt_dma_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_bt_dma_tr, "Probe failure, unable to allocate channel descriptors"); + tr_err("Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -313,7 +313,7 @@ static int acp_dai_bt_dma_probe(struct dma *dma) static int acp_dai_bt_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_bt_dma_tr, "remove called without probe, it's a no-op"); + tr_err("remove called without probe, it's a no-op"); return 0; } @@ -360,7 +360,7 @@ static int acp_dai_bt_dma_get_data_size(struct dma_chan_data *channel, *avail = bt_buff_size >> 1; #endif } else { - tr_err(&acp_bt_dma_tr, "Channel direction Not defined %d", + tr_err("Channel direction Not defined %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/amd/vangogh/acp_dmic_dma.c b/src/drivers/amd/vangogh/acp_dmic_dma.c index 6d1a67369573..b032c8c225aa 100644 --- a/src/drivers/amd/vangogh/acp_dmic_dma.c +++ b/src/drivers/amd/vangogh/acp_dmic_dma.c @@ -89,7 +89,7 @@ int acp_dmic_dma_start(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_vgh_tr, "timed out for dma start"); + tr_err("timed out for dma start"); return -ETIME; } 
pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -131,7 +131,7 @@ int acp_dmic_dma_stop(struct dma_chan_data *channel) /* safe check in case we've got preempted after read */ if ((uint32_t)pdm_dma_enable.bits.pdm_dma_en_status) return 0; - tr_err(&acp_dmic_dma_vgh_tr, "timed out for dma stop"); + tr_err("timed out for dma stop"); return -ETIME; } pdm_dma_enable = (acp_wov_pdm_dma_enable_t) @@ -185,15 +185,15 @@ int acp_dmic_dma_set_config(struct dma_chan_data *channel, ACP_WOV_RX_INTR_WATERMARK_SIZE, watermark.u32all); break; default: - tr_err(&acp_dmic_dma_vgh_tr, "unsupported config direction"); + tr_err("unsupported config direction"); return -EINVAL; } if (!config->cyclic) { - tr_err(&acp_dmic_dma_vgh_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_dmic_dma_vgh_tr, "scatter enabled, not supported for now!"); + tr_err("scatter enabled, not supported for now!"); return -EINVAL; } return 0; diff --git a/src/drivers/amd/vangogh/acp_hs_dma.c b/src/drivers/amd/vangogh/acp_hs_dma.c index 9059af896e7d..eaa04c1b1ed2 100644 --- a/src/drivers/amd/vangogh/acp_hs_dma.c +++ b/src/drivers/amd/vangogh/acp_hs_dma.c @@ -55,13 +55,13 @@ static struct dma_chan_data *acp_dai_hs_dma_channel_get(struct dma *dma, key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_hs_tr, "Channel %d not in range", req_chan); + tr_err("Channel %d not in range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&acp_hs_tr, "channel already in use %d", req_chan); + tr_err("channel already in use %d", req_chan); return NULL; } atomic_add(&dma->num_channels_busy, 1); @@ -122,7 +122,7 @@ static int acp_dai_hs_dma_start(struct dma_chan_data *channel) hs_irer.bits.hstdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_HSTDM_IRER), hs_irer.u32all); } 
else { - tr_err(&acp_hs_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -170,7 +170,7 @@ static int acp_dai_hs_dma_stop(struct dma_chan_data *channel) hs_irer.bits.hstdm_rx_en = 0; io_reg_write((PU_REGISTER_BASE + ACP_HSTDM_IRER), hs_irer.u32all); } else { - tr_err(&acp_hs_tr, "Stop direction not defined %d", channel->direction); + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } hs_iter = (acp_hstdm_iter_t)io_reg_read((PU_REGISTER_BASE + ACP_HSTDM_ITER)); @@ -206,11 +206,11 @@ static int acp_dai_hs_dma_set_config(struct dma_chan_data *channel, uint32_t hs_fifo_addr; if (!config->cyclic) { - tr_err(&acp_hs_tr, "cyclic configurations only supported!"); + tr_err("cyclic configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_hs_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -267,7 +267,7 @@ static int acp_dai_hs_dma_set_config(struct dma_chan_data *channel, (hs_buff_size >> 1)); } else { - tr_err(&acp_hs_tr, "Config channel direction undefined %d", channel->direction); + tr_err("Config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -281,7 +281,7 @@ static int acp_dai_hs_dma_copy(struct dma_chan_data *channel, int bytes, .channel = channel, .elem.size = bytes, }; - tr_info(&acp_hs_tr, "acp_dai_hs_dma_copy "); + tr_info("acp_dai_hs_dma_copy "); notifier_event(channel, NOTIFIER_ID_DMA_COPY, NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next)); return 0; @@ -292,14 +292,14 @@ static int acp_dai_hs_dma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&acp_hs_tr, "Repeated probe"); + tr_err("Repeated probe"); return -EEXIST; } dma->chan = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&acp_hs_tr, 
"Probe failure,unable to allocate channel descriptors"); + tr_err("Probe failure,unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -314,7 +314,7 @@ static int acp_dai_hs_dma_probe(struct dma *dma) static int acp_dai_hs_dma_remove(struct dma *dma) { if (!dma->chan) { - tr_err(&acp_hs_tr, "remove called without probe,it's a no-op"); + tr_err("remove called without probe,it's a no-op"); return 0; } @@ -361,7 +361,7 @@ static int acp_dai_hs_dma_get_data_size(struct dma_chan_data *channel, *avail = (hs_buff_size >> 1); #endif } else { - tr_err(&acp_hs_tr, "Channel direction not defined %d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } return 0; diff --git a/src/drivers/amd/vangogh/acp_sp_dma.c b/src/drivers/amd/vangogh/acp_sp_dma.c index 8d8d9c7e4f6b..6fbd1a9c62fe 100644 --- a/src/drivers/amd/vangogh/acp_sp_dma.c +++ b/src/drivers/amd/vangogh/acp_sp_dma.c @@ -81,7 +81,7 @@ int acp_dai_sp_dma_start(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_samplen = 2; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), sp_irer.u32all); } else { - tr_err(&acp_sp_vgh_tr, "Start direction not defined %d", channel->direction); + tr_err("Start direction not defined %d", channel->direction); return -EINVAL; } @@ -115,7 +115,7 @@ int acp_dai_sp_dma_stop(struct dma_chan_data *channel) sp_irer.bits.i2stdm_rx_en = 0; io_reg_write((PU_REGISTER_BASE + ACP_I2STDM_IRER), sp_irer.u32all); } else { - tr_err(&acp_sp_vgh_tr, "Stop direction not defined %d", channel->direction); + tr_err("Stop direction not defined %d", channel->direction); return -EINVAL; } sp_iter = (acp_i2stdm_iter_t)io_reg_read((PU_REGISTER_BASE + ACP_I2STDM_ITER)); @@ -140,11 +140,11 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, uint32_t sp_fifo_addr; if (!config->cyclic) { - tr_err(&acp_sp_vgh_tr, "cyclic configurations only supported!"); + tr_err("cyclic 
configurations only supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&acp_sp_vgh_tr, "scatter enabled, that is not supported for now!"); + tr_err("scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -202,7 +202,7 @@ int acp_dai_sp_dma_set_config(struct dma_chan_data *channel, (sp_buff_size >> 1)); } else { - tr_err(&acp_sp_vgh_tr, "DMA Config channel direction undefined %d", + tr_err("DMA Config channel direction undefined %d", channel->direction); return -EINVAL; } @@ -250,7 +250,7 @@ int acp_dai_sp_dma_get_data_size(struct dma_chan_data *channel, *avail = (sp_buff_size >> 1); #endif } else { - tr_err(&acp_sp_vgh_tr, "Channel direction not defined %d", channel->direction); + tr_err("Channel direction not defined %d", channel->direction); return -EINVAL; } return 0; diff --git a/src/drivers/amd/vangogh/interrupt.c b/src/drivers/amd/vangogh/interrupt.c index 4e107ced7722..1e5700fea041 100644 --- a/src/drivers/amd/vangogh/interrupt.c +++ b/src/drivers/amd/vangogh/interrupt.c @@ -60,7 +60,7 @@ static void acp_irq_mask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -72,7 +72,7 @@ static void acp_irq_unmask_int(uint32_t irq) uint32_t mask; if (irq < RESERVED_IRQS_NUM || irq >= IRQS_NUM) { - tr_err(&acp_irq_tr, "Invalid interrupt"); + tr_err("Invalid interrupt"); return; } mask = IRQ_INT_MASK(irq); @@ -121,7 +121,7 @@ static inline void acp_handle_irq_5(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&acp_irq_tr, "irq_handler(): not handled, bit %d", + tr_err("irq_handler(): not handled, bit %d", bit); acp_irq_mask_int(line_index * IRQS_PER_LINE + bit); } @@ -157,7 +157,7 @@ static inline void acp_handle_irq(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&acp_irq_tr, "irq_handler(): not handled, 
bit %d", bit); + tr_err("irq_handler(): not handled, bit %d", bit); acp_irq_mask_int(line_index * IRQS_PER_LINE + bit); } } @@ -178,7 +178,7 @@ static void irqhandler_5(void *data) /* Handle current interrupts */ acp_handle_irq_5(cascade, line_index, status); else - tr_err(&acp_irq_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } static inline void irq_handler(void *data, uint32_t line_index) @@ -193,7 +193,7 @@ static inline void irq_handler(void *data, uint32_t line_index) /* Handle current interrupts */ acp_handle_irq(cascade, line_index, status); else - tr_err(&acp_irq_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } #define DEFINE_IRQ_HANDLER(n) \ diff --git a/src/drivers/amd/vangogh/ipc.c b/src/drivers/amd/vangogh/ipc.c index 05f3de709310..2e97ce1cc590 100644 --- a/src/drivers/amd/vangogh/ipc.c +++ b/src/drivers/amd/vangogh/ipc.c @@ -76,7 +76,7 @@ void amd_irq_handler(void *arg) } io_reg_write((PU_REGISTER_BASE + ACP_AXI2DAGB_SEM_0), lock); } else { - tr_err(&ipc_tr, "IPC:interrupt without setting flags host status 0x%x", + tr_err("IPC:interrupt without setting flags host status 0x%x", sof_ipc_host_status()); } } diff --git a/src/drivers/dw/dma.c b/src/drivers/dw/dma.c index 53f808113b05..bb4a9bb3aadc 100644 --- a/src/drivers/dw/dma.c +++ b/src/drivers/dw/dma.c @@ -190,7 +190,7 @@ static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, k_spinlock_key_t key; int i; - tr_info(&dwdma_tr, "dw_dma_channel_get(): dma %d request channel %d", + tr_info("dw_dma_channel_get(): dma %d request channel %d", dma->plat_data.id, req_chan); key = k_spin_lock(&dma->lock); @@ -217,7 +217,7 @@ static struct dma_chan_data *dw_dma_channel_get(struct dma *dma, /* DMA controller has no free channels */ k_spin_unlock(&dma->lock, key); - tr_err(&dwdma_tr, "dw_dma_channel_get(): dma %d no free channels", + tr_err("dw_dma_channel_get(): dma %d no free channels", dma->plat_data.id); return NULL; @@ -256,7 +256,7 @@ static void 
dw_dma_channel_put(struct dma_chan_data *channel) { k_spinlock_key_t key; - tr_info(&dwdma_tr, "dw_dma_channel_put(): dma %d channel %d put", + tr_info("dw_dma_channel_put(): dma %d channel %d put", channel->dma->plat_data.id, channel->index); key = k_spin_lock(&channel->dma->lock); @@ -275,7 +275,7 @@ static int dw_dma_start(struct dma_chan_data *channel) uint32_t words_per_tfr = 0; #endif - tr_dbg(&dwdma_tr, "dw_dma_start(): dma %d channel %d start", + tr_dbg("dw_dma_start(): dma %d channel %d start", channel->dma->plat_data.id, channel->index); irq_local_disable(flags); @@ -284,7 +284,7 @@ static int dw_dma_start(struct dma_chan_data *channel) if ((channel->status != COMP_STATE_PREPARE && channel->status != COMP_STATE_PAUSED) || (dma_reg_read(dma, DW_DMA_CHAN_EN) & DW_CHAN(channel->index))) { - tr_err(&dwdma_tr, "dw_dma_start(): dma %d channel %d not ready ena 0x%x status 0x%x", + tr_err("dw_dma_start(): dma %d channel %d not ready ena 0x%x status 0x%x", dma->plat_data.id, channel->index, dma_reg_read(dma, DW_DMA_CHAN_EN), channel->status); @@ -294,7 +294,7 @@ static int dw_dma_start(struct dma_chan_data *channel) /* is valid stream */ if (!dw_chan->lli) { - tr_err(&dwdma_tr, "dw_dma_start(): dma %d channel %d invalid stream", + tr_err("dw_dma_start(): dma %d channel %d invalid stream", dma->plat_data.id, channel->index); ret = -EINVAL; goto out; @@ -350,7 +350,7 @@ static int dw_dma_release(struct dma_chan_data *channel) struct dw_dma_chan_data *dw_chan = dma_chan_get_data(channel); uint32_t flags; - tr_info(&dwdma_tr, "dw_dma_release(): dma %d channel %d release", + tr_info("dw_dma_release(): dma %d channel %d release", channel->dma->plat_data.id, channel->index); irq_local_disable(flags); @@ -372,7 +372,7 @@ static int dw_dma_pause(struct dma_chan_data *channel) struct dma *dma = channel->dma; uint32_t flags; - tr_info(&dwdma_tr, "dw_dma_pause(): dma %d channel %d pause", + tr_info("dw_dma_pause(): dma %d channel %d pause", channel->dma->plat_data.id, 
channel->index); irq_local_disable(flags); @@ -407,7 +407,7 @@ static int dw_dma_stop(struct dma_chan_data *channel) int i; #endif - tr_info(&dwdma_tr, "dw_dma_stop(): dma %d channel %d stop", + tr_info("dw_dma_stop(): dma %d channel %d stop", dma->plat_data.id, channel->index); irq_local_disable(flags); @@ -430,7 +430,7 @@ static int dw_dma_stop(struct dma_chan_data *channel) DW_CFGL_FIFO_EMPTY, DW_DMA_TIMEOUT); if (ret < 0) - tr_err(&dwdma_tr, "dw_dma_stop(): dma %d channel %d timeout", + tr_err("dw_dma_stop(): dma %d channel %d timeout", dma->plat_data.id, channel->index); #endif @@ -440,7 +440,7 @@ static int dw_dma_stop(struct dma_chan_data *channel) ret = poll_for_register_delay(dma_base(dma) + DW_DMA_CHAN_EN, DW_CHAN(channel->index), 0, DW_DMA_TIMEOUT); if (ret < 0) { - tr_err(&dwdma_tr, "dw_dma_stop(): dma %d channel %d disable timeout", + tr_err("dw_dma_stop(): dma %d channel %d disable timeout", dma->plat_data.id, channel->index); return -ETIMEDOUT; } @@ -501,7 +501,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, int ret = 0; int i; - tr_dbg(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d config", + tr_dbg("dw_dma_set_config(): dma %d channel %d config", channel->dma->plat_data.id, channel->index); irq_local_disable(flags); @@ -516,7 +516,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, dw_chan->cfg_hi = DW_CFG_HIGH_DEF; if (!config->elem_array.count) { - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d no elems", + tr_err("dw_dma_set_config(): dma %d channel %d no elems", channel->dma->plat_data.id, channel->index); ret = -EINVAL; goto out; @@ -524,7 +524,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, if (config->irq_disabled && config->elem_array.count < DW_DMA_CFG_NO_IRQ_MIN_ELEMS) { - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d not enough elems for config with irq disabled %d", + tr_err("dw_dma_set_config(): dma %d channel %d not enough elems for config with irq disabled %d", 
channel->dma->plat_data.id, channel->index, config->elem_array.count); ret = -EINVAL; @@ -551,7 +551,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, SOF_MEM_CAPS_RAM | SOF_MEM_CAPS_DMA, sizeof(struct dw_lli) * channel->desc_count); if (!dw_chan->lli) { - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d lli alloc failed", + tr_err("dw_dma_set_config(): dma %d channel %d lli alloc failed", channel->dma->plat_data.id, channel->index); ret = -ENOMEM; @@ -603,7 +603,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, lli_desc->ctrl_lo |= DW_CTLL_SRC_WIDTH(2); break; default: - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d invalid src width %d", + tr_err("dw_dma_set_config(): dma %d channel %d invalid src width %d", channel->dma->plat_data.id, channel->index, config->src_width); ret = -EINVAL; @@ -631,7 +631,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, lli_desc->ctrl_lo |= DW_CTLL_DST_WIDTH(2); break; default: - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d invalid dest width %d", + tr_err("dw_dma_set_config(): dma %d channel %d invalid dest width %d", channel->dma->plat_data.id, channel->index, config->dest_width); ret = -EINVAL; @@ -709,7 +709,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, DW_CFGH_DST(config->dest_dev); break; default: - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d invalid direction %d", + tr_err("dw_dma_set_config(): dma %d channel %d invalid direction %d", channel->dma->plat_data.id, channel->index, config->direction); ret = -EINVAL; @@ -720,7 +720,7 @@ static int dw_dma_set_config(struct dma_chan_data *channel, lli_desc->dar = sg_elem->dest; if (sg_elem->size > DW_CTLH_BLOCK_TS_MASK) { - tr_err(&dwdma_tr, "dw_dma_set_config(): dma %d channel %d block size too big %d", + tr_err("dw_dma_set_config(): dma %d channel %d block size too big %d", channel->dma->plat_data.id, channel->index, sg_elem->size); ret = -EINVAL; @@ -842,7 +842,7 @@ 
static int dw_dma_copy(struct dma_chan_data *channel, int bytes, }; k_spinlock_key_t key; - tr_dbg(&dwdma_tr, "dw_dma_copy(): dma %d channel %d copy", + tr_dbg("dw_dma_copy(): dma %d channel %d copy", channel->dma->plat_data.id, channel->index); notifier_event(channel, NOTIFIER_ID_DMA_COPY, @@ -862,7 +862,7 @@ static int dw_dma_copy(struct dma_chan_data *channel, int bytes, DW_CHAN(channel->index), 0, DW_DMA_TIMEOUT); if (ret < 0) { - tr_dbg(&dwdma_tr, "dw_dma_copy(): poll_for_register_delay timeout"); + tr_dbg("dw_dma_copy(): poll_for_register_delay timeout"); return ret; } } @@ -891,7 +891,7 @@ static int dw_dma_setup(struct dma *dma) break; if (!i) { - tr_err(&dwdma_tr, "dw_dma_setup(): dma %d setup failed", + tr_err("dw_dma_setup(): dma %d setup failed", dma->plat_data.id); return -EIO; } @@ -943,7 +943,7 @@ static int dw_dma_probe(struct dma *dma) sizeof(struct dma_chan_data) * dma->plat_data.channels); if (!dma->chan) { - tr_err(&dwdma_tr, "dw_dma_probe(): dma %d allocaction of channels failed", + tr_err("dw_dma_probe(): dma %d allocaction of channels failed", dma->plat_data.id); goto out; } @@ -964,7 +964,7 @@ static int dw_dma_probe(struct dma *dma) sizeof(*dw_chan)); if (!dw_chan) { - tr_err(&dwdma_tr, "dw_dma_probe(): dma %d allocaction of channel %d private data failed", + tr_err("dw_dma_probe(): dma %d allocaction of channel %d private data failed", dma->plat_data.id, i); goto out; } @@ -992,7 +992,7 @@ static int dw_dma_remove(struct dma *dma) { int i; - tr_dbg(&dwdma_tr, "dw_dma_remove(): dma %d remove", dma->plat_data.id); + tr_dbg("dw_dma_remove(): dma %d remove", dma->plat_data.id); pm_runtime_put_sync(DW_DMAC_CLK, dma->plat_data.id); @@ -1027,10 +1027,10 @@ static int dw_dma_avail_data_size(struct dma_chan_data *channel) if (delta) size = dw_chan->ptr_data.buffer_bytes; else - tr_info(&dwdma_tr, "dw_dma_avail_data_size() size is 0!"); + tr_info("dw_dma_avail_data_size() size is 0!"); } - tr_dbg(&dwdma_tr, "DAR %x reader 0x%x free 0x%x avail 0x%x", 
write_ptr, + tr_dbg("DAR %x reader 0x%x free 0x%x avail 0x%x", write_ptr, read_ptr, dw_chan->ptr_data.buffer_bytes - size, size); return size; @@ -1058,10 +1058,10 @@ static int dw_dma_free_data_size(struct dma_chan_data *channel) if (delta) size = dw_chan->ptr_data.buffer_bytes; else - tr_info(&dwdma_tr, "dw_dma_free_data_size() size is 0!"); + tr_info("dw_dma_free_data_size() size is 0!"); } - tr_dbg(&dwdma_tr, "SAR %x writer 0x%x free 0x%x avail 0x%x", read_ptr, + tr_dbg("SAR %x writer 0x%x free 0x%x avail 0x%x", read_ptr, write_ptr, size, dw_chan->ptr_data.buffer_bytes - size); return size; @@ -1074,7 +1074,7 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, k_spinlock_key_t key; int ret = 0; - tr_dbg(&dwdma_tr, "dw_dma_get_data_size(): dma %d channel %d get data size", + tr_dbg("dw_dma_get_data_size(): dma %d channel %d get data size", channel->dma->plat_data.id, channel->index); key = k_spin_lock(&channel->dma->lock); @@ -1093,7 +1093,7 @@ static int dw_dma_get_data_size(struct dma_chan_data *channel, #if CONFIG_DMA_HW_LLI if (!(dma_reg_read(channel->dma, DW_DMA_CHAN_EN) & DW_CHAN(channel->index))) { - tr_err(&dwdma_tr, "dw_dma_get_data_size(): xrun detected"); + tr_err("dw_dma_get_data_size(): xrun detected"); return -ENODATA; } #endif diff --git a/src/drivers/dw/ssi-spi.c b/src/drivers/dw/ssi-spi.c index 967dd6abaeb4..a28525df7e36 100644 --- a/src/drivers/dw/ssi-spi.c +++ b/src/drivers/dw/ssi-spi.c @@ -337,7 +337,7 @@ int spi_push(struct spi *spi, const void *data, size_t size) int ret; if (size > SPI_BUFFER_SIZE) { - tr_err(&ipc_tr, "ePs"); + tr_err("ePs"); return -ENOBUFS; } @@ -457,7 +457,7 @@ int spi_probe(struct spi *spi) spi->rx_buffer = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_DMA, SPI_BUFFER_SIZE); if (!spi->rx_buffer) { - tr_err(&ipc_tr, "eSp"); + tr_err("eSp"); return -ENOMEM; } @@ -466,7 +466,7 @@ int spi_probe(struct spi *spi) spi->buffer_size = SPI_BUFFER_SIZE; if (!spi->tx_buffer) { rfree(spi->rx_buffer); - 
tr_err(&ipc_tr, "eSp"); + tr_err("eSp"); return -ENOMEM; } diff --git a/src/drivers/generic/dummy-dma.c b/src/drivers/generic/dummy-dma.c index 173ca12deb27..1d61c5dda78f 100644 --- a/src/drivers/generic/dummy-dma.c +++ b/src/drivers/generic/dummy-dma.c @@ -242,7 +242,7 @@ static struct dma_chan_data *dummy_dma_channel_get(struct dma *dma, } } k_spin_unlock(&dma->lock, key); - tr_err(&ddma_tr, "dummy-dmac: %d no free channel", + tr_err("dummy-dmac: %d no free channel", dma->plat_data.id); return NULL; } @@ -340,7 +340,7 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, key = k_spin_lock(&channel->dma->lock); if (!config->elem_array.count) { - tr_err(&ddma_tr, "dummy-dmac: %d channel %d no DMA descriptors", + tr_err("dummy-dmac: %d channel %d no DMA descriptors", channel->dma->plat_data.id, channel->index); @@ -353,7 +353,7 @@ static int dummy_dma_set_config(struct dma_chan_data *channel, if (config->direction != DMA_DIR_HMEM_TO_LMEM && config->direction != DMA_DIR_LMEM_TO_HMEM) { /* Shouldn't even happen though */ - tr_err(&ddma_tr, "dummy-dmac: %d channel %d invalid direction %d", + tr_err("dummy-dmac: %d channel %d invalid direction %d", channel->dma->plat_data.id, channel->index, config->direction); ret = -EINVAL; @@ -418,7 +418,7 @@ static int dummy_dma_probe(struct dma *dma) int i; if (dma->chan) { - tr_err(&ddma_tr, "dummy-dmac %d already created!", + tr_err("dummy-dmac %d already created!", dma->plat_data.id); return -EEXIST; /* already created */ } @@ -426,7 +426,7 @@ static int dummy_dma_probe(struct dma *dma) dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(dma->chan[0])); if (!dma->chan) { - tr_err(&ddma_tr, "dummy-dmac %d: Out of memory!", + tr_err("dummy-dmac %d: Out of memory!", dma->plat_data.id); return -ENOMEM; } @@ -435,7 +435,7 @@ static int dummy_dma_probe(struct dma *dma) dma->plat_data.channels * sizeof(chanp[0])); if (!chanp) { rfree(dma->chan); - tr_err(&ddma_tr, 
"dummy-dmac %d: Out of memory!", + tr_err("dummy-dmac %d: Out of memory!", dma->plat_data.id); dma->chan = NULL; return -ENOMEM; @@ -466,7 +466,7 @@ static int dummy_dma_probe(struct dma *dma) */ static int dummy_dma_remove(struct dma *dma) { - tr_dbg(&ddma_tr, "dummy_dma %d -> remove", dma->plat_data.id); + tr_dbg("dummy_dma %d -> remove", dma->plat_data.id); if (!dma->chan) return 0; @@ -499,7 +499,7 @@ static int dummy_dma_get_data_size(struct dma_chan_data *channel, *free = size; break; default: - tr_err(&ddma_tr, "get_data_size direction: %d", + tr_err("get_data_size direction: %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/imx/edma.c b/src/drivers/imx/edma.c index f521877857e2..a6cf58885b29 100644 --- a/src/drivers/imx/edma.c +++ b/src/drivers/imx/edma.c @@ -101,19 +101,19 @@ static struct dma_chan_data *edma_channel_get(struct dma *dma, k_spinlock_key_t key; struct dma_chan_data *channel; - tr_dbg(&edma_tr, "EDMA: channel_get(%d)", req_chan); + tr_dbg("EDMA: channel_get(%d)", req_chan); key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&edma_tr, "EDMA: Channel %d out of range", req_chan); + tr_err("EDMA: Channel %d out of range", req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&edma_tr, "EDMA: Cannot reuse channel %d", req_chan); + tr_err("EDMA: Cannot reuse channel %d", req_chan); return NULL; } @@ -132,7 +132,7 @@ static void edma_channel_put(struct dma_chan_data *channel) /* Assuming channel is stopped, we thus don't need hardware to * do anything right now */ - tr_info(&edma_tr, "EDMA: channel_put(%d)", channel->index); + tr_info("EDMA: channel_put(%d)", channel->index); notifier_unregister_all(NULL, channel); @@ -144,7 +144,7 @@ static void edma_channel_put(struct dma_chan_data *channel) static int edma_start(struct dma_chan_data *channel) { - tr_info(&edma_tr, "EDMA: 
start(%d)", channel->index); + tr_info("EDMA: start(%d)", channel->index); if (channel->status != COMP_STATE_PREPARE && channel->status != COMP_STATE_PAUSED) @@ -160,7 +160,7 @@ static int edma_start(struct dma_chan_data *channel) static int edma_release(struct dma_chan_data *channel) { /* TODO actually handle pause/release properly? */ - tr_info(&edma_tr, "EDMA: release(%d)", channel->index); + tr_info("EDMA: release(%d)", channel->index); if (channel->status != COMP_STATE_PAUSED) return -EINVAL; @@ -172,7 +172,7 @@ static int edma_release(struct dma_chan_data *channel) static int edma_pause(struct dma_chan_data *channel) { /* TODO actually handle pause/release properly? */ - tr_info(&edma_tr, "EDMA: pause(%d)", channel->index); + tr_info("EDMA: pause(%d)", channel->index); if (channel->status != COMP_STATE_ACTIVE) return -EINVAL; @@ -187,7 +187,7 @@ static int edma_pause(struct dma_chan_data *channel) static int edma_stop(struct dma_chan_data *channel) { - tr_info(&edma_tr, "EDMA: stop(%d)", channel->index); + tr_info("EDMA: stop(%d)", channel->index); /* Validate state */ // TODO: Should we? 
switch (channel->status) { @@ -378,7 +378,7 @@ static int edma_set_config(struct dma_chan_data *channel, int16_t soff = 0; int16_t doff = 0; - tr_info(&edma_tr, "EDMA: set config"); + tr_info("EDMA: set config"); channel->is_scheduling_source = config->is_scheduling_source; channel->direction = config->direction; @@ -393,16 +393,16 @@ static int edma_set_config(struct dma_chan_data *channel, doff = config->dest_width; break; default: - tr_err(&edma_tr, "edma_set_config() unsupported config direction"); + tr_err("edma_set_config() unsupported config direction"); return -EINVAL; } if (!config->cyclic) { - tr_err(&edma_tr, "EDMA: Only cyclic configurations are supported!"); + tr_err("EDMA: Only cyclic configurations are supported!"); return -EINVAL; } if (config->scatter) { - tr_err(&edma_tr, "EDMA: scatter enabled, that is not supported for now!"); + tr_err("EDMA: scatter enabled, that is not supported for now!"); return -EINVAL; } @@ -417,16 +417,16 @@ static int edma_probe(struct dma *dma) int channel; if (dma->chan) { - tr_err(&edma_tr, "EDMA: Repeated probe"); + tr_err("EDMA: Repeated probe"); return -EEXIST; } - tr_info(&edma_tr, "EDMA: probe"); + tr_info("EDMA: probe"); dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&edma_tr, "EDMA: Probe failure, unable to allocate channel descriptors"); + tr_err("EDMA: Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -441,7 +441,7 @@ static int edma_remove(struct dma *dma) int channel; if (!dma->chan) { - tr_err(&edma_tr, "EDMA: remove called without probe, it's a no-op"); + tr_err("EDMA: remove called without probe, it's a no-op"); return 0; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -543,7 +543,7 @@ static int edma_get_data_size(struct dma_chan_data *channel, *avail = ABS(capture_data_size) / 2; break; 
default: - tr_err(&edma_tr, "edma_get_data_size() unsupported direction %d", + tr_err("edma_get_data_size() unsupported direction %d", channel->direction); return -EINVAL; } diff --git a/src/drivers/imx/interrupt-irqsteer.c b/src/drivers/imx/interrupt-irqsteer.c index 2ef4f2f89e11..bb41e87c9925 100644 --- a/src/drivers/imx/interrupt-irqsteer.c +++ b/src/drivers/imx/interrupt-irqsteer.c @@ -315,7 +315,7 @@ static inline void handle_irq_batch(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&irq_i_tr, "irq_handler(): nobody cared, bit %d", + tr_err("irq_handler(): nobody cared, bit %d", bit); /* Mask this interrupt so it won't happen again */ irqstr_mask_int(line_index * IRQSTR_IRQS_PER_LINE + bit); @@ -350,7 +350,7 @@ static inline void irq_handler(void *data, uint32_t line_index) if (!--tries) { tries = IRQ_MAX_TRIES; - tr_err(&irq_i_tr, "irq_handler(): IRQ storm, status 0x%08x%08x", + tr_err("irq_handler(): IRQ storm, status 0x%08x%08x", (uint32_t)(status >> 32), (uint32_t)status); } } diff --git a/src/drivers/imx/ipc.c b/src/drivers/imx/ipc.c index 44fd6d1e50b8..7d8615cead98 100644 --- a/src/drivers/imx/ipc.c +++ b/src/drivers/imx/ipc.c @@ -59,7 +59,7 @@ static void irq_handler(void *arg) /* Interrupt arrived, check src */ status = imx_mu_read(IMX_MU_xSR(IMX_MU_VERSION, IMX_MU_GSR)); - tr_dbg(&ipc_tr, "ipc: irq isr 0x%x", status); + tr_dbg("ipc: irq isr 0x%x", status); /* reply message(done) from host */ if (status & IMX_MU_xSR_GIPn(IMX_MU_VERSION, 1)) { @@ -116,7 +116,7 @@ void ipc_platform_complete_cmd(struct ipc *ipc) 0, 100); if (ret < 0) - tr_err(&ipc_tr, "failed poll for GIR0"); + tr_err("failed poll for GIR0"); ret = poll_for_register_delay(MU_BASE + IMX_MU_xCR(IMX_MU_VERSION, IMX_MU_GCR), @@ -124,7 +124,7 @@ void ipc_platform_complete_cmd(struct ipc *ipc) 0, 100); if (ret < 0) - tr_err(&ipc_tr, "failed poll for GIR1"); + tr_err("failed poll for GIR1"); /* request GP interrupt #0 - notify host that reply is 
ready */ imx_mu_xcr_rmw(IMX_MU_VERSION, IMX_MU_GCR, IMX_MU_xCR_GIRn(IMX_MU_VERSION, 0), 0); @@ -160,7 +160,7 @@ int ipc_platform_send_msg(const struct ipc_msg *msg) /* now send the message */ mailbox_dspbox_write(0, msg->tx_data, msg->tx_size); - tr_dbg(&ipc_tr, "ipc: msg tx -> 0x%x", msg->header); + tr_dbg("ipc: msg tx -> 0x%x", msg->header); ipc->is_notification_pending = true; @@ -190,7 +190,7 @@ int platform_ipc_init(struct ipc *ipc) iipc = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*iipc)); if (!iipc) { - tr_err(&ipc_tr, "Unable to allocate IPC private data"); + tr_err("Unable to allocate IPC private data"); return -ENOMEM; } ipc_set_drvdata(ipc, iipc); @@ -212,7 +212,7 @@ int platform_ipc_init(struct ipc *ipc) iipc->dh_buffer.dmac = dma_get(DMA_DIR_HMEM_TO_LMEM, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); if (!iipc->dh_buffer.dmac) { - tr_err(&ipc_tr, "Unable to find DMA for host page table"); + tr_err("Unable to find DMA for host page table"); sof_panic(SOF_IPC_PANIC_IPC); } #endif diff --git a/src/drivers/imx/sdma.c b/src/drivers/imx/sdma.c index 902bb03eb245..f4f97e904420 100644 --- a/src/drivers/imx/sdma.c +++ b/src/drivers/imx/sdma.c @@ -87,7 +87,7 @@ struct sdma_pdata { static void sdma_set_overrides(struct dma_chan_data *channel, bool event_override, bool host_override) { - tr_dbg(&sdma_tr, "sdma_set_overrides(%d, %d)", event_override, + tr_dbg("sdma_set_overrides(%d, %d)", event_override, host_override); dma_reg_update_bits(channel->dma, SDMA_EVTOVR, BIT(channel->index), event_override ? 
BIT(channel->index) : 0); @@ -112,7 +112,7 @@ static int sdma_run_c0(struct dma *dma, uint8_t cmd, uint32_t buf_addr, struct sdma_chan *c0data = dma_chan_get_data(c0); int ret; - tr_dbg(&sdma_tr, "sdma_run_c0 cmd %d buf_addr 0x%08x sdma_addr 0x%04x count %d", + tr_dbg("sdma_run_c0 cmd %d buf_addr 0x%08x sdma_addr 0x%04x count %d", cmd, buf_addr, sdma_addr, count); c0data->desc[0].config = SDMA_BD_CMD(cmd) | SDMA_BD_COUNT(count) @@ -144,7 +144,7 @@ static int sdma_run_c0(struct dma *dma, uint8_t cmd, uint32_t buf_addr, ret = 0; if (ret < 0) - tr_err(&sdma_tr, "SDMA channel 0 timed out"); + tr_err("SDMA channel 0 timed out"); /* Switch to dynamic context switch mode if needed. This saves power. */ if ((dma_reg_read(dma, SDMA_CONFIG) & SDMA_CONFIG_CSM_MSK) == @@ -152,7 +152,7 @@ static int sdma_run_c0(struct dma *dma, uint8_t cmd, uint32_t buf_addr, dma_reg_update_bits(dma, SDMA_CONFIG, SDMA_CONFIG_CSM_MSK, SDMA_CONFIG_CSM_DYN); - tr_dbg(&sdma_tr, "sdma_run_c0 done, ret = %d", ret); + tr_dbg("sdma_run_c0 done, ret = %d", ret); return ret; } @@ -163,12 +163,12 @@ static int sdma_register_init(struct dma *dma) struct sdma_pdata *pdata = dma_get_drvdata(dma); int i; - tr_dbg(&sdma_tr, "sdma_register_init"); + tr_dbg("sdma_register_init"); dma_reg_write(dma, SDMA_RESET, 1); /* Wait for 10us */ ret = poll_for_register_delay(dma_base(dma) + SDMA_RESET, 1, 0, 1000); if (ret < 0) { - tr_err(&sdma_tr, "SDMA reset error, base address %p", + tr_err("SDMA reset error, base address %p", (void *)dma_base(dma)); return ret; } @@ -218,7 +218,7 @@ static void sdma_init_c0(struct dma *dma) struct sdma_pdata *sdma_pdata = dma_get_drvdata(dma); struct sdma_chan *pdata = &sdma_pdata->chan_pdata[0]; - tr_dbg(&sdma_tr, "sdma_init_c0"); + tr_dbg("sdma_init_c0"); c0->status = COMP_STATE_READY; /* Reset channel 0 private data */ @@ -235,14 +235,14 @@ static int sdma_boot(struct dma *dma) { int ret; - tr_dbg(&sdma_tr, "sdma_boot"); + tr_dbg("sdma_boot"); ret = sdma_register_init(dma); if (ret < 
0) return ret; sdma_init_c0(dma); - tr_dbg(&sdma_tr, "sdma_boot done"); + tr_dbg("sdma_boot done"); return 0; } @@ -253,7 +253,7 @@ static int sdma_upload_context(struct dma_chan_data *chan) /* Ensure context is ready for upload */ dcache_writeback_region(pdata->ctx, sizeof(*pdata->ctx)); - tr_dbg(&sdma_tr, "sdma_upload_context for channel %d", chan->index); + tr_dbg("sdma_upload_context for channel %d", chan->index); /* Last parameters are unneeded for this command and are ignored; * set to 0. @@ -285,17 +285,17 @@ static int sdma_probe(struct dma *dma) struct sdma_pdata *pdata; if (dma->chan) { - tr_err(&sdma_tr, "SDMA: Repeated probe"); + tr_err("SDMA: Repeated probe"); return -EEXIST; } - tr_info(&sdma_tr, "SDMA: probe"); + tr_info("SDMA: probe"); dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&sdma_tr, "SDMA: Probe failure, unable to allocate channel descriptors"); + tr_err("SDMA: Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } @@ -304,7 +304,7 @@ static int sdma_probe(struct dma *dma) if (!pdata) { rfree(dma->chan); dma->chan = NULL; - tr_err(&sdma_tr, "SDMA: Probe failure, unable to allocate private data"); + tr_err("SDMA: Probe failure, unable to allocate private data"); return -ENOMEM; } dma_set_drvdata(dma, pdata); @@ -319,7 +319,7 @@ static int sdma_probe(struct dma *dma) sizeof(struct sdma_chan)); if (!pdata->chan_pdata) { ret = -ENOMEM; - tr_err(&sdma_tr, "SDMA: probe: out of memory"); + tr_err("SDMA: probe: out of memory"); goto err; } @@ -328,7 +328,7 @@ static int sdma_probe(struct dma *dma) sizeof(struct sdma_context)); if (!pdata->contexts) { ret = -ENOMEM; - tr_err(&sdma_tr, "SDMA: probe: unable to allocate contexts"); + tr_err("SDMA: probe: unable to allocate contexts"); goto err; } @@ -337,13 +337,13 @@ static int sdma_probe(struct dma *dma) sizeof(struct sdma_ccb)); if (!pdata->ccb_array) { ret = -ENOMEM; - 
tr_err(&sdma_tr, "SDMA: probe: unable to allocate CCBs"); + tr_err("SDMA: probe: unable to allocate CCBs"); goto err; } ret = sdma_boot(dma); if (ret < 0) { - tr_err(&sdma_tr, "SDMA: Unable to boot"); + tr_err("SDMA: Unable to boot"); goto err; } @@ -352,7 +352,7 @@ static int sdma_probe(struct dma *dma) RAM_CODE_START_ADDR, RAM_CODE_SIZE * sizeof(short)); if (ret < 0) { - tr_err(&sdma_tr, "SDMA: Failed to load firmware"); + tr_err("SDMA: Failed to load firmware"); goto err; } #endif @@ -379,11 +379,11 @@ static int sdma_remove(struct dma *dma) struct sdma_pdata *pdata = dma_get_drvdata(dma); if (!dma->chan) { - tr_err(&sdma_tr, "SDMA: Remove called without probe, that's a noop"); + tr_err("SDMA: Remove called without probe, that's a noop"); return 0; } - tr_dbg(&sdma_tr, "sdma_remove"); + tr_dbg("sdma_remove"); /* Prevent all channels except channel 0 from running */ dma_reg_write(dma, SDMA_HOSTOVR, 1); @@ -414,7 +414,7 @@ static struct dma_chan_data *sdma_channel_get(struct dma *dma, int i; /* Ignoring channel 0; let's just allocate a free channel */ - tr_dbg(&sdma_tr, "sdma_channel_get"); + tr_dbg("sdma_channel_get"); for (i = 1; i < dma->plat_data.channels; i++) { channel = &dma->chan[i]; if (channel->status != COMP_STATE_INIT) @@ -435,7 +435,7 @@ static struct dma_chan_data *sdma_channel_get(struct dma *dma, sdma_set_overrides(channel, false, false); return channel; } - tr_err(&sdma_tr, "sdma no channel free"); + tr_err("sdma no channel free"); return NULL; } @@ -443,7 +443,7 @@ static void sdma_enable_event(struct dma_chan_data *channel, int eventnum) { struct sdma_chan *pdata = dma_chan_get_data(channel); - tr_dbg(&sdma_tr, "sdma_enable_event(%d, %d)", channel->index, eventnum); + tr_dbg("sdma_enable_event(%d, %d)", channel->index, eventnum); if (eventnum < 0 || eventnum > SDMA_HWEVENTS_COUNT) return; /* No change if request is invalid */ @@ -461,7 +461,7 @@ static void sdma_enable_event(struct dma_chan_data *channel, int eventnum) static void 
sdma_disable_event(struct dma_chan_data *channel, int eventnum) { - tr_dbg(&sdma_tr, "sdma_disable_event(%d, %d)", channel->index, eventnum); + tr_dbg("sdma_disable_event(%d, %d)", channel->index, eventnum); if (eventnum < 0 || eventnum > SDMA_HWEVENTS_COUNT) return; /* No change if request is invalid */ @@ -476,7 +476,7 @@ static void sdma_channel_put(struct dma_chan_data *channel) if (channel->status == COMP_STATE_INIT) return; /* Channel was already free */ - tr_dbg(&sdma_tr, "sdma_channel_put(%d)", channel->index); + tr_dbg("sdma_channel_put(%d)", channel->index); dma_interrupt_legacy(channel, DMA_IRQ_CLEAR); sdma_disable_event(channel, pdata->hw_event); @@ -486,7 +486,7 @@ static void sdma_channel_put(struct dma_chan_data *channel) static int sdma_start(struct dma_chan_data *channel) { - tr_dbg(&sdma_tr, "sdma_start(%d)", channel->index); + tr_dbg("sdma_start(%d)", channel->index); if (channel->status != COMP_STATE_PREPARE && channel->status != COMP_STATE_PAUSED) @@ -508,7 +508,7 @@ static int sdma_stop(struct dma_chan_data *channel) channel->status = COMP_STATE_READY; - tr_dbg(&sdma_tr, "sdma_stop(%d)", channel->index); + tr_dbg("sdma_stop(%d)", channel->index); sdma_disable_channel(channel->dma, channel->index); @@ -554,7 +554,7 @@ static int sdma_copy(struct dma_chan_data *channel, int bytes, uint32_t flags) }; int idx; - tr_dbg(&sdma_tr, "sdma_copy"); + tr_dbg("sdma_copy"); idx = (pdata->next_bd + 1) % 2; pdata->next_bd = idx; @@ -582,7 +582,7 @@ static int sdma_status(struct dma_chan_data *channel, struct sdma_chan *pdata = dma_chan_get_data(channel); struct sdma_bd *bd; - tr_dbg(&sdma_tr, "sdma_status"); + tr_dbg("sdma_status"); if (channel->status == COMP_STATE_INIT) return -EINVAL; status->state = channel->status; @@ -682,7 +682,7 @@ static int sdma_read_config(struct dma_chan_data *channel, pdata->sdma_chan_type = SDMA_CHAN_TYPE_AP2AP; /* Fallthrough, TODO: implement to support m2m */ default: - tr_err(&sdma_tr, "sdma_set_config: Unsupported direction 
%d", + tr_err("sdma_set_config: Unsupported direction %d", config->direction); return -EINVAL; } @@ -690,13 +690,13 @@ static int sdma_read_config(struct dma_chan_data *channel, for (i = 0; i < config->elem_array.count; i++) { if (config->direction == DMA_DIR_MEM_TO_DEV && pdata->fifo_paddr != config->elem_array.elems[i].dest) { - tr_err(&sdma_tr, "sdma_read_config: FIFO changes address!"); + tr_err("sdma_read_config: FIFO changes address!"); return -EINVAL; } if (config->direction == DMA_DIR_DEV_TO_MEM && pdata->fifo_paddr != config->elem_array.elems[i].src) { - tr_err(&sdma_tr, "sdma_read_config: FIFO changes address!"); + tr_err("sdma_read_config: FIFO changes address!"); return -EINVAL; } @@ -704,7 +704,7 @@ static int sdma_read_config(struct dma_chan_data *channel, /* Future improvement: Create multiple BDs so as to * support this situation */ - tr_err(&sdma_tr, "sdma_set_config: elem transfers too much: %d bytes", + tr_err("sdma_set_config: elem transfers too much: %d bytes", config->elem_array.elems[i].size); return -EINVAL; } @@ -741,12 +741,12 @@ static int sdma_prep_desc(struct dma_chan_data *channel, /* Validate requested configuration */ if (config->elem_array.count > SDMA_MAX_BDS) { - tr_err(&sdma_tr, "sdma_set_config: Unable to handle %d descriptors", + tr_err("sdma_set_config: Unable to handle %d descriptors", config->elem_array.count); return -EINVAL; } if (config->elem_array.count <= 0) { - tr_err(&sdma_tr, "sdma_set_config: Invalid descriptor count: %d", + tr_err("sdma_set_config: Invalid descriptor count: %d", config->elem_array.count); return -EINVAL; } @@ -826,7 +826,7 @@ static int sdma_prep_desc(struct dma_chan_data *channel, /* This case doesn't happen; we need to assign the other cases * for AP2MCU and MCU2AP */ - tr_err(&sdma_tr, "Unexpected SDMA error"); + tr_err("Unexpected SDMA error"); return -EINVAL; } @@ -869,7 +869,7 @@ static int sdma_set_config(struct dma_chan_data *channel, struct sdma_chan *pdata = dma_chan_get_data(channel); 
int ret; - tr_dbg(&sdma_tr, "sdma_set_config channel %d", channel->index); + tr_dbg("sdma_set_config channel %d", channel->index); ret = sdma_read_config(channel, config); if (ret < 0) @@ -888,11 +888,11 @@ static int sdma_set_config(struct dma_chan_data *channel, /* Upload context */ ret = sdma_upload_context(channel); if (ret < 0) { - tr_err(&sdma_tr, "Unable to upload context, bailing"); + tr_err("Unable to upload context, bailing"); return ret; } - tr_dbg(&sdma_tr, "SDMA context uploaded"); + tr_dbg("SDMA context uploaded"); /* Context uploaded, we can set up events now */ sdma_enable_event(channel, pdata->hw_event); @@ -926,7 +926,7 @@ static int sdma_interrupt(struct dma_chan_data *channel, enum dma_irq_cmd cmd) */ return 0; default: - tr_err(&sdma_tr, "sdma_interrupt unknown cmd %d", cmd); + tr_err("sdma_interrupt unknown cmd %d", cmd); return -EINVAL; } } @@ -967,10 +967,10 @@ static int sdma_get_data_size(struct dma_chan_data *channel, uint32_t *avail, uint32_t result_data = 0; int i; - tr_dbg(&sdma_tr, "sdma_get_data_size(%d)", channel->index); + tr_dbg("sdma_get_data_size(%d)", channel->index); if (channel->index == 0) { /* Channel 0 shouldn't have this called anyway */ - tr_err(&sdma_tr, "Please do not call get_data_size on SDMA channel 0!"); + tr_err("Please do not call get_data_size on SDMA channel 0!"); *avail = *free = 0; return -EINVAL; } @@ -993,7 +993,7 @@ static int sdma_get_data_size(struct dma_chan_data *channel, uint32_t *avail, *avail = result_data; break; default: - tr_err(&sdma_tr, "sdma_get_data_size channel invalid direction"); + tr_err("sdma_get_data_size channel invalid direction"); return -EINVAL; } return 0; diff --git a/src/drivers/interrupt.c b/src/drivers/interrupt.c index 6b8eefdae86d..55c89e0b706e 100644 --- a/src/drivers/interrupt.c +++ b/src/drivers/interrupt.c @@ -68,7 +68,7 @@ int interrupt_cascade_register(const struct irq_cascade_tmpl *tmpl) cascade = &(*cascade)->next) { if (!rstrcmp((*cascade)->name, tmpl->name)) { ret = 
-EEXIST; - tr_err(&irq_tr, "cascading IRQ controller name duplication!"); + tr_err("cascading IRQ controller name duplication!"); goto unlock; } @@ -114,7 +114,7 @@ int interrupt_get_irq(unsigned int irq, const char *name) /* If a name is specified, irq must be <= PLATFORM_IRQ_CHILDREN */ if (irq >= PLATFORM_IRQ_CHILDREN) { - tr_err(&irq_tr, "IRQ %d invalid as a child interrupt!", + tr_err("IRQ %d invalid as a child interrupt!", irq); return -EINVAL; } @@ -194,7 +194,7 @@ static int irq_register_child(struct irq_cascade_desc *cascade, int irq, if (child->handler_arg == arg) { - tr_err(&irq_tr, "IRQ 0x%x handler argument re-used!", + tr_err("IRQ 0x%x handler argument re-used!", irq); ret = -EEXIST; goto out; @@ -345,7 +345,7 @@ static uint32_t irq_disable_child(struct irq_cascade_desc *cascade, int irq, } if (!child->enable_count[child_idx]) { - tr_err(&irq_tr, "IRQ %x unbalanced interrupt_disable()", + tr_err("IRQ %x unbalanced interrupt_disable()", irq); } else if (!--child->enable_count[child_idx]) { /* disable the child interrupt */ diff --git a/src/drivers/mediatek/afe/afe-drv.c b/src/drivers/mediatek/afe/afe-drv.c index 9fc9995d20f5..b21eb5021358 100644 --- a/src/drivers/mediatek/afe/afe-drv.c +++ b/src/drivers/mediatek/afe/afe-drv.c @@ -34,20 +34,20 @@ DECLARE_TR_CTX(afedrv_tr, SOF_UUID(afedrv_uuid), LOG_LEVEL_INFO); static inline void afe_reg_read(struct mtk_base_afe *afe, uint32_t reg, uint32_t *value) { *value = io_reg_read((uint32_t)((char *)afe->base + reg)); - tr_dbg(&afedrv_tr, "r_reg:0x%x, value:0x%x\n", reg, *value); + tr_dbg("r_reg:0x%x, value:0x%x\n", reg, *value); } static inline void afe_reg_write(struct mtk_base_afe *afe, uint32_t reg, uint32_t value) { io_reg_write((uint32_t)((char *)afe->base + reg), value); - tr_dbg(&afedrv_tr, "w_reg:0x%x, value:0x%x\n", reg, value); + tr_dbg("w_reg:0x%x, value:0x%x\n", reg, value); } static inline void afe_reg_update_bits(struct mtk_base_afe *afe, uint32_t reg, uint32_t mask, uint32_t value) { 
io_reg_update_bits((uint32_t)((char *)afe->base + reg), mask, value); - tr_dbg(&afedrv_tr, "u_reg:0x%x, value:0x%x\n", reg, value); + tr_dbg("u_reg:0x%x, value:0x%x\n", reg, value); } static int afe_memif_set_channel(struct mtk_base_afe *afe, int id, unsigned int channel) @@ -90,7 +90,7 @@ static int afe_memif_set_rate(struct mtk_base_afe *afe, int id, unsigned int rat fs = afe->afe_fs(rate, memif->data->id); if (fs < 0) { - tr_err(&afedrv_tr, "invalid fs:%d\n", fs); + tr_err("invalid fs:%d\n", fs); return -EINVAL; } @@ -120,7 +120,7 @@ static int afe_memif_set_format(struct mtk_base_afe *afe, int id, unsigned int f hd_audio = 1; break; default: - tr_err(&afedrv_tr, "not support format:%u\n", format); + tr_err("not support format:%u\n", format); return -EINVAL; } @@ -166,7 +166,7 @@ int afe_memif_set_addr(struct mtk_base_afe *afe, int id, unsigned int dma_addr, memif->afe_addr = phys_buf_addr; memif->buffer_size = dma_bytes; - tr_dbg(&afedrv_tr, "dma_addr:0x%x, size:%u\n", dma_addr, dma_bytes); + tr_dbg("dma_addr:0x%x, size:%u\n", dma_addr, dma_bytes); /* start */ afe_reg_write(afe, memif->data->reg_ofs_base, phys_buf_addr); /* end */ @@ -251,14 +251,14 @@ int afe_dai_set_config(struct mtk_base_afe *afe, int id, unsigned int channel, u if (id >= afe->dais_size) return -EINVAL; - tr_info(&afedrv_tr, "afe_dai_set_config, id:%d\n", id); + tr_info("afe_dai_set_config, id:%d\n", id); dai = &afe->dais[id]; dai->channel = channel; dai->format = format; dai->rate = rate; - tr_info(&afedrv_tr, "dai:%d set: format:%d, rate:%d, channel:%d\n", id, format, rate, + tr_info("dai:%d set: format:%d, rate:%d, channel:%d\n", id, format, rate, channel); return 0; @@ -271,10 +271,10 @@ int afe_dai_get_config(struct mtk_base_afe *afe, int id, unsigned int *channel, /* TODO 1. if need use dai->id to search target dai */ /* TODO 1. 
if need a status to control the dai status */ - tr_info(&afedrv_tr, "afe_dai_get_config, id:%d\n", id); + tr_info("afe_dai_get_config, id:%d\n", id); if (id >= afe->dais_size || id < 0) { - tr_err(&afedrv_tr, "afe_dai_get_config , invalid id:%d\n", id); + tr_err("afe_dai_get_config , invalid id:%d\n", id); return -EINVAL; } dai = &afe->dais[id]; @@ -283,7 +283,7 @@ int afe_dai_get_config(struct mtk_base_afe *afe, int id, unsigned int *channel, *rate = dai->rate; *format = dai->format; - tr_info(&afedrv_tr, "dai:%d get: format:%d, rate:%d, channel:%d\n", id, *format, *rate, + tr_info("dai:%d get: format:%d, rate:%d, channel:%d\n", id, *format, *rate, *channel); return 0; @@ -359,7 +359,7 @@ int afe_probe(struct mtk_base_afe *afe) afe->irq_fs = platform->irq_fs; if (!afe->afe_fs) return -EINVAL; - tr_dbg(&afedrv_tr, "afe_base:0x%x\n", afe->base); + tr_dbg("afe_base:0x%x\n", afe->base); /* TODO how to get the memif number, how to sync with dmac lib */ afe->memifs_size = platform->memif_size; afe->memif = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, @@ -407,7 +407,7 @@ void afe_remove(struct mtk_base_afe *afe) if (afe->ref_count < 0) { afe->ref_count = 0; - tr_dbg(&afedrv_tr, "afe ref_count < 0, :%d\n", afe->ref_count); + tr_dbg("afe ref_count < 0, :%d\n", afe->ref_count); return; } diff --git a/src/drivers/mediatek/afe/afe-memif.c b/src/drivers/mediatek/afe/afe-memif.c index 45529f8f1fcf..52540b3eca6e 100644 --- a/src/drivers/mediatek/afe/afe-memif.c +++ b/src/drivers/mediatek/afe/afe-memif.c @@ -58,19 +58,19 @@ static struct dma_chan_data *memif_channel_get(struct dma *dma, unsigned int req k_spinlock_key_t key; struct dma_chan_data *channel; - tr_dbg(&memif_tr, "MEMIF: channel_get(%d)", req_chan); + tr_dbg("MEMIF: channel_get(%d)", req_chan); key = k_spin_lock(&dma->lock); if (req_chan >= dma->plat_data.channels) { k_spin_unlock(&dma->lock, key); - tr_err(&memif_tr, "MEMIF: Channel %d out of range", req_chan); + tr_err("MEMIF: Channel %d out of range", 
req_chan); return NULL; } channel = &dma->chan[req_chan]; if (channel->status != COMP_STATE_INIT) { k_spin_unlock(&dma->lock, key); - tr_err(&memif_tr, "MEMIF: Cannot reuse channel %d", req_chan); + tr_err("MEMIF: Cannot reuse channel %d", req_chan); return NULL; } @@ -89,7 +89,7 @@ static void memif_channel_put(struct dma_chan_data *channel) /* Assuming channel is stopped, we thus don't need hardware to * do anything right now */ - tr_info(&memif_tr, "MEMIF: channel_put(%d)", channel->index); + tr_info("MEMIF: channel_put(%d)", channel->index); notifier_unregister_all(NULL, channel); @@ -103,7 +103,7 @@ static int memif_start(struct dma_chan_data *channel) { struct afe_memif_dma *memif = dma_chan_get_data(channel); - tr_info(&memif_tr, "MEMIF:%d start(%d), channel_status:%d", memif->memif_id, channel->index, + tr_info("MEMIF:%d start(%d), channel_status:%d", memif->memif_id, channel->index, channel->status); if (channel->status != COMP_STATE_PREPARE && channel->status != COMP_STATE_SUSPEND) @@ -123,7 +123,7 @@ static int memif_release(struct dma_chan_data *channel) struct afe_memif_dma *memif = dma_chan_get_data(channel); /* TODO actually handle pause/release properly? */ - tr_info(&memif_tr, "MEMIF: release(%d)", channel->index); + tr_info("MEMIF: release(%d)", channel->index); if (channel->status != COMP_STATE_PAUSED) return -EINVAL; @@ -144,7 +144,7 @@ static int memif_pause(struct dma_chan_data *channel) struct afe_memif_dma *memif = dma_chan_get_data(channel); /* TODO actually handle pause/release properly? */ - tr_info(&memif_tr, "MEMIF: pause(%d)", channel->index); + tr_info("MEMIF: pause(%d)", channel->index); if (channel->status != COMP_STATE_ACTIVE) return -EINVAL; @@ -159,7 +159,7 @@ static int memif_stop(struct dma_chan_data *channel) { struct afe_memif_dma *memif = dma_chan_get_data(channel); - tr_info(&memif_tr, "MEMIF: stop(%d)", channel->index); + tr_info("MEMIF: stop(%d)", channel->index); /* Validate state */ /* TODO: Should we? 
*/ switch (channel->status) { @@ -193,7 +193,7 @@ static int memif_copy(struct dma_chan_data *channel, int bytes, uint32_t flags) memif->wptr = (memif->wptr + bytes) % memif->dma_size; else memif->rptr = (memif->rptr + bytes) % memif->dma_size; - tr_dbg(&memif_tr, "memif_copy: wptr:%u, rptr:%u", memif->wptr, memif->rptr); + tr_dbg("memif_copy: wptr:%u, rptr:%u", memif->wptr, memif->rptr); notifier_event(channel, NOTIFIER_ID_DMA_COPY, NOTIFIER_TARGET_CORE_LOCAL, &next, sizeof(next)); @@ -242,7 +242,7 @@ static int memif_set_config(struct dma_chan_data *channel, struct dma_sg_config channel->direction = config->direction; direction = afe_memif_get_direction(memif->afe, memif->memif_id); - tr_info(&memif_tr, "memif_set_config, direction:%d, afe_dir:%d", config->direction, + tr_info("memif_set_config, direction:%d, afe_dir:%d", config->direction, direction); switch (config->direction) { @@ -261,10 +261,10 @@ static int memif_set_config(struct dma_chan_data *channel, struct dma_sg_config dai_id = (int)AFE_HS_GET_DAI(config->src_dev); irq_id = (int)AFE_HS_GET_IRQ(config->src_dev); dma_addr = (int)config->elem_array.elems[0].dest; - tr_dbg(&memif_tr, "capture: dai_id:%d, dma_addr:%u\n", dai_id, dma_addr); + tr_dbg("capture: dai_id:%d, dma_addr:%u\n", dai_id, dma_addr); break; default: - tr_err(&memif_tr, "afe_memif_set_config() unsupported config direction"); + tr_err("afe_memif_set_config() unsupported config direction"); return -EINVAL; } @@ -272,11 +272,11 @@ static int memif_set_config(struct dma_chan_data *channel, struct dma_sg_config dma_size += (int)config->elem_array.elems[i].size; if (!config->cyclic) { - tr_err(&memif_tr, "afe-memif: Only cyclic configurations are supported!"); + tr_err("afe-memif: Only cyclic configurations are supported!"); return -ENOTSUP; } if (config->scatter) { - tr_err(&memif_tr, "afe-memif: scatter enabled, that is not supported for now!"); + tr_err("afe-memif: scatter enabled, that is not supported for now!"); return -ENOTSUP; } @@ 
-304,7 +304,7 @@ static int memif_set_config(struct dma_chan_data *channel, struct dma_sg_config memif->format = SOF_IPC_FRAME_S32_LE; break; default: - tr_err(&memif_tr, "afe-memif: not support bitwidth %u!", config->src_width); + tr_err("afe-memif: not support bitwidth %u!", config->src_width); return -ENOTSUP; } @@ -327,7 +327,7 @@ static int memif_remove(struct dma *dma) struct mtk_base_afe *afe = afe_get(); if (!dma->chan) { - tr_err(&memif_tr, "MEMIF: remove called without probe, it's a no-op"); + tr_err("MEMIF: remove called without probe, it's a no-op"); return 0; } for (channel = 0; channel < dma->plat_data.channels; channel++) { @@ -350,21 +350,21 @@ static int memif_probe(struct dma *dma) struct afe_memif_dma *memif; if (!dma || dma->chan) { - tr_err(&memif_tr, "MEMIF: Repeated probe"); + tr_err("MEMIF: Repeated probe"); return -EEXIST; } /* do afe driver probe */ ret = afe_probe(afe); if (ret < 0) { - tr_err(&memif_tr, "MEMIF: afe_probe fail:%d", ret); + tr_err("MEMIF: afe_probe fail:%d", ret); return ret; } dma->chan = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, dma->plat_data.channels * sizeof(struct dma_chan_data)); if (!dma->chan) { - tr_err(&memif_tr, "MEMIF: Probe failure, unable to allocate channel descriptors"); + tr_err("MEMIF: Probe failure, unable to allocate channel descriptors"); return -ENOMEM; } @@ -376,7 +376,7 @@ static int memif_probe(struct dma *dma) memif = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(struct afe_memif_dma)); if (!memif) { - tr_err(&memif_tr, "afe-memif: %d channel %d private data alloc failed", + tr_err("afe-memif: %d channel %d private data alloc failed", dma->plat_data.id, channel); goto out; } @@ -455,9 +455,9 @@ static int memif_get_data_size(struct dma_chan_data *channel, uint32_t *avail, u /* update hw pointer from afe memif */ hw_ptr = afe_memif_get_cur_position(memif->afe, memif->memif_id); - tr_dbg(&memif_tr, "get_pos:0x%x, base:0x%x, dir:%d", hw_ptr, memif->dma_base, + 
tr_dbg("get_pos:0x%x, base:0x%x, dir:%d", hw_ptr, memif->dma_base, memif->direction); - tr_dbg(&memif_tr, "dma_size:%u, period_size:%d", memif->dma_size, memif->period_size); + tr_dbg("dma_size:%u, period_size:%d", memif->dma_size, memif->period_size); if (!hw_ptr) return -EINVAL; @@ -476,7 +476,7 @@ static int memif_get_data_size(struct dma_chan_data *channel, uint32_t *avail, u *avail = *avail / memif->period_size * memif->period_size; *free = memif->dma_size - *avail; - tr_dbg(&memif_tr, "r:0x%x, w:0x%x, avail:%u, free:%u ", + tr_dbg("r:0x%x, w:0x%x, avail:%u, free:%u ", memif->rptr, memif->wptr, *avail, *free); return 0; diff --git a/src/drivers/mediatek/afe/mt8186/afe-sgen.c b/src/drivers/mediatek/afe/mt8186/afe-sgen.c index 6f8225e0b4a8..95b4601d6cee 100644 --- a/src/drivers/mediatek/afe/mt8186/afe-sgen.c +++ b/src/drivers/mediatek/afe/mt8186/afe-sgen.c @@ -156,9 +156,9 @@ static uint32_t mt8186_sinegen_timing(uint32_t rate) break; default: sinegen_timing = SGEN_CH_TIMING_48K; - tr_err(&sgen_tr, "invalid rate %d, set default 48k ", rate); + tr_err("invalid rate %d, set default 48k ", rate); } - tr_dbg(&sgen_tr, "rate %d, sinegen_timing %d ", rate, sinegen_timing); + tr_dbg("rate %d, sinegen_timing %d ", rate, sinegen_timing); return sinegen_timing; } @@ -176,7 +176,7 @@ static void mt8186_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl { uint32_t loopback_mode, reg_1, reg_2, sinegen_timing; - tr_dbg(&sgen_tr, "sgen_id %d, enable %d", sgen_id, enable); + tr_dbg("sgen_id %d, enable %d", sgen_id, enable); sinegen_timing = mt8186_sinegen_timing(rate); @@ -196,7 +196,7 @@ static void mt8186_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl loopback_mode = MT8186_SGEN_DL2; break; default: - tr_err(&sgen_tr, "invalid sgen_id %d", sgen_id); + tr_err("invalid sgen_id %d", sgen_id); return; } /* enable sinegen clock*/ @@ -237,7 +237,7 @@ static void mt8186_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl reg_1 = 
mtk_afe_reg_read(AFE_SINEGEN_CON0); reg_2 = mtk_afe_reg_read(AFE_SINEGEN_CON2); - tr_dbg(&sgen_tr, "AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); + tr_dbg("AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); } void afe_sinegen_enable(void) diff --git a/src/drivers/mediatek/afe/mt8188/afe-sgen.c b/src/drivers/mediatek/afe/mt8188/afe-sgen.c index ea8812a20c2b..0898c3a09db3 100644 --- a/src/drivers/mediatek/afe/mt8188/afe-sgen.c +++ b/src/drivers/mediatek/afe/mt8188/afe-sgen.c @@ -163,9 +163,9 @@ static uint32_t mt8188_sinegen_timing(uint32_t rate) break; default: sinegen_timing = SGEN_CH_TIMING_48K; - tr_err(&sgen_tr, "invalid rate %d, set default 48k ", rate); + tr_err("invalid rate %d, set default 48k ", rate); } - tr_dbg(&sgen_tr, "rate %d, sinegen_timing %d ", rate, sinegen_timing); + tr_dbg("rate %d, sinegen_timing %d ", rate, sinegen_timing); return sinegen_timing; } @@ -183,7 +183,7 @@ static void mt8188_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl { uint32_t loopback_mode, reg_1, reg_2, sinegen_timing; - tr_dbg(&sgen_tr, "sgen_id %d, enable %d", sgen_id, enable); + tr_dbg("sgen_id %d, enable %d", sgen_id, enable); sinegen_timing = mt8188_sinegen_timing(rate); @@ -203,7 +203,7 @@ static void mt8188_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl loopback_mode = MT8188_SGEN_DL3; break; default: - tr_err(&sgen_tr, "invalid sgen_id %d", sgen_id); + tr_err("invalid sgen_id %d", sgen_id); return; } /* enable sinegen clock*/ @@ -244,7 +244,7 @@ static void mt8188_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl reg_1 = mtk_afe_reg_read(AFE_SINEGEN_CON0); reg_2 = mtk_afe_reg_read(AFE_SINEGEN_CON2); - tr_dbg(&sgen_tr, "AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); + tr_dbg("AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); } void afe_sinegen_enable(void) diff --git a/src/drivers/mediatek/afe/mt8195/afe-sgen.c b/src/drivers/mediatek/afe/mt8195/afe-sgen.c index 
000c119b1369..6bf865ef242e 100644 --- a/src/drivers/mediatek/afe/mt8195/afe-sgen.c +++ b/src/drivers/mediatek/afe/mt8195/afe-sgen.c @@ -171,9 +171,9 @@ static uint32_t mt8195_sinegen_timing(uint32_t rate) break; default: sinegen_timing = SGEN_CH_TIMING_48K; - tr_err(&sgen_tr, "invalid rate %d, set default 48k ", rate); + tr_err("invalid rate %d, set default 48k ", rate); } - tr_dbg(&sgen_tr, "rate %d, sinegen_timing %d ", rate, sinegen_timing); + tr_dbg("rate %d, sinegen_timing %d ", rate, sinegen_timing); return sinegen_timing; } @@ -191,7 +191,7 @@ static void mt8195_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl { uint32_t loopback_mode, reg_1, reg_2, sinegen_timing; - tr_dbg(&sgen_tr, "sgen_id %d, enable %d", sgen_id, enable); + tr_dbg("sgen_id %d, enable %d", sgen_id, enable); sinegen_timing = mt8195_sinegen_timing(rate); @@ -211,7 +211,7 @@ static void mt8195_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl loopback_mode = MT8195_SGEN_DL3; break; default: - tr_err(&sgen_tr, "invalid sgen_id %d", sgen_id); + tr_err("invalid sgen_id %d", sgen_id); return; } /* enable sinegen clock*/ @@ -252,7 +252,7 @@ static void mt8195_afe_sinegen_enable(uint32_t sgen_id, uint32_t rate, int enabl reg_1 = mtk_afe_reg_read(AFE_SINEGEN_CON0); reg_2 = mtk_afe_reg_read(AFE_SINEGEN_CON2); - tr_dbg(&sgen_tr, "AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); + tr_dbg("AFE_SINEGEN_CON0 0x%x, AFE_SINEGEN_CON2 0x%x", reg_1, reg_2); } void afe_sinegen_enable(void) diff --git a/src/drivers/mediatek/mt818x/interrupt.c b/src/drivers/mediatek/mt818x/interrupt.c index e96ebd909b9b..f36b5f594dc2 100644 --- a/src/drivers/mediatek/mt818x/interrupt.c +++ b/src/drivers/mediatek/mt818x/interrupt.c @@ -48,7 +48,7 @@ static void mtk_irq_mask(struct irq_desc *desc, uint32_t irq, unsigned int core) io_reg_update_bits(MTK_ADSP_IRQ_EN, BIT(desc->irq), 0); break; default: - tr_err(&int_tr, "Invalid interrupt %d", desc->irq); + tr_err("Invalid interrupt %d", 
desc->irq); return; } } @@ -64,7 +64,7 @@ static void mtk_irq_unmask(struct irq_desc *desc, uint32_t irq, unsigned int cor io_reg_update_bits(MTK_ADSP_IRQ_EN, BIT(desc->irq), BIT(desc->irq)); break; default: - tr_err(&int_tr, "Invalid interrupt %d", desc->irq); + tr_err("Invalid interrupt %d", desc->irq); return; } } @@ -125,7 +125,7 @@ static inline void mtk_handle_group_pending_irq(struct irq_cascade_desc *cascade k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&int_tr, "Not handle irq %u in group %u", + tr_err("Not handle irq %u in group %u", idx, line_index); } @@ -144,7 +144,7 @@ static inline void mtk_irq_group_handler(void *data, uint32_t line_index) if (status) mtk_handle_group_pending_irq(cascade, line_index, status); else - tr_err(&int_tr, "No pending irq in group %d", line_index); + tr_err("No pending irq in group %d", line_index); } #define DEFINE_IRQ_HANDLER(n) \ diff --git a/src/drivers/mediatek/mt818x/ipc.c b/src/drivers/mediatek/mt818x/ipc.c index b074c21f8413..9d9e11a0697a 100644 --- a/src/drivers/mediatek/mt818x/ipc.c +++ b/src/drivers/mediatek/mt818x/ipc.c @@ -147,7 +147,7 @@ int platform_ipc_init(struct ipc *ipc) iipc->dh_buffer.dmac = dma_get(DMA_DIR_HMEM_TO_LMEM, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); if (!iipc->dh_buffer.dmac) { - tr_err(&ipc_tr, "Unable to find DMA for host page table"); + tr_err("Unable to find DMA for host page table"); sof_panic(SOF_IPC_PANIC_IPC); } #endif @@ -160,25 +160,25 @@ int platform_ipc_init(struct ipc *ipc) */ mbox_irq0 = mtk_irq_group_id(MTK_DSP_IRQ_MBOX0); if (mbox_irq0 < 0) { - tr_err(&ipc_tr, "Invalid ipc mbox 0 IRQ:%d", mbox_irq0); + tr_err("Invalid ipc mbox 0 IRQ:%d", mbox_irq0); sof_panic(SOF_IPC_PANIC_IPC); } mbox_irq1 = mtk_irq_group_id(MTK_DSP_IRQ_MBOX1); if (mbox_irq1 < 0) { - tr_err(&ipc_tr, "Invalid ipc mbox 1 IRQ:%d", mbox_irq1); + tr_err("Invalid ipc mbox 1 IRQ:%d", mbox_irq1); sof_panic(SOF_IPC_PANIC_IPC); } ret = interrupt_register(mbox_irq0, mbox0_handler, ipc); if (ret < 0) { - 
tr_err(&ipc_tr, "Unable to register ipc mbox 0 IRQ"); + tr_err("Unable to register ipc mbox 0 IRQ"); sof_panic(SOF_IPC_PANIC_IPC); } ret = interrupt_register(mbox_irq1, mbox1_handler, ipc); if (ret < 0) { - tr_err(&ipc_tr, "Unable to register ipc mbox 1 IRQ"); + tr_err("Unable to register ipc mbox 1 IRQ"); sof_panic(SOF_IPC_PANIC_IPC); } diff --git a/src/drivers/mediatek/mt8195/interrupt.c b/src/drivers/mediatek/mt8195/interrupt.c index 79fafec94a2f..583a6e53f6fb 100644 --- a/src/drivers/mediatek/mt8195/interrupt.c +++ b/src/drivers/mediatek/mt8195/interrupt.c @@ -45,7 +45,7 @@ static void mtk_irq_mask(struct irq_desc *desc, uint32_t irq, unsigned int core) level = GET_INTLEVEL(irq); in_irq = GET_INTERRUPT_ID(irq); if (level > MAX_IRQ_NUM) { - tr_err(&int_tr, "Invalid interrupt %d", irq); + tr_err("Invalid interrupt %d", irq); return; } @@ -59,7 +59,7 @@ static void mtk_irq_mask(struct irq_desc *desc, uint32_t irq, unsigned int core) io_reg_update_bits(DSP_IRQ_EN, BIT(irq), 0x0); break; default: - tr_err(&int_tr, "Invalid interrupt %d", desc->irq); + tr_err("Invalid interrupt %d", desc->irq); return; } } @@ -73,7 +73,7 @@ static void mtk_irq_unmask(struct irq_desc *desc, uint32_t irq, unsigned int cor level = GET_INTLEVEL(irq); in_irq = GET_INTERRUPT_ID(irq); if (level > MAX_IRQ_NUM) { - tr_err(&int_tr, "Invalid interrupt %d", irq); + tr_err("Invalid interrupt %d", irq); return; } @@ -88,7 +88,7 @@ static void mtk_irq_unmask(struct irq_desc *desc, uint32_t irq, unsigned int cor io_reg_update_bits(DSP_IRQ_EN, BIT(irq), BIT(irq)); break; default: - tr_err(&int_tr, "Invalid interrupt %d", desc->irq); + tr_err("Invalid interrupt %d", desc->irq); return; } } @@ -145,7 +145,7 @@ static inline void mtk_handle_irq(struct irq_cascade_desc *cascade, k_spin_unlock(&cascade->lock, key); if (!handled) { - tr_err(&int_tr, "irq_handler(): not handled, bit %d", bit); + tr_err("irq_handler(): not handled, bit %d", bit); if (line_index == IRQ_NUM_EXT_LEVEL23) 
io_reg_update_bits(DSP_IRQ_EN, BIT(bit), 0x0); else @@ -167,7 +167,7 @@ static inline void irq_handler(void *data, uint32_t line_index) /* Handle current interrupts */ mtk_handle_irq(cascade, line_index, status); else - tr_err(&int_tr, "invalid interrupt status"); + tr_err("invalid interrupt status"); } #define DEFINE_IRQ_HANDLER(n) \ diff --git a/src/drivers/mediatek/mt8195/ipc.c b/src/drivers/mediatek/mt8195/ipc.c index 1fff0ff66566..4c9d1be3e1bd 100644 --- a/src/drivers/mediatek/mt8195/ipc.c +++ b/src/drivers/mediatek/mt8195/ipc.c @@ -146,7 +146,7 @@ int platform_ipc_init(struct ipc *ipc) iipc->dh_buffer.dmac = dma_get(DMA_DIR_HMEM_TO_LMEM, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); if (!iipc->dh_buffer.dmac) { - tr_err(&ipc_tr, "Unable to find DMA for host page table"); + tr_err("Unable to find DMA for host page table"); sof_panic(SOF_IPC_PANIC_IPC); } #endif diff --git a/src/idc/idc.c b/src/idc/idc.c index 8e26f7e54832..3696e9de8fa9 100644 --- a/src/idc/idc.c +++ b/src/idc/idc.c @@ -105,7 +105,7 @@ int idc_wait_in_blocking_mode(uint32_t target_core, bool (*cond)(int)) if (cond(target_core)) return 0; - tr_err(&idc_tr, "idc_wait_in_blocking_mode() error: timeout, target_core %u", + tr_err("idc_wait_in_blocking_mode() error: timeout, target_core %u", target_core); return -ETIME; } @@ -344,7 +344,7 @@ static int idc_ppl_state(uint32_t ppl_id, uint32_t phase) ppl_icd = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, ppl_id, IPC_COMP_IGNORE_REMOTE); if (!ppl_icd) { - tr_err(&idc_tr, "idc: comp %d not found", ppl_id); + tr_err("idc: comp %d not found", ppl_id); return IPC4_INVALID_RESOURCE_ID; } @@ -385,7 +385,7 @@ static void idc_process_async_msg(uint32_t slot) #if CONFIG_AMS process_incoming_message(slot); #else - tr_err(&idc_tr, "idc_cmd(): AMS not enabled"); + tr_err("idc_cmd(): AMS not enabled"); #endif } @@ -472,7 +472,7 @@ void idc_cmd(struct idc_msg *msg) idc_process_async_msg(IDC_HEADER_TO_AMS_SLOT_MASK(msg->header)); break; default: - tr_err(&idc_tr, 
"idc_cmd(): invalid msg->header = %u", + tr_err("idc_cmd(): invalid msg->header = %u", msg->header); } @@ -512,7 +512,7 @@ int idc_init(void) *idc = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(**idc)); #endif - tr_dbg(&idc_tr, "idc_init()"); + tr_dbg("idc_init()"); /* initialize idc data */ (*idc)->payload = platform_shared_get(static_payload, sizeof(static_payload)); @@ -534,7 +534,7 @@ int idc_restore(void) { struct idc **idc __unused = idc_get(); - tr_info(&idc_tr, "idc_restore()"); + tr_info("idc_restore()"); /* idc_restore() is invoked during D0->D0ix/D0ix->D0 flow. In that * case basic core structures e.g. idc struct should be already diff --git a/src/idc/zephyr_idc.c b/src/idc/zephyr_idc.c index 887cff0eb375..658af95acc52 100644 --- a/src/idc/zephyr_idc.c +++ b/src/idc/zephyr_idc.c @@ -130,7 +130,7 @@ int idc_send_msg(struct idc_msg *msg, uint32_t mode) work->sync = mode == IDC_BLOCKING; if (!cpu_is_core_enabled(target_cpu)) { - tr_err(&zephyr_idc_tr, "Core %u is down, cannot sent IDC message", target_cpu); + tr_err("Core %u is down, cannot sent IDC message", target_cpu); return -EACCES; } if (msg->payload) { diff --git a/src/include/sof/audio/buffer.h b/src/include/sof/audio/buffer.h index 8140da7ad0f7..688619e743f4 100644 --- a/src/include/sof/audio/buffer.h +++ b/src/include/sof/audio/buffer.h @@ -40,9 +40,6 @@ struct comp_dev; /* buffer tracing */ extern struct tr_ctx buffer_tr; -/** \brief Retrieves trace context from the buffer */ -#define trace_buf_get_tr_ctx(buf_ptr) (&(buf_ptr)->tctx) - /** \brief Retrieves id (pipe id) from the buffer */ #define trace_buf_get_id(buf_ptr) ((buf_ptr)->pipeline_id) diff --git a/src/include/sof/audio/component.h b/src/include/sof/audio/component.h index 92be3b9042f4..a5b71c7571d7 100644 --- a/src/include/sof/audio/component.h +++ b/src/include/sof/audio/component.h @@ -132,9 +132,6 @@ enum { /** \brief Retrieves subid (-1 = undefined) from the component driver */ #define trace_comp_drv_get_subid(drv_p) (-1) 
-/** \brief Retrieves trace context from the component device */ -#define trace_comp_get_tr_ctx(comp_p) (&(comp_p)->tctx) - /** \brief Retrieves id (pipe id) from the component device */ #define trace_comp_get_id(comp_p) ((comp_p)->ipc_config.pipeline_id) diff --git a/src/include/sof/audio/pipeline-trace.h b/src/include/sof/audio/pipeline-trace.h index 0a5af14450e7..c5666bf8c700 100644 --- a/src/include/sof/audio/pipeline-trace.h +++ b/src/include/sof/audio/pipeline-trace.h @@ -18,23 +18,22 @@ /* pipeline tracing */ extern struct tr_ctx pipe_tr; -#define trace_pipe_get_tr_ctx(pipe_p) (&(pipe_p)->tctx) #define trace_pipe_get_id(pipe_p) ((pipe_p)->pipeline_id) #define trace_pipe_get_subid(pipe_p) ((pipe_p)->comp_id) /* class (driver) level (no device object) tracing */ #define pipe_cl_err(__e, ...) \ - tr_err(&pipe_tr, __e, ##__VA_ARGS__) + tr_err(__e, ##__VA_ARGS__) #define pipe_cl_warn(__e, ...) \ - tr_warn(&pipe_tr, __e, ##__VA_ARGS__) + tr_warn(__e, ##__VA_ARGS__) #define pipe_cl_info(__e, ...) \ - tr_info(&pipe_tr, __e, ##__VA_ARGS__) + tr_info(__e, ##__VA_ARGS__) #define pipe_cl_dbg(__e, ...) 
\ - tr_dbg(&pipe_tr, __e, ##__VA_ARGS__) + tr_dbg(__e, ##__VA_ARGS__) /* device tracing */ #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) diff --git a/src/include/sof/ipc/common.h b/src/include/sof/ipc/common.h index ef2e8c8d7f3c..075c83b74a0c 100644 --- a/src/include/sof/ipc/common.h +++ b/src/include/sof/ipc/common.h @@ -34,7 +34,7 @@ extern struct tr_ctx ipc_tr; /* convenience error trace for mismatched internal structures */ #define IPC_SIZE_ERROR_TRACE(ctx, object) \ - tr_err(ctx, "ipc: size %d expected %zu", \ + tr_err("ipc: size %d expected %zu", \ (object).hdr.size, sizeof(object)) /* Returns pipeline source component */ diff --git a/src/include/sof/lib/dai-legacy.h b/src/include/sof/lib/dai-legacy.h index a47f2f5bb704..76bf44c22a44 100644 --- a/src/include/sof/lib/dai-legacy.h +++ b/src/include/sof/lib/dai-legacy.h @@ -223,7 +223,6 @@ struct dai_type_info { }; /* dai tracing */ -#define trace_dai_drv_get_tr_ctx(drv_p) ((drv_p)->tctx) #define trace_dai_drv_get_id(drv_p) (-1) #define trace_dai_drv_get_subid(drv_p) (-1) diff --git a/src/include/sof/trace/trace.h b/src/include/sof/trace/trace.h index 0cc9bb340651..f833c9775702 100644 --- a/src/include/sof/trace/trace.h +++ b/src/include/sof/trace/trace.h @@ -242,13 +242,13 @@ struct tr_ctx { #if defined(__ZEPHYR__) && defined(CONFIG_ZEPHYR_LOG) -#define tr_err(ctx, fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__) +#define tr_err(fmt, ...) LOG_ERR(fmt, ##__VA_ARGS__) -#define tr_warn(ctx, fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__) +#define tr_warn(fmt, ...) LOG_WRN(fmt, ##__VA_ARGS__) -#define tr_info(ctx, fmt, ...) LOG_INF(fmt, ##__VA_ARGS__) +#define tr_info(fmt, ...) LOG_INF(fmt, ##__VA_ARGS__) -#define tr_dbg(ctx, fmt, ...) LOG_DBG(fmt, ##__VA_ARGS__) +#define tr_dbg(fmt, ...) LOG_DBG(fmt, ##__VA_ARGS__) #else @@ -260,20 +260,20 @@ struct tr_ctx { #define LOG_MODULE_DECLARE(ctx, level) #endif -#define tr_err(ctx, fmt, ...) \ +#define tr_err(fmt, ...) 
\ trace_error_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) -#define tr_warn(ctx, fmt, ...) \ +#define tr_warn(fmt, ...) \ trace_warn_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) -#define tr_info(ctx, fmt, ...) \ +#define tr_info(fmt, ...) \ trace_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) /* tracev_ output depends on CONFIG_TRACEV=y */ -#define tr_dbg(ctx, fmt, ...) \ +#define tr_dbg(fmt, ...) \ tracev_event_with_ids_nonzephyr(_TRACE_INV_CLASS, \ _TRACE_INV_ID, _TRACE_INV_ID, fmt, ##__VA_ARGS__) diff --git a/src/ipc/dma-copy.c b/src/ipc/dma-copy.c index ee58ceb98c8c..019eb30f51a4 100644 --- a/src/ipc/dma-copy.c +++ b/src/ipc/dma-copy.c @@ -49,7 +49,7 @@ static struct dma_sg_elem *sg_get_elem_at(struct dma_sg_config *host_sg, } /* host offset in beyond end of SG buffer */ - tr_err(&dmacpy_tr, "sg_get_elem_at(): host offset in beyond end of SG buffer"); + tr_err("sg_get_elem_at(): host offset in beyond end of SG buffer"); return NULL; } #endif @@ -168,7 +168,7 @@ int dma_copy_new(struct dma_copy *dc) cap = 0; dc->dmac = dma_get(dir, cap, dev, DMA_ACCESS_SHARED); if (!dc->dmac) { - tr_err(&dmacpy_tr, "dma_copy_new(): dc->dmac = NULL"); + tr_err("dma_copy_new(): dc->dmac = NULL"); return -ENODEV; } @@ -176,7 +176,7 @@ int dma_copy_new(struct dma_copy *dc) /* get DMA channel from DMAC0 */ dc->chan = dma_channel_get_legacy(dc->dmac, CONFIG_TRACE_CHANNEL); if (!dc->chan) { - tr_err(&dmacpy_tr, "dma_copy_new(): dc->chan is NULL"); + tr_err("dma_copy_new(): dc->chan is NULL"); return -ENODEV; } #endif @@ -189,7 +189,7 @@ int dma_copy_set_stream_tag(struct dma_copy *dc, uint32_t stream_tag) /* get DMA channel from DMAC */ dc->chan = dma_channel_get_legacy(dc->dmac, stream_tag - 1); if (!dc->chan) { - tr_err(&dmacpy_tr, "dma_copy_set_stream_tag(): dc->chan is NULL"); + tr_err("dma_copy_set_stream_tag(): dc->chan is NULL"); return -EINVAL; 
} diff --git a/src/ipc/ipc-common.c b/src/ipc/ipc-common.c index e5bc82e348bb..6d5c6fbb09fb 100644 --- a/src/ipc/ipc-common.c +++ b/src/ipc/ipc-common.c @@ -48,7 +48,7 @@ int ipc_process_on_core(uint32_t core, bool blocking) /* check if requested core is enabled */ if (!cpu_is_core_enabled(core)) { - tr_err(&ipc_tr, "ipc_process_on_core(): core #%d is disabled", core); + tr_err("ipc_process_on_core(): core #%d is disabled", core); return -EACCES; } @@ -265,7 +265,7 @@ void ipc_schedule_process(struct ipc *ipc) int ipc_init(struct sof *sof) { - tr_dbg(&ipc_tr, "ipc_init()"); + tr_dbg("ipc_init()"); /* init ipc data */ sof->ipc = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->ipc)); diff --git a/src/ipc/ipc-helper.c b/src/ipc/ipc-helper.c index 9d09707a522f..bfe084d197c1 100644 --- a/src/ipc/ipc-helper.c +++ b/src/ipc/ipc-helper.c @@ -41,7 +41,7 @@ struct comp_buffer *buffer_new(const struct sof_ipc_buffer *desc, bool is_shared { struct comp_buffer *buffer; - tr_info(&buffer_tr, "buffer new size 0x%x id %d.%d flags 0x%x", + tr_info("buffer new size 0x%x id %d.%d flags 0x%x", desc->size, desc->comp.pipeline_id, desc->comp.id, desc->flags); /* allocate buffer */ @@ -69,7 +69,7 @@ int32_t ipc_comp_pipe_id(const struct ipc_comp_dev *icd) case COMP_TYPE_PIPELINE: return icd->pipeline->pipeline_id; default: - tr_err(&ipc_tr, "Unknown ipc component type %u", icd->type); + tr_err("Unknown ipc component type %u", icd->type); return -EINVAL; }; @@ -190,7 +190,7 @@ int ipc_pipeline_complete(struct ipc *ipc, uint32_t comp_id) /* check whether pipeline exists */ ipc_pipe = ipc_get_pipeline_by_id(ipc, comp_id); if (!ipc_pipe) { - tr_err(&ipc_tr, "ipc: ipc_pipeline_complete looking for pipe component id %d failed", + tr_err("ipc: ipc_pipeline_complete looking for pipe component id %d failed", comp_id); return -EINVAL; } @@ -204,28 +204,28 @@ int ipc_pipeline_complete(struct ipc *ipc, uint32_t comp_id) /* get pipeline source component */ ipc_ppl_source = 
ipc_get_ppl_src_comp(ipc, p->pipeline_id); if (!ipc_ppl_source) { - tr_err(&ipc_tr, "ipc: ipc_pipeline_complete looking for pipeline source failed"); + tr_err("ipc: ipc_pipeline_complete looking for pipeline source failed"); return -EINVAL; } /* get pipeline sink component */ ipc_ppl_sink = ipc_get_ppl_sink_comp(ipc, p->pipeline_id); if (!ipc_ppl_sink) { - tr_err(&ipc_tr, "ipc: ipc_pipeline_complete looking for pipeline sink failed"); + tr_err("ipc: ipc_pipeline_complete looking for pipeline sink failed"); return -EINVAL; } /* find the scheduling component */ icd = ipc_get_comp_by_id(ipc, p->sched_id); if (!icd) { - tr_warn(&ipc_tr, "ipc_pipeline_complete(): no scheduling component specified, use comp %d", + tr_warn("ipc_pipeline_complete(): no scheduling component specified, use comp %d", ipc_ppl_sink->id); icd = ipc_ppl_sink; } if (icd->core != ipc_pipe->core) { - tr_err(&ipc_tr, "ipc_pipeline_complete(): icd->core (%d) != ipc_pipe->core (%d) for pipeline scheduling component icd->id %d", + tr_err("ipc_pipeline_complete(): icd->core (%d) != ipc_pipe->core (%d) for pipeline scheduling component icd->id %d", icd->core, ipc_pipe->core, icd->id); return -EINVAL; } @@ -234,7 +234,7 @@ int ipc_pipeline_complete(struct ipc *ipc, uint32_t comp_id) pipeline_id = ipc_pipe->pipeline->pipeline_id; - tr_dbg(&ipc_tr, "ipc: pipe %d -> complete on comp %d", pipeline_id, + tr_dbg("ipc: pipe %d -> complete on comp %d", pipeline_id, comp_id); return pipeline_complete(ipc_pipe->pipeline, ipc_ppl_source->cd, @@ -250,7 +250,7 @@ int ipc_comp_free(struct ipc *ipc, uint32_t comp_id) /* check whether component exists */ icd = ipc_get_comp_by_id(ipc, comp_id); if (!icd) { - tr_err(&ipc_tr, "ipc_comp_free(): comp id: %d is not found", + tr_err("ipc_comp_free(): comp id: %d is not found", comp_id); return -ENODEV; } @@ -261,7 +261,7 @@ int ipc_comp_free(struct ipc *ipc, uint32_t comp_id) /* check state */ if (icd->cd->state != COMP_STATE_READY) { - tr_err(&ipc_tr, "ipc_comp_free(): comp id: 
%d state is %d cannot be freed", + tr_err("ipc_comp_free(): comp id: %d state is %d cannot be freed", comp_id, icd->cd->state); return -EINVAL; } @@ -276,7 +276,7 @@ int ipc_comp_free(struct ipc *ipc, uint32_t comp_id) * leak on error. Bug-free host drivers won't do * this, this was found via fuzzing. */ - tr_err(&ipc_tr, "ipc_comp_free(): uninitialized buffer lists on comp %d\n", + tr_err("ipc_comp_free(): uninitialized buffer lists on comp %d\n", icd->id); return -EINVAL; } diff --git a/src/ipc/ipc-zephyr.c b/src/ipc/ipc-zephyr.c index 49a448850a99..acfd1eef9aa5 100644 --- a/src/ipc/ipc-zephyr.c +++ b/src/ipc/ipc-zephyr.c @@ -102,14 +102,13 @@ static int ipc_device_suspend_handler(const struct device *dev, void *arg) int ret = 0; if (!(ipc->task_mask & IPC_TASK_POWERDOWN)) { - tr_err(&ipc_tr, - "ipc task mask not set to IPC_TASK_POWERDOWN. Current value: %u", + tr_err("ipc task mask not set to IPC_TASK_POWERDOWN. Current value: %u", ipc->task_mask); ret = -ENOMSG; } if (!ipc->pm_prepare_D3) { - tr_err(&ipc_tr, "power state D3 not requested"); + tr_err("power state D3 not requested"); ret = -EBADMSG; } @@ -130,9 +129,9 @@ static int ipc_device_suspend_handler(const struct device *dev, void *arg) } if (only_buff_status) { - tr_warn(&ipc_tr, "continuing D3 procedure with the msg in the queue"); + tr_warn("continuing D3 procedure with the msg in the queue"); } else { - tr_err(&ipc_tr, "there are queued IPC messages to be sent"); + tr_err("there are queued IPC messages to be sent"); ret = -EINPROGRESS; } } diff --git a/src/ipc/ipc3/dai.c b/src/ipc/ipc3/dai.c index 697b00c960ca..bb541528d23a 100644 --- a/src/ipc/ipc3/dai.c +++ b/src/ipc/ipc3/dai.c @@ -206,7 +206,7 @@ int ipc_comp_dai_config(struct ipc *ipc, struct ipc_config_dai *common_config, int ret = -ENODEV; int i; - tr_info(&ipc_tr, "ipc_comp_dai_config() dai type = %d index = %d", + tr_info("ipc_comp_dai_config() dai type = %d index = %d", config->type, config->dai_index); /* for each component */ @@ -232,7 
+232,7 @@ int ipc_comp_dai_config(struct ipc *ipc, struct ipc_config_dai *common_config, } if (ret < 0) { - tr_err(&ipc_tr, "ipc_comp_dai_config(): comp_dai_config() failed"); + tr_err("ipc_comp_dai_config(): comp_dai_config() failed"); return ret; } diff --git a/src/ipc/ipc3/handler.c b/src/ipc/ipc3/handler.c index 87fe5b93265e..0f54af7b23da 100644 --- a/src/ipc/ipc3/handler.c +++ b/src/ipc/ipc3/handler.c @@ -83,12 +83,12 @@ LOG_MODULE_DECLARE(ipc, CONFIG_SOF_LOG_LEVEL); ___ret = memcpy_s(rx, rx_size, tx, tx->size); \ assert(!___ret); \ bzero((char *)rx + tx->size, rx_size - tx->size);\ - tr_dbg(&ipc_tr, "ipc: hdr 0x%x rx (%zu) > tx (%d)",\ + tr_dbg("ipc: hdr 0x%x rx (%zu) > tx (%d)",\ rx->cmd, rx_size, tx->size); \ } else if (tx->size > rx_size) { \ ___ret = memcpy_s(rx, rx_size, tx, rx_size); \ assert(!___ret); \ - tr_warn(&ipc_tr, "ipc: hdr 0x%x tx (%d) > rx (%zu)",\ + tr_warn("ipc: hdr 0x%x tx (%d) > rx (%zu)",\ rx->cmd, tx->size, rx_size); \ } else { \ ___ret = memcpy_s(rx, rx_size, tx, rx_size); \ @@ -112,7 +112,7 @@ struct ipc_cmd_hdr *mailbox_validate(void) /* validate component header */ if (hdr->size < sizeof(*hdr) || hdr->size > SOF_IPC_MSG_MAX_SIZE) { #ifndef CONFIG_ARCH_POSIX_LIBFUZZER - ipc_cmd_err(&ipc_tr, "ipc: invalid size 0x%x", hdr->size); + ipc_cmd_err("ipc: invalid size 0x%x", hdr->size); #endif return NULL; } @@ -219,7 +219,7 @@ static int ipc_stream_pcm_params(uint32_t stream) /* get the pcm_dev */ pcm_dev = ipc_get_comp_by_id(ipc, pcm_params.comp_id); if (!pcm_dev) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", pcm_params.comp_id); + ipc_cmd_err("ipc: comp %d not found", pcm_params.comp_id); return -ENODEV; } @@ -227,11 +227,11 @@ static int ipc_stream_pcm_params(uint32_t stream) if (!cpu_is_me(pcm_dev->core)) return ipc_process_on_core(pcm_dev->core, false); - tr_dbg(&ipc_tr, "ipc: comp %d -> params", pcm_params.comp_id); + tr_dbg("ipc: comp %d -> params", pcm_params.comp_id); /* sanity check comp */ if (!pcm_dev->cd->pipeline) { - 
ipc_cmd_err(&ipc_tr, "ipc: comp %d pipeline not found", + ipc_cmd_err("ipc: comp %d pipeline not found", pcm_params.comp_id); return -EINVAL; } @@ -239,7 +239,7 @@ static int ipc_stream_pcm_params(uint32_t stream) /* sanity check for pcm_params size */ if (pcm_params.hdr.size != sizeof(pcm_params) + pcm_params.params.ext_data_length) { - ipc_cmd_err(&ipc_tr, "pcm_params invalid size, hdr.size=%d, ext_data_len=%d", + ipc_cmd_err("pcm_params invalid size, hdr.size=%d, ext_data_len=%d", pcm_params.hdr.size, pcm_params.params.ext_data_length); return -EINVAL; } @@ -247,13 +247,13 @@ static int ipc_stream_pcm_params(uint32_t stream) /* sanity check for pcm_params.params size */ if (pcm_params.params.hdr.size != sizeof(pcm_params.params) + pcm_params.params.ext_data_length) { - ipc_cmd_err(&ipc_tr, "pcm_params.params invalid size, hdr.size=%d, ext_data_len=%d", + ipc_cmd_err("pcm_params.params invalid size, hdr.size=%d, ext_data_len=%d", pcm_params.params.hdr.size, pcm_params.params.ext_data_length); return -EINVAL; } if (sizeof(pcm_params) + pcm_params.params.ext_data_length > SOF_IPC_MSG_MAX_SIZE) { - ipc_cmd_err(&ipc_tr, "pcm_params ext_data_length invalid size %d max allowed %zu", + ipc_cmd_err("pcm_params ext_data_length invalid size %d max allowed %zu", pcm_params.params.ext_data_length, SOF_IPC_MSG_MAX_SIZE - sizeof(pcm_params)); return -EINVAL; @@ -294,7 +294,7 @@ static int ipc_stream_pcm_params(uint32_t stream) err = comp_set_attribute(cd, COMP_ATTR_HOST_BUFFER, &elem_array); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d host buffer failed %d", + ipc_cmd_err("ipc: comp %d host buffer failed %d", pcm_params.comp_id, err); goto error; } @@ -302,7 +302,7 @@ static int ipc_stream_pcm_params(uint32_t stream) /* TODO: should be extracted to platform specific code */ err = comp_set_attribute(cd, COMP_ATTR_COPY_TYPE, &copy_type); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d setting copy type failed %d", + ipc_cmd_err("ipc: comp %d setting copy type failed %d", 
pcm_params.comp_id, err); goto error; } @@ -314,7 +314,7 @@ static int ipc_stream_pcm_params(uint32_t stream) err = pipeline_params(pcm_dev->cd->pipeline, pcm_dev->cd, (struct sof_ipc_pcm_params *)ipc_get()->comp_data); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d params failed %d", + ipc_cmd_err("ipc: pipe %d comp %d params failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_params.comp_id, err); goto error; @@ -323,7 +323,7 @@ static int ipc_stream_pcm_params(uint32_t stream) /* prepare pipeline audio params */ err = pipeline_prepare(pcm_dev->cd->pipeline, pcm_dev->cd); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d prepare failed %d", + ipc_cmd_err("ipc: pipe %d comp %d prepare failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_params.comp_id, err); goto error; @@ -346,7 +346,7 @@ static int ipc_stream_pcm_params(uint32_t stream) error: reset_err = pipeline_reset(pcm_dev->cd->pipeline, pcm_dev->cd); if (reset_err < 0) - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d reset failed %d", + ipc_cmd_err("ipc: pipe %d comp %d reset failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_params.comp_id, reset_err); return err; @@ -366,7 +366,7 @@ static int ipc_stream_pcm_free(uint32_t header) /* get the pcm_dev */ pcm_dev = ipc_get_comp_by_id(ipc, free_req.comp_id); if (!pcm_dev) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", free_req.comp_id); + ipc_cmd_err("ipc: comp %d not found", free_req.comp_id); return -ENODEV; } @@ -374,11 +374,11 @@ static int ipc_stream_pcm_free(uint32_t header) if (!cpu_is_me(pcm_dev->core)) return ipc_process_on_core(pcm_dev->core, false); - tr_dbg(&ipc_tr, "ipc: comp %d -> free", free_req.comp_id); + tr_dbg("ipc: comp %d -> free", free_req.comp_id); /* sanity check comp */ if (!pcm_dev->cd->pipeline) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d pipeline not found", + ipc_cmd_err("ipc: comp %d pipeline not found", free_req.comp_id); return -EINVAL; } @@ -403,7 +403,7 @@ static int ipc_stream_position(uint32_t header) /* 
get the pcm_dev */ pcm_dev = ipc_get_comp_by_id(ipc, stream.comp_id); if (!pcm_dev) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", stream.comp_id); + ipc_cmd_err("ipc: comp %d not found", stream.comp_id); return -ENODEV; } @@ -411,7 +411,7 @@ static int ipc_stream_position(uint32_t header) if (!cpu_is_me(pcm_dev->core)) return ipc_process_on_core(pcm_dev->core, false); - tr_info(&ipc_tr, "ipc: comp %d -> position", stream.comp_id); + tr_info("ipc: comp %d -> position", stream.comp_id); memset(&posn, 0, sizeof(posn)); @@ -422,7 +422,7 @@ static int ipc_stream_position(uint32_t header) posn.comp_id = stream.comp_id; if (!pcm_dev->cd->pipeline) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d pipeline not found", + ipc_cmd_err("ipc: comp %d pipeline not found", stream.comp_id); return -EINVAL; } @@ -452,7 +452,7 @@ static int ipc_stream_trigger(uint32_t header) /* get the pcm_dev */ pcm_dev = ipc_get_comp_by_id(ipc, stream.comp_id); if (!pcm_dev) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", stream.comp_id); + ipc_cmd_err("ipc: comp %d not found", stream.comp_id); return -ENODEV; } @@ -460,7 +460,7 @@ static int ipc_stream_trigger(uint32_t header) if (!cpu_is_me(pcm_dev->core)) return ipc_process_on_core(pcm_dev->core, false); - tr_dbg(&ipc_tr, "ipc: comp %d -> trigger cmd 0x%x", + tr_dbg("ipc: comp %d -> trigger cmd 0x%x", stream.comp_id, ipc_command); switch (ipc_command) { @@ -480,18 +480,18 @@ static int ipc_stream_trigger(uint32_t header) case SOF_IPC_STREAM_TRIG_XRUN: return 0; default: - ipc_cmd_err(&ipc_tr, "ipc: invalid trigger cmd 0x%x", ipc_command); + ipc_cmd_err("ipc: invalid trigger cmd 0x%x", ipc_command); return -ENODEV; } if (!pcm_dev->cd->pipeline) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d pipeline not found", + ipc_cmd_err("ipc: comp %d pipeline not found", stream.comp_id); return -EINVAL; } if (pcm_dev->type != COMP_TYPE_COMPONENT) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not stream (type %d)", + ipc_cmd_err("ipc: comp %d not stream (type %d)", 
stream.comp_id, pcm_dev->type); return -EINVAL; } @@ -520,7 +520,7 @@ static int ipc_stream_trigger(uint32_t header) } if (ret < 0) - ipc_cmd_err(&ipc_tr, "ipc: comp %d trigger 0x%x failed %d", + ipc_cmd_err("ipc: comp %d trigger 0x%x failed %d", stream.comp_id, ipc_command, ret); @@ -546,7 +546,7 @@ static int ipc_glb_stream_message(uint32_t header) case SOF_IPC_STREAM_POSITION: return ipc_stream_position(header); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown stream cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown stream cmd 0x%x", cmd); return -EINVAL; } } @@ -576,7 +576,7 @@ static int ipc_dai_config_set(struct sof_ipc_dai_config *config, /* get DAI */ dai = dai_get(config->type, config->dai_index, 0 /* existing only */); if (!dai) { - ipc_cmd_err(&ipc_tr, "ipc: dai %d,%d not found", config->type, + ipc_cmd_err("ipc: dai %d,%d not found", config->type, config->dai_index); return -ENODEV; } @@ -585,7 +585,7 @@ static int ipc_dai_config_set(struct sof_ipc_dai_config *config, ret = dai_set_config(dai, config_dai, config); dai_put(dai); /* free ref immediately */ if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: dai %d,%d config failed %d", config->type, + ipc_cmd_err("ipc: dai %d,%d config failed %d", config->type, config->dai_index, ret); return ret; } @@ -612,7 +612,7 @@ static int ipc_msg_dai_config(uint32_t header) /* copy message with ABI safe method */ IPC_COPY_CMD(config, ipc->comp_data); - tr_info(&ipc_tr, "ipc: dai %d.%d -> config ", config.type, + tr_info("ipc: dai %d.%d -> config ", config.type, config.dai_index); /* set common configuration */ @@ -639,7 +639,7 @@ static int ipc_glb_dai_message(uint32_t header) case SOF_IPC_DAI_LOOPBACK: //return ipc_comp_set_value(header, COMP_CMD_LOOPBACK); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown DAI cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown DAI cmd 0x%x", cmd); return -EINVAL; } } @@ -652,7 +652,7 @@ static int ipc_pm_context_size(uint32_t header) { struct sof_ipc_pm_ctx pm_ctx; - tr_info(&ipc_tr, "ipc: pm -> size"); + 
tr_info("ipc: pm -> size"); bzero(&pm_ctx, sizeof(pm_ctx)); @@ -668,7 +668,7 @@ static int ipc_pm_context_save(uint32_t header) { //struct sof_ipc_pm_ctx *pm_ctx = _ipc->comp_data; - tr_info(&ipc_tr, "ipc: pm -> save"); + tr_info("ipc: pm -> save"); sa_exit(sof_get()); @@ -707,7 +707,7 @@ static int ipc_pm_context_restore(uint32_t header) { //struct sof_ipc_pm_ctx *pm_ctx = _ipc->comp_data; - tr_info(&ipc_tr, "ipc: pm -> restore"); + tr_info("ipc: pm -> restore"); ipc_get()->pm_prepare_D3 = 0; @@ -728,12 +728,12 @@ static int ipc_pm_core_enable(uint32_t header) /* check if core enable mask is valid */ if (pm_core_config.enable_mask > MASK(CONFIG_CORE_COUNT - 1, 0)) { - ipc_cmd_err(&ipc_tr, "ipc: CONFIG_CORE_COUNT: %d < core enable mask: %d", + ipc_cmd_err("ipc: CONFIG_CORE_COUNT: %d < core enable mask: %d", CONFIG_CORE_COUNT, pm_core_config.enable_mask); return -EINVAL; } - tr_info(&ipc_tr, "ipc: pm core mask 0x%x -> enable", + tr_info("ipc: pm core mask 0x%x -> enable", pm_core_config.enable_mask); for (i = 0; i < CONFIG_CORE_COUNT; i++) { @@ -741,7 +741,7 @@ static int ipc_pm_core_enable(uint32_t header) if (pm_core_config.enable_mask & (1 << i)) { ret = cpu_enable_core(i); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "Failed to enable core %d", i); + ipc_cmd_err("Failed to enable core %d", i); return ret; } } else { @@ -759,7 +759,7 @@ static int ipc_pm_gate(uint32_t header) IPC_COPY_CMD(pm_gate, ipc_get()->comp_data); - tr_info(&ipc_tr, "ipc: pm gate flags 0x%x", pm_gate.flags); + tr_info("ipc: pm gate flags 0x%x", pm_gate.flags); /* pause dma trace firstly if needed */ if (pm_gate.flags & SOF_PM_NO_TRACE) @@ -803,7 +803,7 @@ static int ipc_glb_pm_message(uint32_t header) case SOF_IPC_PM_CLK_GET: case SOF_IPC_PM_CLK_REQ: default: - ipc_cmd_err(&ipc_tr, "ipc: unknown pm cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown pm cmd 0x%x", cmd); return -EINVAL; } } @@ -859,7 +859,7 @@ static int ipc_dma_trace_config(uint32_t header) err = dma_trace_host_buffer(dmat, &elem_array, 
ring_size); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: trace failed to set host buffers %d", + ipc_cmd_err("ipc: trace failed to set host buffers %d", err); goto error; } @@ -873,7 +873,7 @@ static int ipc_dma_trace_config(uint32_t header) err = dma_trace_enable(dmat); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: failed to enable trace %d", err); + ipc_cmd_err("ipc: failed to enable trace %d", err); goto error; } @@ -906,12 +906,12 @@ static int ipc_trace_filter_update(uint32_t header) /* validation, packet->hdr.size has already been compared with SOF_IPC_MSG_MAX_SIZE */ if (sizeof(*packet) + sizeof(*elem) * packet->elem_cnt != packet->hdr.size) { - ipc_cmd_err(&ipc_tr, "trace_filter_update failed, elem_cnt %d is inconsistent with hdr.size %d", + ipc_cmd_err("trace_filter_update failed, elem_cnt %d is inconsistent with hdr.size %d", packet->elem_cnt, packet->hdr.size); return -EINVAL; } - tr_info(&ipc_tr, "ipc: trace_filter_update received, size %d elems", + tr_info("ipc: trace_filter_update received, size %d elems", packet->elem_cnt); /* read each filter set and update selected components trace settings */ @@ -922,12 +922,12 @@ static int ipc_trace_filter_update(uint32_t header) cnt = trace_filter_update(&filter); if (cnt < 0) { - ipc_cmd_err(&ipc_tr, "trace_filter_update failed for UUID key 0x%X, comp %d.%d and log level %d", + ipc_cmd_err("trace_filter_update failed for UUID key 0x%X, comp %d.%d and log level %d", filter.uuid_id, filter.pipe_id, filter.comp_id, filter.log_level); ret = cnt; } else { - tr_info(&ipc_tr, "trace_filter_update for UUID key 0x%X, comp %d.%d affected %d components", + tr_info("trace_filter_update for UUID key 0x%X, comp %d.%d affected %d components", filter.uuid_id, filter.pipe_id, filter.comp_id, cnt); } @@ -942,7 +942,7 @@ static int ipc_glb_trace_message(uint32_t header) { uint32_t cmd = iCS(header); - tr_info(&ipc_tr, "ipc: debug cmd 0x%x", cmd); + tr_info("ipc: debug cmd 0x%x", cmd); switch (cmd) { case SOF_IPC_TRACE_DMA_PARAMS: 
@@ -954,7 +954,7 @@ static int ipc_glb_trace_message(uint32_t header) case SOF_IPC_TRACE_FILTER_UPDATE: return ipc_trace_filter_update(header); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown debug cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown debug cmd 0x%x", cmd); return -EINVAL; } } @@ -992,10 +992,10 @@ static inline int ipc_probe_init(uint32_t header) struct sof_ipc_probe_dma_add_params *params = ipc_get()->comp_data; int dma_provided = params->num_elems; - tr_dbg(&ipc_tr, "ipc_probe_init()"); + tr_dbg("ipc_probe_init()"); if (dma_provided > 1 || dma_provided < 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_init(): Invalid amount of extraction DMAs specified = %d", + ipc_cmd_err("ipc_probe_init(): Invalid amount of extraction DMAs specified = %d", dma_provided); return -EINVAL; } @@ -1005,7 +1005,7 @@ static inline int ipc_probe_init(uint32_t header) static inline int ipc_probe_deinit(uint32_t header) { - tr_dbg(&ipc_tr, "ipc_probe_deinit()"); + tr_dbg("ipc_probe_deinit()"); return probe_deinit(); } @@ -1015,17 +1015,17 @@ static inline int ipc_probe_dma_add(uint32_t header) struct sof_ipc_probe_dma_add_params *params = ipc_get()->comp_data; int dmas_count = params->num_elems; - tr_dbg(&ipc_tr, "ipc_probe_dma_add()"); + tr_dbg("ipc_probe_dma_add()"); if (dmas_count > CONFIG_PROBE_DMA_MAX) { - ipc_cmd_err(&ipc_tr, "ipc_probe_dma_add(): Invalid amount of injection DMAs specified = %d. Max is " + ipc_cmd_err("ipc_probe_dma_add(): Invalid amount of injection DMAs specified = %d. Max is " STRINGIFY(CONFIG_PROBE_DMA_MAX) ".", dmas_count); return -EINVAL; } if (dmas_count <= 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_dma_add(): Inferred amount of incjection DMAs in payload is %d. This could indicate corrupt size reported in header or invalid IPC payload.", + ipc_cmd_err("ipc_probe_dma_add(): Inferred amount of incjection DMAs in payload is %d. 
This could indicate corrupt size reported in header or invalid IPC payload.", dmas_count); return -EINVAL; } @@ -1038,17 +1038,17 @@ static inline int ipc_probe_dma_remove(uint32_t header) struct sof_ipc_probe_dma_remove_params *params = ipc_get()->comp_data; int tags_count = params->num_elems; - tr_dbg(&ipc_tr, "ipc_probe_dma_remove()"); + tr_dbg("ipc_probe_dma_remove()"); if (tags_count > CONFIG_PROBE_DMA_MAX) { - ipc_cmd_err(&ipc_tr, "ipc_probe_dma_remove(): Invalid amount of injection DMAs specified = %d. Max is " + ipc_cmd_err("ipc_probe_dma_remove(): Invalid amount of injection DMAs specified = %d. Max is " STRINGIFY(CONFIG_PROBE_DMA_MAX) ".", tags_count); return -EINVAL; } if (tags_count <= 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_dma_remove(): Inferred amount of incjection DMAs in payload is %d. This could indicate corrupt size reported in header or invalid IPC payload.", + ipc_cmd_err("ipc_probe_dma_remove(): Inferred amount of incjection DMAs in payload is %d. This could indicate corrupt size reported in header or invalid IPC payload.", tags_count); return -EINVAL; } @@ -1061,17 +1061,17 @@ static inline int ipc_probe_point_add(uint32_t header) struct sof_ipc_probe_point_add_params *params = ipc_get()->comp_data; int probes_count = params->num_elems; - tr_dbg(&ipc_tr, "ipc_probe_point_add()"); + tr_dbg("ipc_probe_point_add()"); if (probes_count > CONFIG_PROBE_POINTS_MAX) { - ipc_cmd_err(&ipc_tr, "ipc_probe_point_add(): Invalid amount of Probe Points specified = %d. Max is " + ipc_cmd_err("ipc_probe_point_add(): Invalid amount of Probe Points specified = %d. Max is " STRINGIFY(CONFIG_PROBE_POINT_MAX) ".", probes_count); return -EINVAL; } if (probes_count <= 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_point_add(): Inferred amount of Probe Points in payload is %d. This could indicate corrupt size reported in header or invalid IPC payload.", + ipc_cmd_err("ipc_probe_point_add(): Inferred amount of Probe Points in payload is %d. 
This could indicate corrupt size reported in header or invalid IPC payload.", probes_count); return -EINVAL; } @@ -1084,17 +1084,17 @@ static inline int ipc_probe_point_remove(uint32_t header) struct sof_ipc_probe_point_remove_params *params = ipc_get()->comp_data; int probes_count = params->num_elems; - tr_dbg(&ipc_tr, "ipc_probe_point_remove()"); + tr_dbg("ipc_probe_point_remove()"); if (probes_count > CONFIG_PROBE_POINTS_MAX) { - ipc_cmd_err(&ipc_tr, "ipc_probe_point_remove(): Invalid amount of Probe Points specified = %d. Max is " + ipc_cmd_err("ipc_probe_point_remove(): Invalid amount of Probe Points specified = %d. Max is " STRINGIFY(CONFIG_PROBE_POINT_MAX) ".", probes_count); return -EINVAL; } if (probes_count <= 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_point_remove(): Inferred amount of Probe Points in payload is %d. This could indicate corrupt size reported in header or invalid IPC payload.", + ipc_cmd_err("ipc_probe_point_remove(): Inferred amount of Probe Points in payload is %d. 
This could indicate corrupt size reported in header or invalid IPC payload.", probes_count); return -EINVAL; } @@ -1107,7 +1107,7 @@ static int ipc_probe_info(uint32_t header) struct sof_ipc_probe_info_params *params = ipc_get()->comp_data; int ret; - tr_dbg(&ipc_tr, "ipc_probe_get_data()"); + tr_dbg("ipc_probe_get_data()"); switch (cmd) { case SOF_IPC_PROBE_DMA_INFO: @@ -1117,13 +1117,13 @@ static int ipc_probe_info(uint32_t header) ret = probe_point_info(params, SOF_IPC_MSG_MAX_SIZE); break; default: - ipc_cmd_err(&ipc_tr, "ipc_probe_info(): Invalid probe INFO command = %u", + ipc_cmd_err("ipc_probe_info(): Invalid probe INFO command = %u", cmd); ret = -EINVAL; } if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc_probe_info(): cmd %u failed", cmd); + ipc_cmd_err("ipc_probe_info(): cmd %u failed", cmd); return ret; } @@ -1134,7 +1134,7 @@ static int ipc_probe_info(uint32_t header) mailbox_hostbox_write(0, params, params->rhdr.hdr.size); ret = 1; } else { - ipc_cmd_err(&ipc_tr, "ipc_probe_get_data(): probes module returned too much payload for cmd %u - returned %d bytes, max %d", + ipc_cmd_err("ipc_probe_get_data(): probes module returned too much payload for cmd %u - returned %d bytes, max %d", cmd, params->rhdr.hdr.size, MIN(MAILBOX_HOSTBOX_SIZE, SOF_IPC_MSG_MAX_SIZE)); ret = -EINVAL; @@ -1147,7 +1147,7 @@ static int ipc_glb_probe(uint32_t header) { uint32_t cmd = iCS(header); - tr_dbg(&ipc_tr, "ipc: probe cmd 0x%x", cmd); + tr_dbg("ipc: probe cmd 0x%x", cmd); switch (cmd) { case SOF_IPC_PROBE_INIT: @@ -1166,14 +1166,14 @@ static int ipc_glb_probe(uint32_t header) case SOF_IPC_PROBE_POINT_INFO: return ipc_probe_info(header); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown probe cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown probe cmd 0x%x", cmd); return -EINVAL; } } #else static inline int ipc_glb_probe(uint32_t header) { - ipc_cmd_err(&ipc_tr, "ipc_glb_probe(): Probes not enabled by Kconfig."); + ipc_cmd_err("ipc_glb_probe(): Probes not enabled by Kconfig."); return 
-EINVAL; } @@ -1194,7 +1194,7 @@ static int ipc_comp_value(uint32_t header, uint32_t cmd) /* get the component */ comp_dev = ipc_get_comp_by_id(ipc, data->comp_id); if (!comp_dev) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", data->comp_id); + ipc_cmd_err("ipc: comp %d not found", data->comp_id); return -ENODEV; } @@ -1202,12 +1202,12 @@ static int ipc_comp_value(uint32_t header, uint32_t cmd) if (!cpu_is_me(comp_dev->core)) return ipc_process_on_core(comp_dev->core, false); - tr_dbg(&ipc_tr, "ipc: comp %d -> cmd %d", data->comp_id, data->cmd); + tr_dbg("ipc: comp %d -> cmd %d", data->comp_id, data->cmd); /* get component values */ ret = comp_cmd(comp_dev->cd, cmd, data, SOF_IPC_MSG_MAX_SIZE); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d cmd %u failed %d", data->comp_id, + ipc_cmd_err("ipc: comp %d cmd %u failed %d", data->comp_id, data->cmd, ret); return ret; } @@ -1218,7 +1218,7 @@ static int ipc_comp_value(uint32_t header, uint32_t cmd) mailbox_hostbox_write(0, data, data->rhdr.hdr.size); ret = 1; } else { - ipc_cmd_err(&ipc_tr, "ipc: comp %d cmd %u returned %d bytes max %d", + ipc_cmd_err("ipc: comp %d cmd %u returned %d bytes max %d", data->comp_id, data->cmd, data->rhdr.hdr.size, MIN(MAILBOX_HOSTBOX_SIZE, SOF_IPC_MSG_MAX_SIZE)); ret = -EINVAL; @@ -1241,7 +1241,7 @@ static int ipc_glb_comp_message(uint32_t header) case SOF_IPC_COMP_GET_DATA: return ipc_comp_value(header, COMP_CMD_GET_DATA); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown comp cmd 0x%x", cmd); + ipc_cmd_err("ipc: unknown comp cmd 0x%x", cmd); return -EINVAL; } } @@ -1262,13 +1262,13 @@ static int ipc_glb_tplg_comp_new(uint32_t header) if (!cpu_is_me(comp->core)) return ipc_process_on_core(comp->core, false); - tr_dbg(&ipc_tr, "ipc: pipe %d comp %d -> new (type %d)", + tr_dbg("ipc: pipe %d comp %d -> new (type %d)", comp->pipeline_id, comp->id, comp->type); /* register component */ ret = ipc_comp_new(ipc, ipc_to_comp_new(comp)); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp 
%d creation failed %d", + ipc_cmd_err("ipc: pipe %d comp %d creation failed %d", comp->pipeline_id, comp->id, ret); return ret; } @@ -1298,13 +1298,13 @@ static int ipc_glb_tplg_buffer_new(uint32_t header) if (!cpu_is_me(ipc_buffer.comp.core)) return ipc_process_on_core(ipc_buffer.comp.core, false); - tr_dbg(&ipc_tr, "ipc: pipe %d buffer %d -> new (0x%x bytes)", + tr_dbg("ipc: pipe %d buffer %d -> new (0x%x bytes)", ipc_buffer.comp.pipeline_id, ipc_buffer.comp.id, ipc_buffer.size); ret = ipc_buffer_new(ipc, (struct sof_ipc_buffer *)ipc->comp_data); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d buffer %d creation failed %d", + ipc_cmd_err("ipc: pipe %d buffer %d creation failed %d", ipc_buffer.comp.pipeline_id, ipc_buffer.comp.id, ret); return ret; @@ -1335,11 +1335,11 @@ static int ipc_glb_tplg_pipe_new(uint32_t header) if (!cpu_is_me(ipc_pipeline.core)) return ipc_process_on_core(ipc_pipeline.core, false); - tr_dbg(&ipc_tr, "ipc: pipe %d -> new", ipc_pipeline.pipeline_id); + tr_dbg("ipc: pipe %d -> new", ipc_pipeline.pipeline_id); ret = ipc_pipeline_new(ipc, ipc->comp_data); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d creation failed %d", + ipc_cmd_err("ipc: pipe %d creation failed %d", ipc_pipeline.pipeline_id, ret); return ret; } @@ -1382,13 +1382,13 @@ static int ipc_glb_tplg_free(uint32_t header, /* copy message with ABI safe method */ IPC_COPY_CMD(ipc_free_msg, ipc->comp_data); - tr_info(&ipc_tr, "ipc: comp %d -> free", ipc_free_msg.id); + tr_info("ipc: comp %d -> free", ipc_free_msg.id); /* free the object */ ret = free_func(ipc, ipc_free_msg.id); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d free failed %d", + ipc_cmd_err("ipc: comp %d free failed %d", ipc_free_msg.id, ret); } @@ -1417,7 +1417,7 @@ static int ipc_glb_tplg_message(uint32_t header) case SOF_IPC_TPLG_BUFFER_FREE: return ipc_glb_tplg_free(header, ipc_buffer_free); default: - ipc_cmd_err(&ipc_tr, "ipc: unknown tplg header 0x%x", header); + ipc_cmd_err("ipc: unknown tplg 
header 0x%x", header); return -EINVAL; } } @@ -1502,7 +1502,7 @@ static int ipc_glb_debug_message(uint32_t header) return ipc_glb_test_mem_usage(header); #endif default: - ipc_cmd_err(&ipc_tr, "ipc: unknown debug header 0x%x", header); + ipc_cmd_err("ipc: unknown debug header 0x%x", header); return -EINVAL; } } @@ -1516,7 +1516,7 @@ static int ipc_glb_test_message(uint32_t header) case SOF_IPC_TEST_IPC_FLOOD: return 0; /* just return so next IPC can be sent */ default: - ipc_cmd_err(&ipc_tr, "ipc: unknown test header 0x%x", header); + ipc_cmd_err("ipc: unknown test header 0x%x", header); return -EINVAL; } } @@ -1627,7 +1627,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) int ret; if (!hdr) { - ipc_cmd_err(&ipc_tr, "ipc: invalid IPC header."); + ipc_cmd_err("ipc: invalid IPC header."); ret = -EINVAL; goto out; } @@ -1635,7 +1635,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) if (cpu_is_primary(cpu_get_id())) { /* A new IPC from the host, delivered to the primary core */ ipc->core = PLATFORM_PRIMARY_CORE_ID; - tr_info(&ipc_tr, "ipc: new cmd 0x%x", hdr->cmd); + tr_info("ipc: new cmd 0x%x", hdr->cmd); } type = iGS(hdr->cmd); @@ -1683,13 +1683,13 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) break; #endif default: - ipc_cmd_err(&ipc_tr, "ipc: unknown command type %u", type); + ipc_cmd_err("ipc: unknown command type %u", type); ret = -EINVAL; break; } out: - tr_dbg(&ipc_tr, "ipc: last request 0x%x returned %d", type, ret); + tr_dbg("ipc: last request 0x%x returned %d", type, ret); /* if ret > 0, reply created and copied by cmd() */ if (ret <= 0) { diff --git a/src/ipc/ipc3/helper.c b/src/ipc/ipc3/helper.c index 8646d80f0319..cdfd9f691473 100644 --- a/src/ipc/ipc3/helper.c +++ b/src/ipc/ipc3/helper.c @@ -94,7 +94,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) } if (!drv) - tr_err(&comp_tr, "get_drv(): driver not found, comp->type = %u", + tr_err("get_drv(): driver not found, comp->type = %u", comp->type); goto out; @@ -106,7 +106,7 @@ static const struct 
comp_driver *get_drv(struct sof_ipc_comp *comp) * add. */ if (comp->hdr.size < sizeof(*comp) + comp->ext_data_length) { - tr_err(&comp_tr, "Invalid size, hdr.size=0x%x, ext_data_length=0x%x\n", + tr_err("Invalid size, hdr.size=0x%x, ext_data_length=0x%x\n", comp->hdr.size, comp->ext_data_length); goto out; } @@ -117,7 +117,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) /* UUID is first item in extended data - check its big enough */ if (comp->ext_data_length < UUID_SIZE) { - tr_err(&comp_tr, "UUID is invalid!\n"); + tr_err("UUID is invalid!\n"); goto out; } @@ -135,8 +135,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) } if (!drv) - tr_err(&comp_tr, - "get_drv(): the provided UUID (%8x%8x%8x%8x) doesn't match to any driver!", + tr_err("get_drv(): the provided UUID (%8x%8x%8x%8x) doesn't match to any driver!", *(uint32_t *)(&comp_ext->uuid[0]), *(uint32_t *)(&comp_ext->uuid[4]), *(uint32_t *)(&comp_ext->uuid[8]), @@ -146,7 +145,7 @@ static const struct comp_driver *get_drv(struct sof_ipc_comp *comp) out: if (drv) - tr_dbg(&comp_tr, "get_drv(), found driver type %d, uuid %pU", + tr_dbg("get_drv(), found driver type %d, uuid %pU", drv->type, drv->tctx->uuid_p); return drv; @@ -329,7 +328,7 @@ struct comp_dev *comp_new(struct sof_ipc_comp *comp) return NULL; } - tr_info(&comp_tr, "comp new %pU type %d id %d.%d", + tr_info("comp new %pU type %d id %d.%d", drv->tctx->uuid_p, comp->type, comp->pipeline_id, comp->id); /* build the component */ @@ -360,7 +359,7 @@ int ipc_pipeline_new(struct ipc *ipc, ipc_pipe_new *_pipe_desc) /* check whether the pipeline already exists */ ipc_pipe = ipc_get_pipeline_by_id(ipc, pipe_desc->comp_id); if (ipc_pipe != NULL) { - tr_err(&ipc_tr, "ipc_pipeline_new(): pipeline already exists, pipe_desc->comp_id = %u", + tr_err("ipc_pipeline_new(): pipeline already exists, pipe_desc->comp_id = %u", pipe_desc->comp_id); return -EINVAL; } @@ -369,7 +368,7 @@ int ipc_pipeline_new(struct ipc *ipc, 
ipc_pipe_new *_pipe_desc) pipe = pipeline_new(pipe_desc->pipeline_id, pipe_desc->priority, pipe_desc->comp_id); if (!pipe) { - tr_err(&ipc_tr, "ipc_pipeline_new(): pipeline_new() failed"); + tr_err("ipc_pipeline_new(): pipeline_new() failed"); return -ENOMEM; } @@ -383,7 +382,7 @@ int ipc_pipeline_new(struct ipc *ipc, ipc_pipe_new *_pipe_desc) /* set xrun time limit */ ret = pipeline_xrun_set_limit(pipe, pipe_desc->xrun_limit_usecs); if (ret) { - tr_err(&ipc_tr, "ipc_pipeline_new(): pipeline_xrun_set_limit() failed"); + tr_err("ipc_pipeline_new(): pipeline_xrun_set_limit() failed"); pipeline_free(pipe); return ret; } @@ -419,7 +418,7 @@ int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id) /* check type */ if (ipc_pipe->type != COMP_TYPE_PIPELINE) { - tr_err(&ipc_tr, "ipc_pipeline_free(): comp id: %d is not a PIPELINE", + tr_err("ipc_pipeline_free(): comp id: %d is not a PIPELINE", comp_id); return -EINVAL; } @@ -431,7 +430,7 @@ int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id) /* free buffer and remove from list */ ret = pipeline_free(ipc_pipe->pipeline); if (ret < 0) { - tr_err(&ipc_tr, "ipc_pipeline_free(): pipeline_free() failed"); + tr_err("ipc_pipeline_free(): pipeline_free() failed"); return ret; } ipc_pipe->pipeline = NULL; @@ -450,7 +449,7 @@ int ipc_buffer_new(struct ipc *ipc, const struct sof_ipc_buffer *desc) /* check whether buffer already exists */ ibd = ipc_get_buffer_by_id(ipc, desc->comp.id); if (ibd != NULL) { - tr_err(&ipc_tr, "ipc_buffer_new(): buffer already exists, desc->comp.id = %u", + tr_err("ipc_buffer_new(): buffer already exists, desc->comp.id = %u", desc->comp.id); return -EINVAL; } @@ -458,7 +457,7 @@ int ipc_buffer_new(struct ipc *ipc, const struct sof_ipc_buffer *desc) /* register buffer with pipeline */ buffer = buffer_new(desc, false); if (!buffer) { - tr_err(&ipc_tr, "ipc_buffer_new(): buffer_new() failed"); + tr_err("ipc_buffer_new(): buffer_new() failed"); return -ENOMEM; } @@ -539,7 +538,7 @@ int 
ipc_buffer_free(struct ipc *ipc, uint32_t buffer_id) if (active_comp->state > COMP_STATE_READY && core != ibd->core && core != cpu_get_id()) { - tr_dbg(&ipc_tr, "ipc_buffer_free(): comp id: %d run on sink core %u", + tr_dbg("ipc_buffer_free(): comp id: %d run on sink core %u", buffer_id, core); ibd->core = core; return ipc_process_on_core(core, false); @@ -566,12 +565,12 @@ int ipc_buffer_free(struct ipc *ipc, uint32_t buffer_id) static int ipc_comp_to_buffer_connect(struct ipc_comp_dev *comp, struct ipc_comp_dev *buffer) { - tr_dbg(&ipc_tr, "ipc: comp sink %d, source %d -> connect", buffer->id, + tr_dbg("ipc: comp sink %d, source %d -> connect", buffer->id, comp->id); #if CONFIG_INCOHERENT if (comp->core != buffer->cb->core) { - tr_err(&ipc_tr, "ipc: shared buffers are not supported for IPC3 incoherent architectures"); + tr_err("ipc: shared buffers are not supported for IPC3 incoherent architectures"); return -ENOTSUP; } #endif @@ -582,12 +581,12 @@ static int ipc_comp_to_buffer_connect(struct ipc_comp_dev *comp, static int ipc_buffer_to_comp_connect(struct ipc_comp_dev *buffer, struct ipc_comp_dev *comp) { - tr_dbg(&ipc_tr, "ipc: comp sink %d, source %d -> connect", comp->id, + tr_dbg("ipc: comp sink %d, source %d -> connect", comp->id, buffer->id); #if CONFIG_INCOHERENT if (comp->core != buffer->cb->core) { - tr_err(&ipc_tr, "ipc: shared buffers are not supported for IPC3 incoherent architectures"); + tr_err("ipc: shared buffers are not supported for IPC3 incoherent architectures"); return -ENOTSUP; } #endif @@ -604,14 +603,14 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) /* check whether the components already exist */ icd_source = ipc_get_comp_dev(ipc, COMP_TYPE_ANY, connect->source_id); if (!icd_source) { - tr_err(&ipc_tr, "ipc_comp_connect(): source component does not exist, source_id = %u sink_id = %u", + tr_err("ipc_comp_connect(): source component does not exist, source_id = %u sink_id = %u", connect->source_id, 
connect->sink_id); return -EINVAL; } icd_sink = ipc_get_comp_dev(ipc, COMP_TYPE_ANY, connect->sink_id); if (!icd_sink) { - tr_err(&ipc_tr, "ipc_comp_connect(): sink component does not exist, source_id = %d sink_id = %u", + tr_err("ipc_comp_connect(): sink component does not exist, source_id = %d sink_id = %u", connect->sink_id, connect->source_id); return -EINVAL; } @@ -624,7 +623,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) icd_sink->type == COMP_TYPE_BUFFER) return ipc_comp_to_buffer_connect(icd_source, icd_sink); else { - tr_err(&ipc_tr, "ipc_comp_connect(): invalid source and sink types, connect->source_id = %u, connect->sink_id = %u", + tr_err("ipc_comp_connect(): invalid source and sink types, connect->source_id = %u, connect->sink_id = %u", connect->source_id, connect->sink_id); return -EINVAL; } @@ -638,21 +637,21 @@ int ipc_comp_new(struct ipc *ipc, ipc_comp *_comp) /* check core is valid */ if (comp->core >= CONFIG_CORE_COUNT) { - tr_err(&ipc_tr, "ipc_comp_new(): comp->core = %u", comp->core); + tr_err("ipc_comp_new(): comp->core = %u", comp->core); return -EINVAL; } /* check whether component already exists */ icd = ipc_get_comp_by_id(ipc, comp->id); if (icd != NULL) { - tr_err(&ipc_tr, "ipc_comp_new(): comp->id = %u", comp->id); + tr_err("ipc_comp_new(): comp->id = %u", comp->id); return -EINVAL; } /* create component */ cd = comp_new(comp); if (!cd) { - tr_err(&ipc_tr, "ipc_comp_new(): component cd = NULL"); + tr_err("ipc_comp_new(): component cd = NULL"); return -EINVAL; } @@ -660,7 +659,7 @@ int ipc_comp_new(struct ipc *ipc, ipc_comp *_comp) icd = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(struct ipc_comp_dev)); if (!icd) { - tr_err(&ipc_tr, "ipc_comp_new(): alloc failed"); + tr_err("ipc_comp_new(): alloc failed"); rfree(cd); return -ENOMEM; } diff --git a/src/ipc/ipc3/host-page-table.c b/src/ipc/ipc3/host-page-table.c index 3019da987be2..5a8b64eb3437 100644 --- a/src/ipc/ipc3/host-page-table.c +++ 
b/src/ipc/ipc3/host-page-table.c @@ -38,14 +38,14 @@ static int ipc_parse_page_descriptors(uint8_t *page_table, if ((ring->size <= HOST_PAGE_SIZE * (ring->pages - 1)) || (ring->size > HOST_PAGE_SIZE * ring->pages)) { /* error buffer size */ - tr_err(&ipc_tr, "ipc_parse_page_descriptors(): error buffer size"); + tr_err("ipc_parse_page_descriptors(): error buffer size"); return -EINVAL; } elem_array->elems = rzalloc(SOF_MEM_ZONE_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(struct dma_sg_elem) * ring->pages); if (!elem_array->elems) { - tr_err(&ipc_tr, "ipc_parse_page_descriptors(): There is no heap free with this block size: %zu", + tr_err("ipc_parse_page_descriptors(): There is no heap free with this block size: %zu", sizeof(struct dma_sg_elem) * ring->pages); return -ENOMEM; } @@ -96,7 +96,7 @@ static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table, /* get DMA channel from DMAC */ chan = dma_channel_get_legacy(dmac, 0); if (!chan) { - tr_err(&ipc_tr, "ipc_get_page_descriptors(): chan is NULL"); + tr_err("ipc_get_page_descriptors(): chan is NULL"); return -ENODEV; } @@ -121,7 +121,7 @@ static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table, ret = dma_get_attribute_legacy(dmac, DMA_ATTR_COPY_ALIGNMENT, &dma_copy_align); #endif if (ret < 0) { - tr_err(&ipc_tr, "ipc_get_page_descriptors(): dma_get_attribute() failed"); + tr_err("ipc_get_page_descriptors(): dma_get_attribute() failed"); goto out; } elem.size = SOF_DIV_ROUND_UP(ring->pages * 20, 8); @@ -131,14 +131,14 @@ static int ipc_get_page_descriptors(struct dma *dmac, uint8_t *page_table, ret = dma_set_config_legacy(chan, &config); if (ret < 0) { - tr_err(&ipc_tr, "ipc_get_page_descriptors(): dma_set_config() failed"); + tr_err("ipc_get_page_descriptors(): dma_set_config() failed"); goto out; } /* start the copy of page table to DSP */ ret = dma_copy_legacy(chan, elem.size, DMA_COPY_ONE_SHOT | DMA_COPY_BLOCKING); if (ret < 0) { - tr_err(&ipc_tr, "ipc_get_page_descriptors(): dma_start() 
failed"); + tr_err("ipc_get_page_descriptors(): dma_start() failed"); goto out; } @@ -165,7 +165,7 @@ int ipc_process_host_buffer(struct ipc *ipc, data_host_buffer->page_table, ring); if (err < 0) { - tr_err(&ipc_tr, "ipc: get descriptors failed %d", err); + tr_err("ipc: get descriptors failed %d", err); goto error; } @@ -175,7 +175,7 @@ int ipc_process_host_buffer(struct ipc *ipc, ring, elem_array, direction); if (err < 0) { - tr_err(&ipc_tr, "ipc: parse descriptors failed %d", err); + tr_err("ipc: parse descriptors failed %d", err); goto error; } diff --git a/src/ipc/ipc4/handler.c b/src/ipc/ipc4/handler.c index e6696aee910c..98f863a05c17 100644 --- a/src/ipc/ipc4/handler.c +++ b/src/ipc/ipc4/handler.c @@ -143,7 +143,7 @@ static int ipc4_delete_pipeline(struct ipc4_message_request *ipc4) struct ipc *ipc = ipc_get(); pipe = (struct ipc4_pipeline_delete *)ipc4; - tr_dbg(&ipc_tr, "ipc4 delete pipeline %x:", (uint32_t)pipe->primary.r.instance_id); + tr_dbg("ipc4 delete pipeline %x:", (uint32_t)pipe->primary.r.instance_id); return ipc_pipeline_free(ipc, pipe->primary.r.instance_id); } @@ -194,14 +194,14 @@ static int ipc4_pcm_params(struct ipc_comp_dev *pcm_dev) /* sanity check comp */ if (!pcm_dev->cd->pipeline) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d pipeline not found", pcm_dev->id); + ipc_cmd_err("ipc: comp %d pipeline not found", pcm_dev->id); return -EINVAL; } /* configure pipeline audio params */ err = ipc4_pipeline_params(pcm_dev->cd->pipeline, pcm_dev->cd); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d params failed %d", + ipc_cmd_err("ipc: pipe %d comp %d params failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_dev->cd->pipeline->comp_id, err); goto error; @@ -210,7 +210,7 @@ static int ipc4_pcm_params(struct ipc_comp_dev *pcm_dev) /* prepare pipeline audio params */ err = pipeline_prepare(pcm_dev->cd->pipeline, pcm_dev->cd); if (err < 0) { - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d prepare failed %d", + ipc_cmd_err("ipc: pipe %d comp %d 
prepare failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_dev->cd->pipeline->comp_id, err); goto error; @@ -221,7 +221,7 @@ static int ipc4_pcm_params(struct ipc_comp_dev *pcm_dev) error: reset_err = pipeline_reset(pcm_dev->cd->pipeline, pcm_dev->cd); if (reset_err < 0) - ipc_cmd_err(&ipc_tr, "ipc: pipe %d comp %d reset failed %d", + ipc_cmd_err("ipc: pipe %d comp %d reset failed %d", pcm_dev->cd->pipeline->pipeline_id, pcm_dev->cd->pipeline->comp_id, reset_err); @@ -258,7 +258,7 @@ static struct ipc_comp_dev *pipeline_get_host_dev(struct ipc_comp_dev *ppl_icd) host_dev = ipc_get_comp_by_id(ipc, host_id); if (!host_dev) - ipc_cmd_err(&ipc_tr, "comp host with ID %d not found", host_id); + ipc_cmd_err("comp host with ID %d not found", host_id); return host_dev; } @@ -300,7 +300,7 @@ int ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) int ret = 0; status = ppl_icd->pipeline->status; - tr_dbg(&ipc_tr, "pipeline %d: initial state: %d, cmd: %d", ppl_icd->id, + tr_dbg("pipeline %d: initial state: %d, cmd: %d", ppl_icd->id, status, cmd); switch (cmd) { @@ -316,14 +316,13 @@ int ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) if (!host) return IPC4_INVALID_RESOURCE_ID; - tr_dbg(&ipc_tr, "pipeline %d: set params", ppl_icd->id); + tr_dbg("pipeline %d: set params", ppl_icd->id); ret = ipc4_pcm_params(host); if (ret < 0) return IPC4_INVALID_REQUEST; break; default: - ipc_cmd_err(&ipc_tr, - "pipeline %d: Invalid state for RUNNING: %d", + ipc_cmd_err("pipeline %d: Invalid state for RUNNING: %d", ppl_icd->id, status); return IPC4_INVALID_REQUEST; } @@ -331,7 +330,7 @@ int ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) case SOF_IPC4_PIPELINE_STATE_RESET: switch (status) { case COMP_STATE_INIT: - tr_dbg(&ipc_tr, "pipeline %d: reset from init", ppl_icd->id); + tr_dbg("pipeline %d: reset from init", ppl_icd->id); ret = ipc4_pipeline_complete(ipc, ppl_icd->id, cmd); if (ret < 0) ret = IPC4_INVALID_REQUEST; @@ -343,8 +342,7 @@ int 
ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) /* No action needed */ break; default: - ipc_cmd_err(&ipc_tr, - "pipeline %d: Invalid state for RESET: %d", + ipc_cmd_err("pipeline %d: Invalid state for RESET: %d", ppl_icd->id, status); return IPC4_INVALID_REQUEST; } @@ -353,7 +351,7 @@ int ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) case SOF_IPC4_PIPELINE_STATE_PAUSED: switch (status) { case COMP_STATE_INIT: - tr_dbg(&ipc_tr, "pipeline %d: pause from init", ppl_icd->id); + tr_dbg("pipeline %d: pause from init", ppl_icd->id); ret = ipc4_pipeline_complete(ipc, ppl_icd->id, cmd); if (ret < 0) ret = IPC4_INVALID_REQUEST; @@ -373,7 +371,7 @@ int ipc4_pipeline_prepare(struct ipc_comp_dev *ppl_icd, uint32_t cmd) case SOF_IPC4_PIPELINE_STATE_SAVED: case SOF_IPC4_PIPELINE_STATE_ERROR_STOP: default: - ipc_cmd_err(&ipc_tr, "pipeline %d: unsupported trigger cmd: %d", + ipc_cmd_err("pipeline %d: unsupported trigger cmd: %d", ppl_icd->id, cmd); return IPC4_INVALID_REQUEST; } @@ -388,7 +386,7 @@ int ipc4_pipeline_trigger(struct ipc_comp_dev *ppl_icd, uint32_t cmd, bool *dela int ret; status = ppl_icd->pipeline->status; - tr_dbg(&ipc_tr, "pipeline %d: initial state: %d, cmd: %d", ppl_icd->id, + tr_dbg("pipeline %d: initial state: %d, cmd: %d", ppl_icd->id, status, cmd); if (status == COMP_STATE_INIT) @@ -413,8 +411,7 @@ int ipc4_pipeline_trigger(struct ipc_comp_dev *ppl_icd, uint32_t cmd, bool *dela cmd = COMP_TRIGGER_PRE_RELEASE; break; default: - ipc_cmd_err(&ipc_tr, - "pipeline %d: Invalid state for RUNNING: %d", + ipc_cmd_err("pipeline %d: Invalid state for RUNNING: %d", ppl_icd->id, status); return IPC4_INVALID_REQUEST; } @@ -442,7 +439,7 @@ int ipc4_pipeline_trigger(struct ipc_comp_dev *ppl_icd, uint32_t cmd, bool *dela break; default: - ipc_cmd_err(&ipc_tr, "pipeline %d: unsupported trigger cmd: %d", + ipc_cmd_err("pipeline %d: unsupported trigger cmd: %d", ppl_icd->id, cmd); return IPC4_INVALID_REQUEST; } @@ -450,11 +447,11 @@ int 
ipc4_pipeline_trigger(struct ipc_comp_dev *ppl_icd, uint32_t cmd, bool *dela /* trigger the component */ ret = pipeline_trigger(host->cd->pipeline, host->cd, cmd); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "pipeline %d: trigger cmd %d failed with: %d", + ipc_cmd_err("pipeline %d: trigger cmd %d failed with: %d", ppl_icd->id, cmd, ret); ret = IPC4_PIPELINE_STATE_NOT_SET; } else if (ret == PPL_STATUS_SCHEDULED) { - tr_dbg(&ipc_tr, "pipeline %d: trigger cmd %d is delayed", + tr_dbg("pipeline %d: trigger cmd %d is delayed", ppl_icd->id, cmd); *delayed = true; ret = 0; @@ -484,7 +481,7 @@ static void ipc_compound_pre_start(int msg_id) static void ipc_compound_post_start(uint32_t msg_id, int ret, bool delayed) { if (ret) { - ipc_cmd_err(&ipc_tr, "failed to process msg %d status %d", msg_id, ret); + ipc_cmd_err("failed to process msg %d status %d", msg_id, ret); atomic_set(&msg_data.delayed_reply, 0); return; } @@ -497,7 +494,7 @@ static void ipc_compound_post_start(uint32_t msg_id, int ret, bool delayed) static void ipc_compound_msg_done(uint32_t msg_id, int error) { if (!atomic_read(&msg_data.delayed_reply)) { - ipc_cmd_err(&ipc_tr, "unexpected delayed reply"); + ipc_cmd_err("unexpected delayed reply"); return; } @@ -519,7 +516,7 @@ static int ipc_wait_for_compound_msg(void) if (!try_count--) { atomic_set(&msg_data.delayed_reply, 0); - ipc_cmd_err(&ipc_tr, "ipc4: failed to wait schedule thread"); + ipc_cmd_err("ipc4: failed to wait schedule thread"); return IPC4_FAILURE; } } @@ -561,7 +558,7 @@ static int ipc4_set_pipeline_state(struct ipc4_message_request *ipc4) ppl_icd = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, ppl_id[i], IPC_COMP_IGNORE_REMOTE); if (!ppl_icd) { - tr_err(&ipc_tr, "ipc: comp %d not found", ppl_id[i]); + ipc_cmd_err("ipc: comp %d not found", ppl_id[i]); return IPC4_INVALID_RESOURCE_ID; } @@ -578,7 +575,7 @@ static int ipc4_set_pipeline_state(struct ipc4_message_request *ipc4) ppl_icd = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, ppl_id[i], 
IPC_COMP_IGNORE_REMOTE); if (!ppl_icd) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", ppl_id[i]); + ipc_cmd_err("ipc: comp %d not found", ppl_id[i]); return IPC4_INVALID_RESOURCE_ID; } @@ -612,7 +609,7 @@ static int ipc4_set_pipeline_state(struct ipc4_message_request *ipc4) ppl_icd = ipc_get_comp_by_ppl_id(ipc, COMP_TYPE_PIPELINE, ppl_id[i], IPC_COMP_IGNORE_REMOTE); if (!ppl_icd) { - ipc_cmd_err(&ipc_tr, "ipc: comp %d not found", ppl_id[i]); + ipc_cmd_err("ipc: comp %d not found", ppl_id[i]); return IPC4_INVALID_RESOURCE_ID; } @@ -642,7 +639,7 @@ static int ipc4_set_pipeline_state(struct ipc4_message_request *ipc4) * to process the full trigger list. */ if (ipc_wait_for_compound_msg() != 0) { - ipc_cmd_err(&ipc_tr, "ipc4: fail with delayed trigger"); + ipc_cmd_err("ipc4: fail with delayed trigger"); return IPC4_FAILURE; } } @@ -752,7 +749,7 @@ static int ipc4_process_ipcgtw_cmd(struct ipc4_message_request *ipc4) if (msg_reply.tx_data) { msg_reply.tx_size = reply_size; } else { - ipc_cmd_err(&ipc_tr, "failed to allocate %u bytes for msg_reply.tx_data", + ipc_cmd_err("failed to allocate %u bytes for msg_reply.tx_data", reply_size); msg_reply.extension = 0; return IPC4_OUT_OF_MEMORY; @@ -761,7 +758,7 @@ static int ipc4_process_ipcgtw_cmd(struct ipc4_message_request *ipc4) return err < 0 ? 
IPC4_FAILURE : IPC4_SUCCESS; #else - ipc_cmd_err(&ipc_tr, "CONFIG_IPC4_GATEWAY is disabled"); + ipc_cmd_err("CONFIG_IPC4_GATEWAY is disabled"); return IPC4_UNAVAILABLE; #endif } @@ -779,7 +776,7 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) case SOF_IPC4_GLB_PERF_MEASUREMENTS_CMD: case SOF_IPC4_GLB_LOAD_MULTIPLE_MODULES: case SOF_IPC4_GLB_UNLOAD_MULTIPLE_MODULES: - ipc_cmd_err(&ipc_tr, "not implemented ipc message type %d", type); + ipc_cmd_err("not implemented ipc message type %d", type); ret = IPC4_UNAVAILABLE; break; @@ -802,7 +799,7 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) case SOF_IPC4_GLB_GET_PIPELINE_CONTEXT_SIZE: case SOF_IPC4_GLB_SAVE_PIPELINE: case SOF_IPC4_GLB_RESTORE_PIPELINE: - ipc_cmd_err(&ipc_tr, "not implemented ipc message type %d", type); + ipc_cmd_err("not implemented ipc message type %d", type); ret = IPC4_UNAVAILABLE; break; @@ -816,13 +813,13 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) break; #endif case SOF_IPC4_GLB_INTERNAL_MESSAGE: - ipc_cmd_err(&ipc_tr, "not implemented ipc message type %d", type); + ipc_cmd_err("not implemented ipc message type %d", type); ret = IPC4_UNAVAILABLE; break; /* Notification (FW to SW driver) */ case SOF_IPC4_GLB_NOTIFICATION: - ipc_cmd_err(&ipc_tr, "not implemented ipc message type %d", type); + ipc_cmd_err("not implemented ipc message type %d", type); ret = IPC4_UNAVAILABLE; break; @@ -831,7 +828,7 @@ static int ipc4_process_glb_message(struct ipc4_message_request *ipc4) break; default: - ipc_cmd_err(&ipc_tr, "unsupported ipc message type %d", type); + ipc_cmd_err("unsupported ipc message type %d", type); ret = IPC4_UNAVAILABLE; break; } @@ -857,10 +854,9 @@ static int ipc4_init_module_instance(struct ipc4_message_request *ipc4) if (ret < 0) return IPC4_FAILURE; - tr_dbg(&ipc_tr, - "ipc4_init_module_instance %x : %x", - (uint32_t)module_init.primary.r.module_id, - (uint32_t)module_init.primary.r.instance_id); + 
tr_dbg("ipc4_init_module_instance %x : %x", + (uint32_t)module_init.primary.r.module_id, + (uint32_t)module_init.primary.r.instance_id); /* Pass IPC to target core */ if (!cpu_is_me(module_init.extension.r.core_id)) @@ -868,7 +864,7 @@ static int ipc4_init_module_instance(struct ipc4_message_request *ipc4) dev = comp_new_ipc4(&module_init); if (!dev) { - ipc_cmd_err(&ipc_tr, "error: failed to init module %x : %x", + ipc_cmd_err("error: failed to init module %x : %x", (uint32_t)module_init.primary.r.module_id, (uint32_t)module_init.primary.r.instance_id); return IPC4_MOD_NOT_INITIALIZED; @@ -886,7 +882,7 @@ static int ipc4_bind_module_instance(struct ipc4_message_request *ipc4) if (ret < 0) return IPC4_FAILURE; - tr_dbg(&ipc_tr, "ipc4_bind_module_instance %x : %x with %x : %x", + tr_dbg("ipc4_bind_module_instance %x : %x with %x : %x", (uint32_t)bu.primary.r.module_id, (uint32_t)bu.primary.r.instance_id, (uint32_t)bu.extension.r.dst_module_id, (uint32_t)bu.extension.r.dst_instance_id); @@ -902,7 +898,7 @@ static int ipc4_unbind_module_instance(struct ipc4_message_request *ipc4) if (ret < 0) return IPC4_FAILURE; - tr_dbg(&ipc_tr, "ipc4_unbind_module_instance %x : %x with %x : %x", + tr_dbg("ipc4_unbind_module_instance %x : %x with %x : %x", (uint32_t)bu.primary.r.module_id, (uint32_t)bu.primary.r.instance_id, (uint32_t)bu.extension.r.dst_module_id, (uint32_t)bu.extension.r.dst_instance_id); @@ -923,7 +919,7 @@ static int ipc4_get_large_config_module_instance(struct ipc4_message_request *ip if (ret < 0) return IPC4_FAILURE; - tr_dbg(&ipc_tr, "ipc4_get_large_config_module_instance %x : %x", + tr_dbg("ipc4_get_large_config_module_instance %x : %x", (uint32_t)config.primary.r.module_id, (uint32_t)config.primary.r.instance_id); drv = ipc4_get_comp_drv(config.primary.r.module_id); @@ -982,7 +978,7 @@ static int ipc4_get_large_config_module_instance(struct ipc4_message_request *ip msg_reply.tx_size = data_offset; msg_reply.tx_data = response_buffer; } else { - 
ipc_cmd_err(&ipc_tr, "error: failed to allocate tx_data"); + ipc_cmd_err("error: failed to allocate tx_data"); ret = IPC4_OUT_OF_MEMORY; } @@ -1026,7 +1022,7 @@ static int ipc4_set_vendor_config_module_instance(struct comp_dev *dev, ret = drv->ops.set_large_config(dev, tlv->type, init_block, final_block, tlv->length, tlv->value); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "failed to set large_config_module_instance %x : %x", + ipc_cmd_err("failed to set large_config_module_instance %x : %x", (uint32_t)module_id, (uint32_t)instance_id); return IPC4_INVALID_RESOURCE_ID; } @@ -1064,7 +1060,7 @@ static int ipc4_set_large_config_module_instance(struct ipc4_message_request *ip dcache_invalidate_region((__sparse_force void __sparse_cache *)MAILBOX_HOSTBOX_BASE, config.extension.r.data_off_size); - tr_dbg(&ipc_tr, "ipc4_set_large_config_module_instance %x : %x", + tr_dbg("ipc4_set_large_config_module_instance %x : %x", (uint32_t)config.primary.r.module_id, (uint32_t)config.primary.r.instance_id); drv = ipc4_get_comp_drv(config.primary.r.module_id); @@ -1101,7 +1097,7 @@ static int ipc4_set_large_config_module_instance(struct ipc4_message_request *ip config.extension.r.init_block, config.extension.r.final_block, config.extension.r.data_off_size, (const char *)MAILBOX_HOSTBOX_BASE); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "failed to set large_config_module_instance %x : %x", + ipc_cmd_err("failed to set large_config_module_instance %x : %x", (uint32_t)config.primary.r.module_id, (uint32_t)config.primary.r.instance_id); ret = IPC4_INVALID_RESOURCE_ID; @@ -1121,13 +1117,13 @@ static int ipc4_delete_module_instance(struct ipc4_message_request *ipc4) if (ret < 0) return IPC4_FAILURE; - tr_dbg(&ipc_tr, "ipc4_delete_module_instance %x : %x", (uint32_t)module.primary.r.module_id, + tr_dbg("ipc4_delete_module_instance %x : %x", (uint32_t)module.primary.r.module_id, (uint32_t)module.primary.r.instance_id); comp_id = IPC4_COMP_ID(module.primary.r.module_id, module.primary.r.instance_id); ret 
= ipc_comp_free(ipc, comp_id); if (ret < 0) { - ipc_cmd_err(&ipc_tr, "failed to delete module instance %x : %x", + ipc_cmd_err("failed to delete module instance %x : %x", (uint32_t)module.primary.r.module_id, (uint32_t)module.primary.r.instance_id); ret = IPC4_INVALID_RESOURCE_ID; @@ -1149,11 +1145,11 @@ static int ipc4_module_process_d0ix(struct ipc4_message_request *ipc4) module_id = d0ix.primary.r.module_id; instance_id = d0ix.primary.r.instance_id; - tr_dbg(&ipc_tr, "ipc4_module_process_d0ix %x : %x", module_id, instance_id); + tr_dbg("ipc4_module_process_d0ix %x : %x", module_id, instance_id); /* only module 0 can be used to set d0ix state */ if (d0ix.primary.r.module_id || d0ix.primary.r.instance_id) { - ipc_cmd_err(&ipc_tr, "invalid resource id %x : %x", module_id, instance_id); + ipc_cmd_err("invalid resource id %x : %x", module_id, instance_id); return IPC4_INVALID_RESOURCE_ID; } @@ -1182,7 +1178,7 @@ static int ipc4_module_process_dx(struct ipc4_message_request *ipc4) /* only module 0 can be used to set dx state */ if (module_id || instance_id) { - ipc_cmd_err(&ipc_tr, "invalid resource id %x : %x", module_id, instance_id); + ipc_cmd_err("invalid resource id %x : %x", module_id, instance_id); return IPC4_INVALID_RESOURCE_ID; } @@ -1195,7 +1191,7 @@ static int ipc4_module_process_dx(struct ipc4_message_request *ipc4) /* check if core enable mask is valid */ if (dx_info.core_mask > MASK(CONFIG_CORE_COUNT - 1, 0)) { - ipc_cmd_err(&ipc_tr, "ipc4_module_process_dx: CONFIG_CORE_COUNT: %d < core enable mask: %d", + ipc_cmd_err("ipc4_module_process_dx: CONFIG_CORE_COUNT: %d < core enable mask: %d", CONFIG_CORE_COUNT, dx_info.core_mask); return IPC4_ERROR_INVALID_PARAM; } @@ -1204,7 +1200,7 @@ static int ipc4_module_process_dx(struct ipc4_message_request *ipc4) if ((dx_info.core_mask & BIT(PLATFORM_PRIMARY_CORE_ID)) && (dx_info.dx_mask & BIT(PLATFORM_PRIMARY_CORE_ID))) { /* core0 can't be activated more, it's already active since we got here */ - 
ipc_cmd_err(&ipc_tr, "Core0 is already active"); + ipc_cmd_err("Core0 is already active"); return IPC4_BAD_STATE; } @@ -1216,13 +1212,13 @@ static int ipc4_module_process_dx(struct ipc4_message_request *ipc4) if (dx_info.dx_mask & BIT(core_id)) { ret = cpu_enable_core(core_id); if (ret != 0) { - ipc_cmd_err(&ipc_tr, "failed to enable core %d", core_id); + ipc_cmd_err("failed to enable core %d", core_id); return IPC4_FAILURE; } } else { cpu_disable_core(core_id); if (cpu_is_core_enabled(core_id)) { - ipc_cmd_err(&ipc_tr, "failed to disable core %d", core_id); + ipc_cmd_err("failed to disable core %d", core_id); return IPC4_FAILURE; } } @@ -1231,13 +1227,13 @@ static int ipc4_module_process_dx(struct ipc4_message_request *ipc4) /* Deactivating primary core if requested. */ if (dx_info.core_mask & BIT(PLATFORM_PRIMARY_CORE_ID)) { if (cpu_enabled_cores() & ~BIT(PLATFORM_PRIMARY_CORE_ID)) { - ipc_cmd_err(&ipc_tr, "secondary cores 0x%x still active", + ipc_cmd_err("secondary cores 0x%x still active", cpu_enabled_cores()); return IPC4_BUSY; } if (is_any_ppl_active()) { - ipc_cmd_err(&ipc_tr, "some pipelines are still active"); + ipc_cmd_err("some pipelines are still active"); return IPC4_BUSY; } @@ -1271,7 +1267,7 @@ static int ipc4_process_module_message(struct ipc4_message_request *ipc4) case SOF_IPC4_MOD_CONFIG_GET: case SOF_IPC4_MOD_CONFIG_SET: ret = IPC4_UNAVAILABLE; - tr_info(&ipc_tr, "unsupported module CONFIG_GET"); + tr_info("unsupported module CONFIG_GET"); break; case SOF_IPC4_MOD_LARGE_CONFIG_GET: ret = ipc4_get_large_config_module_instance(ipc4); @@ -1399,7 +1395,7 @@ void ipc_send_buffer_status_notify(void) msg_notify.extension = 0; msg_notify.tx_size = 0; - tr_dbg(&ipc_tr, "tx-notify\t: %#x|%#x", msg_notify.header, msg_notify.extension); + tr_dbg("tx-notify\t: %#x|%#x", msg_notify.header, msg_notify.extension); ipc_msg_send(&msg_notify, NULL, true); } @@ -1420,7 +1416,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) int err; if (cpu_is_primary(cpu_get_id())) - 
tr_info(&ipc_tr, "rx\t: %#x|%#x", in->primary.dat, in->extension.dat); + tr_info("rx\t: %#x|%#x", in->primary.dat, in->extension.dat); /* no process on scheduled thread */ atomic_set(&msg_data.delayed_reply, 0); @@ -1435,16 +1431,16 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) case SOF_IPC4_MESSAGE_TARGET_FW_GEN_MSG: err = ipc4_process_glb_message(in); if (err) - ipc_cmd_err(&ipc_tr, "ipc4: FW_GEN_MSG failed with err %d", err); + ipc_cmd_err("ipc4: FW_GEN_MSG failed with err %d", err); break; case SOF_IPC4_MESSAGE_TARGET_MODULE_MSG: err = ipc4_process_module_message(in); if (err) - ipc_cmd_err(&ipc_tr, "ipc4: MODULE_MSG failed with err %d", err); + ipc_cmd_err("ipc4: MODULE_MSG failed with err %d", err); break; default: /* should not reach here as we only have 2 message types */ - ipc_cmd_err(&ipc_tr, "ipc4: invalid target %d", target); + ipc_cmd_err("ipc4: invalid target %d", target); err = IPC4_UNKNOWN_MESSAGE_TYPE; } @@ -1519,7 +1515,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) return; if (ipc_wait_for_compound_msg() != 0) { - ipc_cmd_err(&ipc_tr, "ipc4: failed to send delayed reply"); + ipc_cmd_err("ipc4: failed to send delayed reply"); err = IPC4_FAILURE; } @@ -1534,7 +1530,7 @@ void ipc_cmd(struct ipc_cmd_hdr *_hdr) msg_reply.header = reply.primary.dat; - tr_dbg(&ipc_tr, "tx-reply\t: %#x|%#x", msg_reply.header, + tr_dbg("tx-reply\t: %#x|%#x", msg_reply.header, msg_reply.extension); ipc4_send_reply(&reply); diff --git a/src/ipc/ipc4/helper.c b/src/ipc/ipc4/helper.c index b4c1deace1ab..8ef6e5be930e 100644 --- a/src/ipc/ipc4/helper.c +++ b/src/ipc/ipc4/helper.c @@ -107,12 +107,12 @@ struct comp_dev *comp_new_ipc4(struct ipc4_module_init_instance *module_init) return NULL; if (ipc4_get_comp_dev(comp_id)) { - tr_err(&ipc_tr, "comp %d exists", comp_id); + tr_err("comp %d exists", comp_id); return NULL; } if (module_init->extension.r.core_id >= CONFIG_CORE_COUNT) { - tr_err(&ipc_tr, "ipc: comp->core = %u", (uint32_t)module_init->extension.r.core_id); + tr_err("ipc: 
comp->core = %u", (uint32_t)module_init->extension.r.core_id); return NULL; } @@ -129,7 +129,7 @@ struct comp_dev *comp_new_ipc4(struct ipc4_module_init_instance *module_init) ipc_config.proc_domain = COMP_PROCESSING_DOMAIN_LL; #else /* CONFIG_ZEPHYR_DP_SCHEDULER */ if (module_init->extension.r.proc_domain) { - tr_err(&ipc_tr, "ipc: DP scheduling is disabled, cannot create comp %d", comp_id); + tr_err("ipc: DP scheduling is disabled, cannot create comp %d", comp_id); return NULL; } ipc_config.proc_domain = COMP_PROCESSING_DOMAIN_LL; @@ -198,7 +198,7 @@ static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc) /* check whether pipeline id is already taken or in use */ ipc_pipe = ipc_get_pipeline_by_id(ipc, pipe_desc->primary.r.instance_id); if (ipc_pipe) { - tr_err(&ipc_tr, "ipc: comp id is already taken, pipe_desc->instance_id = %u", + tr_err("ipc: comp id is already taken, pipe_desc->instance_id = %u", (uint32_t)pipe_desc->primary.r.instance_id); return IPC4_INVALID_RESOURCE_ID; } @@ -206,7 +206,7 @@ static int ipc4_create_pipeline(struct ipc4_pipeline_create *pipe_desc) /* create the pipeline */ pipe = pipeline_new(pipe_desc->primary.r.instance_id, pipe_desc->primary.r.ppl_priority, 0); if (!pipe) { - tr_err(&ipc_tr, "ipc: pipeline_new() failed"); + tr_err("ipc: pipeline_new() failed"); return IPC4_OUT_OF_MEMORY; } @@ -242,7 +242,7 @@ int ipc_pipeline_new(struct ipc *ipc, ipc_pipe_new *_pipe_desc) { struct ipc4_pipeline_create *pipe_desc = ipc_from_pipe_new(_pipe_desc); - tr_dbg(&ipc_tr, "ipc: pipeline id = %u", (uint32_t)pipe_desc->primary.r.instance_id); + tr_dbg("ipc: pipeline id = %u", (uint32_t)pipe_desc->primary.r.instance_id); /* pass IPC to target core */ if (!cpu_is_me(pipe_desc->extension.r.core_id)) @@ -326,14 +326,14 @@ int ipc_pipeline_free(struct ipc *ipc, uint32_t comp_id) ret = ipc_pipeline_module_free(ipc_pipe->pipeline->pipeline_id); if (ret != IPC4_SUCCESS) { - tr_err(&ipc_tr, "ipc_pipeline_free(): module free () failed"); + 
tr_err("ipc_pipeline_free(): module free () failed"); return ret; } /* free buffer, delete all tasks and remove from list */ ret = pipeline_free(ipc_pipe->pipeline); if (ret < 0) { - tr_err(&ipc_tr, "ipc_pipeline_free(): pipeline_free() failed"); + tr_err("ipc_pipeline_free(): pipeline_free() failed"); return IPC4_INVALID_RESOURCE_STATE; } @@ -411,7 +411,7 @@ static int ll_wait_finished_on_core(struct comp_dev *dev) /* Any blocking IDC that does not change component state could be utilized */ ret = comp_ipc4_get_attribute_remote(dev, COMP_ATTR_BASE_CONFIG, &dummy); if (ret < 0) { - tr_err(&ipc_tr, "comp_ipc4_get_attribute_remote() failed for module %#x", + tr_err("comp_ipc4_get_attribute_remote() failed for module %#x", dev_comp_id(dev)); return ret; } @@ -448,7 +448,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) sink = ipc4_get_comp_dev(sink_id); if (!source || !sink) { - tr_err(&ipc_tr, "failed to find src %x, or dst %x", src_id, sink_id); + tr_err("failed to find src %x, or dst %x", src_id, sink_id); return IPC4_INVALID_RESOURCE_ID; } @@ -475,7 +475,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) ret = comp_get_attribute(source, COMP_ATTR_BASE_CONFIG, &source_src_cfg); if (ret < 0) { - tr_err(&ipc_tr, "failed to get base config for src module %#x", + tr_err("failed to get base config for src module %#x", dev_comp_id(source)); return IPC4_FAILURE; } @@ -490,7 +490,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) if (!ibs) { ret = comp_get_attribute(sink, COMP_ATTR_BASE_CONFIG, &sink_src_cfg); if (ret < 0) { - tr_err(&ipc_tr, "failed to get base config for sink module %#x", + tr_err("failed to get base config for sink module %#x", dev_comp_id(sink)); return IPC4_FAILURE; } @@ -512,7 +512,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) buffer = ipc4_create_buffer(source, cross_core_bind, buf_size, bu->extension.r.src_queue, bu->extension.r.dst_queue); if 
(!buffer) { - tr_err(&ipc_tr, "failed to allocate buffer to bind %d to %d", src_id, sink_id); + tr_err("failed to allocate buffer to bind %d to %d", src_id, sink_id); return IPC4_OUT_OF_MEMORY; } @@ -546,7 +546,7 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) if (ll_wait_finished_on_core(sink) < 0) goto free; #else - tr_err(&ipc_tr, "Cross-core binding is disabled"); + tr_err("Cross-core binding is disabled"); goto free; #endif } @@ -554,14 +554,14 @@ int ipc_comp_connect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) ret = comp_buffer_connect(source, source->ipc_config.core, buffer, PPL_CONN_DIR_COMP_TO_BUFFER); if (ret < 0) { - tr_err(&ipc_tr, "failed to connect src %d to internal buffer", src_id); + tr_err("failed to connect src %d to internal buffer", src_id); goto free; } ret = comp_buffer_connect(sink, sink->ipc_config.core, buffer, PPL_CONN_DIR_BUFFER_TO_COMP); if (ret < 0) { - tr_err(&ipc_tr, "failed to connect internal buffer to sink %d", sink_id); + tr_err("failed to connect internal buffer to sink %d", sink_id); goto e_sink_connect; } @@ -624,12 +624,12 @@ int ipc_comp_disconnect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) src = ipc4_get_comp_dev(src_id); sink = ipc4_get_comp_dev(sink_id); if (!src || !sink) { - tr_err(&ipc_tr, "failed to find src %x, or dst %x", src_id, sink_id); + tr_err("failed to find src %x, or dst %x", src_id, sink_id); return IPC4_INVALID_RESOURCE_ID; } if (src->pipeline == sink->pipeline) { - tr_warn(&ipc_tr, "ignoring unbinding of src %x and dst %x", src_id, sink_id); + tr_warn("ignoring unbinding of src %x and dst %x", src_id, sink_id); return 0; } @@ -677,7 +677,7 @@ int ipc_comp_disconnect(struct ipc *ipc, ipc_pipe_comp_connect *_connect) return IPC4_FAILURE; } #else - tr_err(&ipc_tr, "Cross-core binding is disabled"); + tr_err("Cross-core binding is disabled"); ll_unblock(cross_core_unbind); return IPC4_FAILURE; #endif @@ -705,7 +705,7 @@ int ipc_comp_disconnect(struct ipc *ipc, 
ipc_pipe_comp_connect *_connect) static inline int process_dma_index(uint32_t dma_id, uint32_t *dir, uint32_t *chan) { if (dma_id > DAI_NUM_HDA_OUT + DAI_NUM_HDA_IN) { - tr_err(&ipc_tr, "dma id %d is out of range", dma_id); + tr_err("dma id %d is out of range", dma_id); return IPC4_INVALID_NODE_ID; } @@ -879,8 +879,7 @@ const struct comp_driver *ipc4_get_drv(uint8_t *uuid) info = container_of(clist, struct comp_driver_info, list); if (!memcmp(info->drv->uid, uuid, UUID_SIZE)) { - tr_dbg(&comp_tr, - "found type %d, uuid %pU", + tr_dbg("found type %d, uuid %pU", info->drv->type, info->drv->tctx->uuid_p); drv = info->drv; @@ -888,7 +887,7 @@ const struct comp_driver *ipc4_get_drv(uint8_t *uuid) } } - tr_err(&comp_tr, "get_drv(): the provided UUID (%08x %08x %08x %08x) can't be found!", + tr_err("get_drv(): the provided UUID (%08x %08x %08x %08x) can't be found!", *(uint32_t *)(&uuid[0]), *(uint32_t *)(&uuid[4]), *(uint32_t *)(&uuid[8]), @@ -937,7 +936,7 @@ static const struct comp_driver *ipc4_library_get_drv(int module_id) return ipc4_get_drv((uint8_t *)&mod_uuid->uuid); } - tr_err(&comp_tr, "ipc4_library_get_drv(): Unsupported module ID %#x\n", module_id); + tr_err("ipc4_library_get_drv(): Unsupported module ID %#x\n", module_id); return NULL; } #endif @@ -978,7 +977,7 @@ const struct comp_driver *ipc4_get_comp_drv(int module_id) desc = lib_manager_get_library_module_desc(module_id); entry_index = LIB_MANAGER_GET_MODULE_INDEX(module_id); #else - tr_err(&comp_tr, "Error: lib index:%d, while loadable libraries are not supported!!!", + tr_err("Error: lib index:%d, while loadable libraries are not supported!!!", lib_idx); return NULL; #endif @@ -1013,7 +1012,7 @@ int ipc4_add_comp_dev(struct comp_dev *dev) /* check id for duplicates */ icd = ipc_get_comp_by_id(ipc, dev->ipc_config.id); if (icd) { - tr_err(&ipc_tr, "ipc: duplicate component ID"); + tr_err("ipc: duplicate component ID"); return IPC4_INVALID_RESOURCE_ID; } @@ -1021,7 +1020,7 @@ int ipc4_add_comp_dev(struct 
comp_dev *dev) icd = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(struct ipc_comp_dev)); if (!icd) { - tr_err(&ipc_tr, "ipc_comp_new(): alloc failed"); + tr_err("ipc_comp_new(): alloc failed"); rfree(icd); return IPC4_OUT_OF_MEMORY; } @@ -1031,7 +1030,7 @@ int ipc4_add_comp_dev(struct comp_dev *dev) icd->core = dev->ipc_config.core; icd->id = dev->ipc_config.id; - tr_dbg(&ipc_tr, "ipc4_add_comp_dev add comp %x", icd->id); + tr_dbg("ipc4_add_comp_dev add comp %x", icd->id); /* add new component to the list */ list_item_append(&icd->list, &ipc->comp_list); diff --git a/src/lib/agent.c b/src/lib/agent.c index 805fdab1a0b0..487da3556f85 100644 --- a/src/lib/agent.c +++ b/src/lib/agent.c @@ -50,14 +50,14 @@ DECLARE_SOF_UUID("agent-work", agent_work_task_uuid, 0xc63c4e75, 0x8f61, 0x4420, #if CONFIG_PERFORMANCE_COUNTERS static void perf_sa_trace(struct perf_cnt_data *pcd, int ignored) { - tr_info(&sa_tr, "perf sys_load peak plat %u cpu %u", + tr_info("perf sys_load peak plat %u cpu %u", (uint32_t)((pcd)->plat_delta_peak), (uint32_t)((pcd)->cpu_delta_peak)); } static void perf_avg_sa_trace(struct perf_cnt_data *pcd, int ignored) { - tr_info(&sa_tr, "perf sys_load cpu avg %u (current peak %u)", + tr_info("perf sys_load cpu avg %u (current peak %u)", (uint32_t)((pcd)->cpu_delta_sum), (uint32_t)((pcd)->cpu_delta_peak)); } @@ -85,9 +85,9 @@ static enum task_state validate(void *data) /* warning timeout */ if (delta > sa->warn_timeout) { if (delta > UINT_MAX) - tr_warn(&sa_tr, "validate(), ll drift detected, delta > %u", UINT_MAX); + tr_warn("validate(), ll drift detected, delta > %u", UINT_MAX); else - tr_warn(&sa_tr, "validate(), ll drift detected, delta = %u", + tr_warn("validate(), ll drift detected, delta = %u", (unsigned int)delta); } @@ -102,9 +102,9 @@ void sa_init(struct sof *sof, uint64_t timeout) uint64_t ticks; if (timeout > UINT_MAX) - tr_warn(&sa_tr, "sa_init(), timeout > %u", UINT_MAX); + tr_warn("sa_init(), timeout > %u", UINT_MAX); else - 
tr_info(&sa_tr, "sa_init(), timeout = %u", (unsigned int)timeout); + tr_info("sa_init(), timeout = %u", (unsigned int)timeout); sof->sa = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*sof->sa)); @@ -120,11 +120,9 @@ void sa_init(struct sof *sof, uint64_t timeout) if (ticks > UINT_MAX || sof->sa->warn_timeout > UINT_MAX || sof->sa->panic_timeout > UINT_MAX) - tr_info(&sa_tr, - "sa_init(), some of the values are > %u", UINT_MAX); + tr_info("sa_init(), some of the values are > %u", UINT_MAX); else - tr_info(&sa_tr, - "sa_init(), ticks = %u, sof->sa->warn_timeout = %u, sof->sa->panic_timeout = %u", + tr_info("sa_init(), ticks = %u, sof->sa->warn_timeout = %u, sof->sa->panic_timeout = %u", (unsigned int)ticks, (unsigned int)sof->sa->warn_timeout, (unsigned int)sof->sa->panic_timeout); diff --git a/src/lib/alloc.c b/src/lib/alloc.c index d12e8675e115..c57d9b9427fc 100644 --- a/src/lib/alloc.c +++ b/src/lib/alloc.c @@ -65,10 +65,10 @@ static void validate_memory(void *ptr, size_t size) } if (not_matching) { - tr_info(&mem_tr, "validate_memory() pointer: %p freed pattern not detected", + tr_info("validate_memory() pointer: %p freed pattern not detected", ptr); } else { - tr_err(&mem_tr, "validate_memory() freeing pointer: %p double free detected", + tr_err("validate_memory() freeing pointer: %p double free detected", ptr); } } @@ -158,7 +158,7 @@ static void *rmalloc_sys(struct mm_heap *heap, uint32_t flags, int caps, size_t /* always succeeds or panics */ if (alignment + bytes > heap->info.free) { - tr_err(&mem_tr, "rmalloc_sys(): core = %d, bytes = %d", + tr_err("rmalloc_sys(): core = %d, bytes = %d", cpu_get_id(), bytes); sof_panic(SOF_IPC_PANIC_MEM); } @@ -301,7 +301,7 @@ static void *alloc_cont_blocks(struct mm_heap *heap, int level, } if (found < total_bytes) { - tr_err(&mem_tr, "failed to allocate %u", total_bytes); + tr_err("failed to allocate %u", total_bytes); goto out; } @@ -468,7 +468,7 @@ static void free_block(void *ptr) if (!heap) { heap = 
get_heap_from_ptr(uncached_ptr); if (!heap) { - tr_err(&mem_tr, "free_block(): invalid heap, ptr = %p, cpu = %d", + tr_err("free_block(): invalid heap, ptr = %p, cpu = %d", ptr, cpu_get_id()); return; } @@ -491,7 +491,7 @@ static void free_block(void *ptr) if (i == heap->blocks) { /* not found */ - tr_err(&mem_tr, "free_block(): invalid free_ptr = %p cpu = %d", + tr_err("free_block(): invalid free_ptr = %p cpu = %d", free_ptr, cpu_get_id()); return; } @@ -564,19 +564,19 @@ void heap_trace(struct mm_heap *heap, int size) int j; for (i = 0; i < size; i++) { - tr_info(&mem_tr, " heap: 0x%x size %d blocks %d caps 0x%x", + tr_info(" heap: 0x%x size %d blocks %d caps 0x%x", heap->heap, heap->size, heap->blocks, heap->caps); - tr_info(&mem_tr, " (In Bytes) used %d free %d", heap->info.used, + tr_info(" (In Bytes) used %d free %d", heap->info.used, heap->info.free); /* map[j]'s base is calculated based on map[j-1] */ for (j = 0; j < heap->blocks; j++) { current_map = &heap->map[j]; - tr_info(&mem_tr, " %d Bytes blocks ID:%d base 0x%x", + tr_info(" %d Bytes blocks ID:%d base 0x%x", current_map->block_size, j, current_map->base); - tr_info(&mem_tr, " Number of Blocks: total %d used %d free %d", + tr_info(" Number of Blocks: total %d used %d free %d", current_map->count, (current_map->count - current_map->free_count), current_map->free_count); @@ -592,18 +592,18 @@ void heap_trace_all(int force) /* has heap changed since last shown */ if (memmap->heap_trace_updated || force) { - tr_info(&mem_tr, "heap: system status"); + tr_info("heap: system status"); heap_trace(memmap->system, PLATFORM_HEAP_SYSTEM); - tr_info(&mem_tr, "heap: system runtime status"); + tr_info("heap: system runtime status"); heap_trace(memmap->system_runtime, PLATFORM_HEAP_SYSTEM_RUNTIME); - tr_info(&mem_tr, "heap: buffer status"); + tr_info("heap: buffer status"); heap_trace(memmap->buffer, PLATFORM_HEAP_BUFFER); - tr_info(&mem_tr, "heap: runtime status"); + tr_info("heap: runtime status"); 
heap_trace(memmap->runtime, PLATFORM_HEAP_RUNTIME); #if CONFIG_CORE_COUNT > 1 - tr_info(&mem_tr, "heap: runtime shared status"); + tr_info("heap: runtime shared status"); heap_trace(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED); - tr_info(&mem_tr, "heap: system shared status"); + tr_info("heap: system shared status"); heap_trace(memmap->system_shared, PLATFORM_HEAP_SYSTEM_SHARED); #endif } @@ -617,8 +617,7 @@ void heap_trace(struct mm_heap *heap, int size) { } #endif #define _ALLOC_FAILURE(bytes, zone, caps, flags) \ - tr_err(&mem_tr, \ - "failed to alloc 0x%x bytes zone 0x%x caps 0x%x flags 0x%x", \ + tr_err("failed to alloc 0x%x bytes zone 0x%x caps 0x%x flags 0x%x", \ bytes, zone, caps, flags) #if CONFIG_DEBUG_HEAP @@ -673,7 +672,7 @@ static void *rmalloc_runtime(uint32_t flags, uint32_t caps, size_t bytes) caps); if (!heap) { - tr_err(&mem_tr, "rmalloc_runtime(): caps = %x, bytes = %d", + tr_err("rmalloc_runtime(): caps = %x, bytes = %d", caps, bytes); return NULL; @@ -694,7 +693,7 @@ static void *rmalloc_runtime_shared(uint32_t flags, uint32_t caps, size_t bytes) /* check shared heap for capabilities */ heap = get_heap_from_caps(memmap->runtime_shared, PLATFORM_HEAP_RUNTIME_SHARED, caps); if (!heap) { - tr_err(&mem_tr, "rmalloc_runtime_shared(): caps = %x, bytes = %d", caps, bytes); + tr_err("rmalloc_runtime_shared(): caps = %x, bytes = %d", caps, bytes); return NULL; } @@ -735,7 +734,7 @@ static void *_malloc_unlocked(enum mem_zone zone, uint32_t flags, uint32_t caps, #endif default: - tr_err(&mem_tr, "rmalloc(): invalid zone"); + tr_err("rmalloc(): invalid zone"); sof_panic(SOF_IPC_PANIC_MEM); /* logic non recoverable problem */ break; } @@ -983,7 +982,7 @@ static void _rfree_unlocked(void *ptr) /* panic if pointer is from system heap */ if (ptr >= (void *)heap->heap && (char *)ptr < (char *)heap->heap + heap->size) { - tr_err(&mem_tr, "rfree(): attempt to free system heap = %p, cpu = %d", + tr_err("rfree(): attempt to free system heap = %p, cpu = 
%d", ptr, cpu_get_id()); sof_panic(SOF_IPC_PANIC_MEM); } @@ -1047,7 +1046,7 @@ void free_heap(enum mem_zone zone) */ if (cpu_get_id() == PLATFORM_PRIMARY_CORE_ID || zone != SOF_MEM_ZONE_SYS) { - tr_err(&mem_tr, "free_heap(): critical flow issue"); + tr_err("free_heap(): critical flow issue"); sof_panic(SOF_IPC_PANIC_MEM); } @@ -1142,7 +1141,7 @@ int heap_info(enum mem_zone zone, int index, struct mm_info *out) k_spin_unlock(&memmap->lock, key); return 0; error: - tr_err(&mem_tr, "heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index, + tr_err("heap_info(): failed for zone 0x%x index %d out ptr 0x%x", zone, index, (uint32_t)out); return -EINVAL; } diff --git a/src/lib/ams.c b/src/lib/ams.c index 825e265644bc..1d1391edb6ab 100644 --- a/src/lib/ams.c +++ b/src/lib/ams.c @@ -70,7 +70,7 @@ static struct uuid_idx __sparse_cache *ams_find_uuid_entry_by_uuid(struct ams_sh sizeof(uuid_table[index].message_uuid), uuid, UUID_SIZE); if (ec != 0) { - tr_err(&ams_tr, "Failed to create UUID entry: %u", index); + tr_err("Failed to create UUID entry: %u", index); return NULL; } @@ -79,7 +79,7 @@ static struct uuid_idx __sparse_cache *ams_find_uuid_entry_by_uuid(struct ams_sh } } - tr_err(&ams_tr, "No space to create UUID entry"); + tr_err("No space to create UUID entry"); return NULL; } @@ -453,7 +453,7 @@ static int ams_message_send_internal(struct async_message_service *ams, ams_release(shared_c); if (!found_any) - tr_err(&ams_tr, "No entries found!"); + tr_err("No entries found!"); return err; } @@ -496,7 +496,7 @@ static int ams_process_slot(struct async_message_service *ams, uint32_t slot) instance_id = shared_c->slots[slot].instance_id; ams_release(shared_c); - tr_info(&ams_tr, "ams_process_slot slot %d msg %d from 0x%08x", + tr_info("ams_process_slot slot %d msg %d from 0x%08x", slot, msg.message_type_id, msg.producer_module_id << 16 | msg.producer_instance_id); @@ -535,7 +535,7 @@ static enum task_state process_message(void *arg) int flags; if 
(ams_task->pending_slots == 0) { - tr_err(&ams_tr, "Could not process message! Skipping."); + tr_err("Could not process message! Skipping."); return SOF_TASK_STATE_COMPLETED; } @@ -563,7 +563,7 @@ static int ams_task_init(void) ret = schedule_task_init_ll(&task->ams_task, SOF_UUID(ams_uuid), SOF_SCHEDULE_LL_TIMER, SOF_TASK_PRI_MED, process_message, &ams->ams_task, cpu_get_id(), 0); if (ret) - tr_err(&ams_tr, "Could not init AMS task!"); + tr_err("Could not init AMS task!"); return ret; } diff --git a/src/lib/clk.c b/src/lib/clk.c index a3439b2a01ae..e38cd9c07219 100644 --- a/src/lib/clk.c +++ b/src/lib/clk.c @@ -72,7 +72,7 @@ void clock_set_freq(int clock, uint32_t hz) hz); clk_notify_data.freq = clk_info->freqs[idx].freq; - tr_info(&clock_tr, "clock %d set freq %dHz freq_idx %d", + tr_info("clock %d set freq %dHz freq_idx %d", clock, hz, idx); /* tell anyone interested we are about to change freq */ diff --git a/src/lib/dai.c b/src/lib/dai.c index 8ce98521508a..6023e10c5b3f 100644 --- a/src/lib/dai.c +++ b/src/lib/dai.c @@ -89,7 +89,7 @@ struct dai_group *dai_group_get(uint32_t group_id, uint32_t flags) struct dai_group *group; if (!group_id) { - tr_err(&dai_tr, "dai_group_get(): invalid group_id %u", + tr_err("dai_group_get(): invalid group_id %u", group_id); return NULL; } @@ -112,7 +112,7 @@ struct dai_group *dai_group_get(uint32_t group_id, uint32_t flags) group->num_dais++; } else { - tr_err(&dai_tr, "dai_group_get(): failed to get group_id %u", + tr_err("dai_group_get(): failed to get group_id %u", group_id); } @@ -209,7 +209,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) dev = dai_get_zephyr_device(type, index); if (!dev) { - tr_err(&dai_tr, "dai_get: failed to get dai with index %d type %d", + tr_err("dai_get: failed to get dai with index %d type %d", index, type); return NULL; } @@ -225,7 +225,7 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) dai_set_device_params(d); if (dai_probe(d->dev)) { - 
tr_err(&dai_tr, "dai_get: failed to probe dai with index %d type %d", + tr_err("dai_get: failed to probe dai with index %d type %d", index, type); rfree(d); return NULL; @@ -241,7 +241,7 @@ void dai_put(struct dai *dai) ret = dai_remove(dai->dev); if (ret < 0) { - tr_err(&dai_tr, "dai_put_zephyr: index %d failed ret = %d", + tr_err("dai_put_zephyr: index %d failed ret = %d", dai->index, ret); } @@ -287,14 +287,14 @@ struct dai *dai_get(uint32_t type, uint32_t index, uint32_t flags) if (!ret) d->sref++; - tr_info(&dai_tr, "dai_get type %d index %d new sref %d", + tr_info("dai_get type %d index %d new sref %d", type, index, d->sref); k_spin_unlock(&d->lock, key); return !ret ? d : NULL; } - tr_err(&dai_tr, "dai_get: type %d index %d not found", type, index); + tr_err("dai_get: type %d index %d not found", type, index); return NULL; } @@ -307,11 +307,11 @@ void dai_put(struct dai *dai) if (--dai->sref == 0) { ret = dai_remove(dai); if (ret < 0) { - tr_err(&dai_tr, "dai_put: type %d index %d dai_remove() failed ret = %d", + tr_err("dai_put: type %d index %d dai_remove() failed ret = %d", dai->drv->type, dai->index, ret); } } - tr_info(&dai_tr, "dai_put type %d index %d new sref %d", + tr_info("dai_put type %d index %d new sref %d", dai->drv->type, dai->index, dai->sref); k_spin_unlock(&dai->lock, key); } diff --git a/src/lib/dma.c b/src/lib/dma.c index f124a040c4c3..d850715488df 100644 --- a/src/lib/dma.c +++ b/src/lib/dma.c @@ -40,7 +40,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) k_spinlock_key_t key; if (!info->num_dmas) { - tr_err(&dma_tr, "dma_get(): No DMACs installed"); + tr_err("dma_get(): No DMACs installed"); return NULL; } @@ -79,16 +79,16 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) } if (!dmin) { - tr_err(&dma_tr, "No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x", + tr_err("No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x", dir, cap, dev, flags); for (d = info->dma_array; d < 
info->dma_array + info->num_dmas; d++) { - tr_err(&dma_tr, " DMAC ID %d users %d busy channels %ld", + tr_err(" DMAC ID %d users %d busy channels %ld", d->plat_data.id, d->sref, atomic_read(&d->num_channels_busy)); - tr_err(&dma_tr, " caps 0x%x dev 0x%x", + tr_err(" caps 0x%x dev 0x%x", d->plat_data.caps, d->plat_data.devs); } @@ -96,7 +96,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) } /* return DMAC */ - tr_dbg(&dma_tr, "dma_get(), dma-probe id = %d", + tr_dbg("dma_get(), dma-probe id = %d", dmin->plat_data.id); /* Shared DMA controllers with multiple channels @@ -108,7 +108,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) if (!dmin->sref) { ret = dma_init(dmin); if (ret < 0) { - tr_err(&dma_tr, "dma_get(): dma-probe failed id = %d, ret = %d", + tr_err("dma_get(): dma-probe failed id = %d, ret = %d", dmin->plat_data.id, ret); goto out; } @@ -116,7 +116,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) dmin->sref++; - tr_info(&dma_tr, "dma_get() ID %d sref = %d busy channels %ld", + tr_info("dma_get() ID %d sref = %d busy channels %ld", dmin->plat_data.id, dmin->sref, atomic_read(&dmin->num_channels_busy)); out: @@ -134,7 +134,7 @@ void dma_put(struct dma *dma) dma->chan = NULL; } - tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d", + tr_info("dma_put(), dma = %p, sref = %d", dma, dma->sref); k_spin_unlock(&dma->lock, key); } @@ -149,7 +149,7 @@ static int dma_init(struct dma *dma) sizeof(struct dma_chan_data) * dma->plat_data.channels); if (!dma->chan) { - tr_err(&dma_tr, "dma_probe_sof(): dma %d allocaction of channels failed", + tr_err("dma_probe_sof(): dma %d allocation of channels failed", dma->plat_data.id); return -ENOMEM; } @@ -173,7 +173,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) k_spinlock_key_t key; if (!info->num_dmas) { - tr_err(&dma_tr, "dma_get(): No DMACs installed"); + tr_err("dma_get(): No DMACs
installed"); return NULL; } @@ -217,16 +217,16 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) } if (!dmin) { - tr_err(&dma_tr, "No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x", + tr_err("No DMAC dir %d caps 0x%x dev 0x%x flags 0x%x", dir, cap, dev, flags); for (d = info->dma_array; d < info->dma_array + info->num_dmas; d++) { - tr_err(&dma_tr, " DMAC ID %d users %d busy channels %ld", + tr_err(" DMAC ID %d users %d busy channels %ld", d->plat_data.id, d->sref, atomic_read(&d->num_channels_busy)); - tr_err(&dma_tr, " caps 0x%x dev 0x%x", + tr_err(" caps 0x%x dev 0x%x", d->plat_data.caps, d->plat_data.devs); } @@ -234,7 +234,7 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) } /* return DMAC */ - tr_dbg(&dma_tr, "dma_get(), dma-probe id = %d", + tr_dbg("dma_get(), dma-probe id = %d", dmin->plat_data.id); /* Shared DMA controllers with multiple channels @@ -247,14 +247,14 @@ struct dma *dma_get(uint32_t dir, uint32_t cap, uint32_t dev, uint32_t flags) if (!dmin->sref) { ret = dma_probe_legacy(dmin); if (ret < 0) { - tr_err(&dma_tr, "dma_get(): dma-probe failed id = %d, ret = %d", + tr_err("dma_get(): dma-probe failed id = %d, ret = %d", dmin->plat_data.id, ret); } } if (!ret) dmin->sref++; - tr_info(&dma_tr, "dma_get() ID %d sref = %d busy channels %ld", + tr_info("dma_get() ID %d sref = %d busy channels %ld", dmin->plat_data.id, dmin->sref, atomic_read(&dmin->num_channels_busy)); @@ -271,11 +271,11 @@ void dma_put(struct dma *dma) if (--dma->sref == 0) { ret = dma_remove_legacy(dma); if (ret < 0) { - tr_err(&dma_tr, "dma_put(): dma_remove() failed id = %d, ret = %d", + tr_err("dma_put(): dma_remove() failed id = %d, ret = %d", dma->plat_data.id, ret); } } - tr_info(&dma_tr, "dma_put(), dma = %p, sref = %d", + tr_info("dma_put(), dma = %p, sref = %d", dma, dma->sref); k_spin_unlock(&dma->lock, key); } diff --git a/src/lib/notifier.c b/src/lib/notifier.c index a601538d7c0c..febc5bc3f163 100644 --- 
a/src/lib/notifier.c +++ b/src/lib/notifier.c @@ -64,7 +64,7 @@ int notifier_register(void *receiver, void *caller, enum notify_id type, sizeof(*handle)); if (!handle) { - tr_err(&nt_tr, "notifier_register(): callback handle allocation failed."); + tr_err("notifier_register(): callback handle allocation failed."); ret = -ENOMEM; goto out; } diff --git a/src/lib/pm_runtime.c b/src/lib/pm_runtime.c index ba60eb0540a0..75eb9395d878 100644 --- a/src/lib/pm_runtime.c +++ b/src/lib/pm_runtime.c @@ -57,7 +57,7 @@ void pm_runtime_init(struct sof *sof) */ void pm_runtime_get(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_get() context %d index %d", context, index); + tr_dbg("pm_runtime_get() context %d index %d", context, index); switch (context) { default: @@ -68,7 +68,7 @@ void pm_runtime_get(enum pm_runtime_context context, uint32_t index) void pm_runtime_get_sync(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_get_sync() context %d index %d", context, + tr_dbg("pm_runtime_get_sync() context %d index %d", context, index); switch (context) { @@ -82,7 +82,7 @@ void pm_runtime_get_sync(enum pm_runtime_context context, uint32_t index) */ void pm_runtime_put(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_put() context %d index %d", context, index); + tr_dbg("pm_runtime_put() context %d index %d", context, index); switch (context) { default: @@ -93,7 +93,7 @@ void pm_runtime_put(enum pm_runtime_context context, uint32_t index) void pm_runtime_put_sync(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_put_sync() context %d index %d", context, + tr_dbg("pm_runtime_put_sync() context %d index %d", context, index); switch (context) { @@ -106,7 +106,7 @@ void pm_runtime_put_sync(enum pm_runtime_context context, uint32_t index) /** Enables power _management_. The management, not the power. 
*/ void pm_runtime_enable(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_enable() context %d index %d", context, + tr_dbg("pm_runtime_enable() context %d index %d", context, index); switch (context) { @@ -119,7 +119,7 @@ void pm_runtime_enable(enum pm_runtime_context context, uint32_t index) /** Disables power _management_. The management, not the power. */ void pm_runtime_disable(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_disable() context %d index %d", context, + tr_dbg("pm_runtime_disable() context %d index %d", context, index); switch (context) { @@ -132,7 +132,7 @@ void pm_runtime_disable(enum pm_runtime_context context, uint32_t index) /** Is the _power_ active. The power, not its management. */ bool pm_runtime_is_active(enum pm_runtime_context context, uint32_t index) { - tr_dbg(&pm_tr, "pm_runtime_is_active() context %d index %d", context, + tr_dbg("pm_runtime_is_active() context %d index %d", context, index); #if defined(__ZEPHYR__) && defined(CONFIG_PM) return pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES); diff --git a/src/lib/wait.c b/src/lib/wait.c index b61f6fcba6e6..1d09a86db111 100644 --- a/src/lib/wait.c +++ b/src/lib/wait.c @@ -49,7 +49,7 @@ int poll_for_register_delay(uint32_t reg, uint32_t mask, while ((io_reg_read(reg) & mask) != val) { if (!tries--) { - tr_err(&wait_tr, "poll timeout reg %u mask %u val %u us %u", + tr_err("poll timeout reg %u mask %u val %u us %u", reg, mask, val, (uint32_t)us); return -EIO; } diff --git a/src/library_manager/lib_manager.c b/src/library_manager/lib_manager.c index 1b74c3ce748d..1b696c04e3a4 100644 --- a/src/library_manager/lib_manager.c +++ b/src/library_manager/lib_manager.c @@ -69,7 +69,7 @@ static int lib_manager_auth_init(void) ret = auth_api_init(&ext_lib->auth_ctx, ext_lib->auth_buffer, AUTH_SCRATCH_BUFF_SZ, IMG_TYPE_LIB); if (ret != 0) { - tr_err(&lib_manager_tr, "lib_manager_auth_init() failed with error: 
%d", ret); + tr_err("lib_manager_auth_init() failed with error: %d", ret); rfree(ext_lib->auth_buffer); ret = -EACCES; } @@ -98,7 +98,7 @@ static int lib_manager_auth_proc(const void *buffer_data, ret = auth_api_init_auth_proc(&ext_lib->auth_ctx, buffer_data, buffer_size, phase); if (ret != 0) { - tr_err(&lib_manager_tr, "lib_manager_auth_proc() failed with error: %d", ret); + tr_err("lib_manager_auth_proc() failed with error: %d", ret); return -ENOTSUP; } @@ -109,7 +109,7 @@ static int lib_manager_auth_proc(const void *buffer_data, ret = auth_api_result(&ext_lib->auth_ctx); if (ret != AUTH_IMAGE_TRUSTED) { - tr_err(&lib_manager_tr, "lib_manager_auth_proc() Untrasted library!"); + tr_err("lib_manager_auth_proc() Untrusted library!"); return -EACCES; } @@ -288,8 +288,7 @@ static void __sparse_cache *lib_manager_get_instance_bss_address(uint32_t module (void __sparse_cache *)(mod->segment[SOF_MAN_SEGMENT_BSS].v_base_addr + inst_offset); - tr_dbg(&lib_manager_tr, - "lib_manager_get_instance_bss_address(): instance_bss_size: %#x, pointer: %p", + tr_dbg("lib_manager_get_instance_bss_address(): instance_bss_size: %#x, pointer: %p", instance_bss_size, (__sparse_force void *)va_base); return va_base; @@ -305,8 +304,7 @@ static int lib_manager_allocate_module_instance(uint32_t module_id, uint32_t ins instance_id, mod); if ((is_pages * PAGE_SZ) > bss_size) { - tr_err(&lib_manager_tr, - "lib_manager_allocate_module_instance(): invalid is_pages: %u, required: %u", + tr_err("lib_manager_allocate_module_instance(): invalid is_pages: %u, required: %u", is_pages, bss_size / PAGE_SZ); return -ENOMEM; } @@ -353,13 +351,12 @@ uint32_t lib_manager_allocate_module(const struct comp_driver *drv, uint32_t module_id = IPC4_MOD_ID(ipc_config->id); uint32_t entry_index = LIB_MANAGER_GET_MODULE_INDEX(module_id); - tr_dbg(&lib_manager_tr, "lib_manager_allocate_module(): mod_id: %#x", + tr_dbg("lib_manager_allocate_module(): mod_id: %#x", ipc_config->id); desc =
lib_manager_get_library_module_desc(module_id); if (!desc) { - tr_err(&lib_manager_tr, - "lib_manager_allocate_module(): failed to get module descriptor"); + tr_err("lib_manager_allocate_module(): failed to get module descriptor"); return 0; } @@ -381,8 +378,7 @@ uint32_t lib_manager_allocate_module(const struct comp_driver *drv, ret = lib_manager_allocate_module_instance(module_id, IPC4_INST_ID(ipc_config->id), base_cfg->is_pages, mod); if (ret < 0) { - tr_err(&lib_manager_tr, - "lib_manager_allocate_module(): module allocation failed: %d", ret); + tr_err("lib_manager_allocate_module(): module allocation failed: %d", ret); #ifdef CONFIG_LIBCODE_MODULE_SUPPORT lib_manager_unload_libcode_modules(module_id, desc); #endif /* CONFIG_LIBCODE_MODULE_SUPPORT */ @@ -404,7 +400,7 @@ int lib_manager_free_module(const struct comp_driver *drv, uint32_t entry_index = LIB_MANAGER_GET_MODULE_INDEX(module_id); int ret; - tr_dbg(&lib_manager_tr, "lib_manager_free_module(): mod_id: %#x", ipc_config->id); + tr_dbg("lib_manager_free_module(): mod_id: %#x", ipc_config->id); desc = lib_manager_get_library_module_desc(module_id); mod = (struct sof_man_module *)((char *)desc + SOF_MAN_MODULE_OFFSET(entry_index)); @@ -424,8 +420,7 @@ int lib_manager_free_module(const struct comp_driver *drv, ret = lib_manager_free_module_instance(module_id, IPC4_INST_ID(ipc_config->id), mod); if (ret < 0) { - tr_err(&lib_manager_tr, - "lib_manager_free_module(): free module instance failed: %d", ret); + tr_err("lib_manager_free_module(): free module instance failed: %d", ret); return ret; } return 0; @@ -439,8 +434,7 @@ uint32_t lib_manager_allocate_module(const struct comp_driver *drv, struct comp_ipc_config *ipc_config, const void *ipc_specific_config) { - tr_err(&lib_manager_tr, - "lib_manager_allocate_module(): Dynamic module allocation is not supported"); + tr_err("lib_manager_allocate_module(): Dynamic module allocation is not supported"); return 0; } @@ -448,8 +442,7 @@ int 
lib_manager_free_module(const struct comp_driver *drv, struct comp_ipc_config *ipc_config) { /* Since we cannot allocate the freeing is not considered to be an error */ - tr_warn(&lib_manager_tr, - "lib_manager_free_module(): Dynamic module freeing is not supported"); + tr_warn("lib_manager_free_module(): Dynamic module freeing is not supported"); return 0; } #endif /* CONFIG_MM_DRV */ @@ -500,8 +493,7 @@ int lib_manager_register_module(struct sof_man_fw_desc *desc, int module_id) sizeof(struct comp_driver_info)); if (!new_drv_info) { - tr_err(&lib_manager_tr, - "lib_manager_register_module(): failed to allocate comp_driver_info"); + tr_err("lib_manager_register_module(): failed to allocate comp_driver_info"); ret = -ENOMEM; goto cleanup; } @@ -510,8 +502,7 @@ int lib_manager_register_module(struct sof_man_fw_desc *desc, int module_id) SOF_MEM_CAPS_RAM | SOF_MEM_FLAG_COHERENT, sizeof(struct comp_driver)); if (!drv) { - tr_err(&lib_manager_tr, - "lib_manager_register_module(): failed to allocate comp_driver"); + tr_err("lib_manager_register_module(): failed to allocate comp_driver"); ret = -ENOMEM; goto cleanup; } @@ -540,8 +531,7 @@ int lib_manager_register_module(struct sof_man_fw_desc *desc, int module_id) #else /* CONFIG_INTEL_MODULES */ int lib_manager_register_module(struct sof_man_fw_desc *desc, int module_id) { - tr_err(&lib_manager_tr, - "lib_manager_register_module(): Dynamic module loading is not supported"); + tr_err("lib_manager_register_module(): Dynamic module loading is not supported"); return -ENOTSUP; } #endif /* CONFIG_INTEL_MODULES */ @@ -556,12 +546,11 @@ static int lib_manager_dma_buffer_alloc(struct lib_manager_dma_ext *dma_ext, dma_ext->dma_addr = (uintptr_t)rballoc_align(SOF_MEM_FLAG_COHERENT, SOF_MEM_CAPS_DMA, size, dma_ext->addr_align); if (!dma_ext->dma_addr) { - tr_err(&lib_manager_tr, "lib_manager_dma_buffer_alloc(): alloc failed"); + tr_err("lib_manager_dma_buffer_alloc(): alloc failed"); return -ENOMEM; } - tr_dbg(&lib_manager_tr, - 
"lib_manager_dma_buffer_alloc(): address: %#lx, size: %u", + tr_dbg("lib_manager_dma_buffer_alloc(): address: %#lx, size: %u", dma_ext->dma_addr, size); return 0; @@ -582,8 +571,7 @@ static int lib_manager_dma_init(struct lib_manager_dma_ext *dma_ext, uint32_t dm dma_ext->dma = dma_get(DMA_DIR_HMEM_TO_LMEM, 0, DMA_DEV_HOST, DMA_ACCESS_EXCLUSIVE); if (!dma_ext->dma) { - tr_err(&lib_manager_tr, - "lib_manager_dma_init(): dma_ext->dma = NULL"); + tr_err("lib_manager_dma_init(): dma_ext->dma = NULL"); return -ENODEV; } @@ -633,8 +621,7 @@ static int lib_manager_load_data_from_host(struct lib_manager_dma_ext *dma_ext, k_usleep(100); } - tr_err(&lib_manager_tr, - "lib_manager_load_data_from_host(): timeout during DMA transfer"); + tr_err("lib_manager_load_data_from_host(): timeout during DMA transfer"); return -ETIMEDOUT; } @@ -680,7 +667,7 @@ static void __sparse_cache *lib_manager_allocate_store_mem(uint32_t size, local_add = (__sparse_force void __sparse_cache *)rballoc_align(0, caps, size, addr_align); if (!local_add) { - tr_err(&lib_manager_tr, "lib_manager_allocate_store_mem(): alloc failed"); + tr_err("lib_manager_allocate_store_mem(): alloc failed"); return NULL; } @@ -702,7 +689,7 @@ static int lib_manager_store_library(struct lib_manager_dma_ext *dma_ext, if (!library_base_address) return -ENOMEM; - tr_dbg(&lib_manager_tr, "lib_manager_store_library(): pointer: %p", + tr_dbg("lib_manager_store_library(): pointer: %p", (__sparse_force void *)library_base_address); #if CONFIG_LIBRARY_AUTH_SUPPORT @@ -823,8 +810,7 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) if (type == SOF_IPC4_GLB_LOAD_LIBRARY && (lib_id == 0 || lib_id >= LIB_MANAGER_MAX_LIBS)) { - tr_err(&lib_manager_tr, - "lib_manager_load_library(): invalid lib_id: %u", lib_id); + tr_err("lib_manager_load_library(): invalid lib_id: %u", lib_id); return -EINVAL; } @@ -873,8 +859,7 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) stop_dma: ret2 
= dma_stop(dma_ext->chan->dma->z_dev, dma_ext->chan->index); if (ret2 < 0) { - tr_err(&lib_manager_tr, - "lib_manager_load_library(): error stopping DMA: %d", ret); + tr_err("lib_manager_load_library(): error stopping DMA: %d", ret); if (!ret) ret = ret2; } @@ -889,7 +874,7 @@ int lib_manager_load_library(uint32_t dma_id, uint32_t lib_id, uint32_t type) _ext_lib->runtime_data = NULL; if (!ret) - tr_info(&ipc_tr, "loaded library id: %u", lib_id); + tr_info("loaded library id: %u", lib_id); return ret; } diff --git a/src/library_manager/lib_notification.c b/src/library_manager/lib_notification.c index 29ca2de2a951..1b9859dd804e 100644 --- a/src/library_manager/lib_notification.c +++ b/src/library_manager/lib_notification.c @@ -44,7 +44,7 @@ struct ipc_msg *lib_notif_msg_init(uint32_t header, uint32_t size) k_spinlock_key_t key; /* No free element or list empty, create new handle */ if (ext_lib->lib_notif_count > LIB_MANAGER_LIB_NOTIX_MAX_COUNT) { - tr_dbg(&lib_manager_tr, "lib_nofig_msg_init() LIB_MANAGER_LIB_NOTIX_MAX_COUNT < %d", + tr_dbg("lib_notif_msg_init() LIB_MANAGER_LIB_NOTIX_MAX_COUNT < %d", ext_lib->lib_notif_count); return NULL; } } diff --git a/src/library_manager/llext_manager.c b/src/library_manager/llext_manager.c index a0503117bd95..cc1b20cc4916 100644 --- a/src/library_manager/llext_manager.c +++ b/src/library_manager/llext_manager.c @@ -60,7 +60,7 @@ static int llext_manager_load_data_from_storage(void __sparse_cache *vma, void * int ret = llext_manager_align_map(vma, size, flags); if (ret < 0) { - tr_err(&lib_manager_tr, "cannot map %u of %p", size, (__sparse_force void *)vma); + tr_err("cannot map %u of %p", size, (__sparse_force void *)vma); return ret; } @@ -144,8 +144,7 @@ static int llext_manager_allocate_module_bss(uint32_t module_id, void __sparse_cache *va_base = llext_manager_get_bss_address(module_id, mod); if (is_pages * PAGE_SZ > bss_size) { - tr_err(&lib_manager_tr, - "llext_manager_allocate_module_bss(): invalid is_pages: %u, required:
%u", + tr_err("llext_manager_allocate_module_bss(): invalid is_pages: %u, required: %u", is_pages, bss_size / PAGE_SZ); return -ENOMEM; } @@ -182,13 +181,12 @@ uint32_t llext_manager_allocate_module(const struct comp_driver *drv, uint32_t entry_index = LIB_MANAGER_GET_MODULE_INDEX(module_id); struct lib_manager_mod_ctx *ctx = lib_manager_get_mod_ctx(module_id); - tr_dbg(&lib_manager_tr, "llext_manager_allocate_module(): mod_id: %#x", + tr_dbg("llext_manager_allocate_module(): mod_id: %#x", ipc_config->id); desc = lib_manager_get_library_module_desc(module_id); if (!ctx || !desc) { - tr_err(&lib_manager_tr, - "llext_manager_allocate_module(): failed to get module descriptor"); + tr_err("llext_manager_allocate_module(): failed to get module descriptor"); return 0; } @@ -203,8 +201,7 @@ uint32_t llext_manager_allocate_module(const struct comp_driver *drv, ret = llext_manager_allocate_module_bss(module_id, base_cfg->is_pages, mod); if (ret < 0) { - tr_err(&lib_manager_tr, - "llext_manager_allocate_module(): module allocation failed: %d", ret); + tr_err("llext_manager_allocate_module(): module allocation failed: %d", ret); return 0; } return mod->entry_point; @@ -219,7 +216,7 @@ int llext_manager_free_module(const struct comp_driver *drv, uint32_t entry_index = LIB_MANAGER_GET_MODULE_INDEX(module_id); int ret; - tr_dbg(&lib_manager_tr, "llext_manager_free_module(): mod_id: %#x", ipc_config->id); + tr_dbg("llext_manager_free_module(): mod_id: %#x", ipc_config->id); desc = lib_manager_get_library_module_desc(module_id); mod = (struct sof_man_module *)((char *)desc + SOF_MAN_MODULE_OFFSET(entry_index)); @@ -230,8 +227,7 @@ int llext_manager_free_module(const struct comp_driver *drv, ret = llext_manager_free_module_bss(module_id, mod); if (ret < 0) { - tr_err(&lib_manager_tr, - "llext_manager_free_module(): free module bss failed: %d", ret); + tr_err("llext_manager_free_module(): free module bss failed: %d", ret); return ret; } return 0; diff --git a/src/math/power.c 
b/src/math/power.c index 112ae716e1bb..7c8c0492fe6b 100644 --- a/src/math/power.c +++ b/src/math/power.c @@ -58,7 +58,7 @@ int32_t power_int32(int32_t b, int32_t e) multiplier = (int32_t)((1LL << 50) / (int64_t)b); } else { multiplier = INT32_MAX; - tr_err(&math_power_tr, "power_int32(): Divide by zero error."); + tr_err("power_int32(): Divide by zero error."); } } else { multiplier = b; diff --git a/src/platform/amd/acp_6_3/lib/clk.c b/src/platform/amd/acp_6_3/lib/clk.c index 566a988df3ef..fcabc211a964 100644 --- a/src/platform/amd/acp_6_3/lib/clk.c +++ b/src/platform/amd/acp_6_3/lib/clk.c @@ -192,7 +192,7 @@ void acp_change_clock_notify(uint32_t clock_freq) acp_6_3_get_boot_ref_clock(&boot_ref_clk); - tr_info(&acp_clk_tr, "acp_change_clock_notify clock_freq : %d clock_type : %d", + tr_info("acp_change_clock_notify clock_freq : %d clock_type : %d", clock_freq, clock_type); fraction_val = (float)(clock_freq / (float)1000000.0f); @@ -213,7 +213,7 @@ void acp_change_clock_notify(uint32_t clock_freq) bypass_cntl.bitfields.CLK1_BYPASS_DIV = 0xF; } else { did = (float)(boot_ref_clk / (float)fraction_val); - tr_info(&acp_clk_tr, "acp_change_clock_notify CLK Divider : %d boot_ref_clk : %d\n", + tr_info("acp_change_clock_notify CLK Divider : %d boot_ref_clk : %d\n", (uint32_t)(did * 100), (uint32_t)boot_ref_clk); if (did > 62.0f) { @@ -252,7 +252,7 @@ void acp_change_clock_notify(uint32_t clock_freq) do { dfs_status.u32all = acp_reg_read_via_smn(CLK5_CLK1_DFS_STATUS, sizeof(int)); - tr_info(&acp_clk_tr, "acp_change_clock_notify ACLK1 CLK1_DIVIDER : %d dfsstatus %d ", + tr_info("acp_change_clock_notify ACLK1 CLK1_DIVIDER : %d dfsstatus %d ", dfs_cntl.u32all, dfs_status.u32all); } while (dfs_status.bitfields.CLK1_DFS_DIV_REQ_IDLE == 0); updated_clk = acp_reg_read_via_smn(CLK5_CLK1_CURRENT_CNT, sizeof(int)); @@ -269,7 +269,7 @@ void acp_change_clock_notify(uint32_t clock_freq) dfs_cntl.u32all = acp_reg_read_via_smn(CLK5_CLK1_DFS_CNTL, sizeof(int)); - tr_info(&acp_clk_tr, 
"acp_change_clock_notify ACLK2 CLK1_DIVIDER:%d dfsstatus %d ", + tr_info("acp_change_clock_notify ACLK2 CLK1_DIVIDER:%d dfsstatus %d ", dfs_cntl.u32all, dfs_status.u32all); } while (dfs_status.bitfields.CLK1_DFS_DIV_REQ_IDLE == 0); } @@ -282,14 +282,13 @@ void acp_change_clock_notify(uint32_t clock_freq) do { dfs_status.u32all = acp_reg_read_via_smn(CLK5_CLK0_DFS_STATUS, sizeof(int)); - tr_info(&acp_clk_tr, "acp_change_clock_notify SCLK CLK1_DIVIDER: %d", + tr_info("acp_change_clock_notify SCLK CLK1_DIVIDER: %d", dfs_cntl.u32all); } while (dfs_status.bitfields.CLK1_DFS_DIV_REQ_IDLE == 0); updated_clk = acp_reg_read_via_smn(CLK5_CLK0_CURRENT_CNT, sizeof(int)); } - tr_info(&acp_clk_tr, - "clock_notify:CLK1_DIVIDER :%x boot_ref_clk : %d ClkReq : %d FinalClockValue: %d", + tr_info("clock_notify:CLK1_DIVIDER :%x boot_ref_clk : %d ClkReq : %d FinalClockValue: %d", dfs_cntl.u32all, (uint32_t)boot_ref_clk, clock_freq, updated_clk); } @@ -446,7 +445,7 @@ void audio_pll_mode_switch(uint32_t mode, uint32_t fcw_int, uint32_t fcw_frac, u acp_reg_write_via_smn(CLK5_CLK_PLL_RESET_STOP_TIMER, 0xbbb11aa, sizeof(int)); break; default: - tr_err(&acp_clk_tr, "ERROR: Invalid PLL Mode"); + tr_err("ERROR: Invalid PLL Mode"); return; } diff --git a/src/platform/intel/ace/lib/watchdog.c b/src/platform/intel/ace/lib/watchdog.c index a77325468f08..c970f3b13e35 100644 --- a/src/platform/intel/ace/lib/watchdog.c +++ b/src/platform/intel/ace/lib/watchdog.c @@ -88,7 +88,7 @@ void watchdog_init(void) ret = wdt_install_timeout(watchdog, &watchdog_config); if (ret) { - tr_warn(&wdt_tr, "Watchdog install timeout error %d", ret); + tr_warn("Watchdog install timeout error %d", ret); return; } @@ -97,7 +97,7 @@ void watchdog_init(void) ret = wdt_setup(watchdog, 0); if (ret) - tr_warn(&wdt_tr, "Watchdog setup error %d", ret); + tr_warn("Watchdog setup error %d", ret); } void watchdog_enable(int core) diff --git a/src/platform/library/schedule/edf_schedule.c b/src/platform/library/schedule/edf_schedule.c 
index 81afdb15e889..56abf2750cab 100644 --- a/src/platform/library/schedule/edf_schedule.c +++ b/src/platform/library/schedule/edf_schedule.c @@ -111,7 +111,7 @@ int schedule_task_init_edf(struct task *task, const struct sof_uuid_entry *uid, /* initialize scheduler */ int scheduler_init_edf(void) { - tr_info(&edf_tr, "edf_scheduler_init()"); + tr_info("edf_scheduler_init()"); sch = malloc(sizeof(*sch)); list_init(&sch->list); diff --git a/src/platform/library/schedule/ll_schedule.c b/src/platform/library/schedule/ll_schedule.c index 5e0ee47325b2..77d8cc64fc4b 100644 --- a/src/platform/library/schedule/ll_schedule.c +++ b/src/platform/library/schedule/ll_schedule.c @@ -113,7 +113,7 @@ int schedule_task_init_ll(struct task *task, /* initialize scheduler */ int scheduler_init_ll(struct ll_schedule_domain *domain) { - tr_info(&ll_tr, "ll_scheduler_init()"); + tr_info("ll_scheduler_init()"); list_init(&sched_list); scheduler_init(SOF_SCHEDULE_LL_TIMER, &schedule_ll_ops, NULL); diff --git a/src/platform/mt8186/lib/clk.c b/src/platform/mt8186/lib/clk.c index c8e53c9f5aa0..3e99762be033 100644 --- a/src/platform/mt8186/lib/clk.c +++ b/src/platform/mt8186/lib/clk.c @@ -38,7 +38,7 @@ static SHARED_DATA struct clock_info platform_clocks_info[NUM_CLOCKS]; static void clk_dsppll_enable(uint32_t value) { - tr_dbg(&clkdrv_tr, "clk_dsppll_enable: %d\n", value); + tr_dbg("clk_dsppll_enable: %d\n", value); switch (value) { case ADSP_CLK_PLL_300M: @@ -48,7 +48,7 @@ static void clk_dsppll_enable(uint32_t value) io_reg_write(MTK_ADSPPLL_CON1, MTK_PLL_DIV_RATIO_400M); break; default: - tr_err(&clkdrv_tr, "invalid dsppll: %d\n", value); + tr_err("invalid dsppll: %d\n", value); return; } @@ -62,7 +62,7 @@ static void clk_dsppll_enable(uint32_t value) static void clk_dsppll_disable(void) { - tr_dbg(&clkdrv_tr, "clk_dsppll_disable\n"); + tr_dbg("clk_dsppll_disable\n"); io_reg_update_bits(MTK_ADSPPLL_CON0, MTK_PLL_BASE_EN, 0); wait_delay_us(1); @@ -77,7 +77,7 @@ static void 
set_mux_adsp_sel(uint32_t value) io_reg_write(MTK_CLK_CFG_11_SET, value << MTK_CLK_ADSP_OFFSET); io_reg_write(MTK_CLK_CFG_UPDATE, MTK_CLK_CFG_ADSP_UPDATE); - tr_dbg(&clkdrv_tr, "adsp_clk_mux=%x, CLK_CFG_11=0x%08x\n", + tr_dbg("adsp_clk_mux=%x, CLK_CFG_11=0x%08x\n", value, io_reg_read(MTK_CLK_CFG_11)); } @@ -87,7 +87,7 @@ static void set_mux_adsp_bus_src_sel(uint32_t value) io_reg_write(MTK_ADSP_CLK_BUS_UPDATE, MTK_ADSP_CLK_BUS_UPDATE_BIT); wait_delay_us(1); - tr_dbg(&clkdrv_tr, "adsp_bus_mux=%x, MTK_ADSP_BUS_SRC=0x%08x\n", + tr_dbg("adsp_bus_mux=%x, MTK_ADSP_BUS_SRC=0x%08x\n", value, io_reg_read(MTK_ADSP_BUS_SRC)); } @@ -97,7 +97,7 @@ static void set_mux_adsp_bus_sel(uint32_t value) io_reg_write(MTK_CLK_CFG_15_SET, value << MTK_CLK_ADSP_BUS_OFFSET); io_reg_write(MTK_CLK_CFG_UPDATE, MTK_CLK_CFG_ADSP_BUS_UPDATE); - tr_dbg(&clkdrv_tr, "adsp_bus_clk_mux=%x, CLK_CFG_15=0x%08x\n", + tr_dbg("adsp_bus_clk_mux=%x, CLK_CFG_15=0x%08x\n", value, io_reg_read(MTK_CLK_CFG_15)); } @@ -126,7 +126,7 @@ static int clock_platform_set_dsp_freq(int clock, int freq_idx) break; default: clock_platform_set_dsp_freq(clock, ADSP_CLK_26M); - tr_err(&clkdrv_tr, "unknown freq index %x\n", freq_idx); + tr_err("unknown freq index %x\n", freq_idx); break; } diff --git a/src/platform/mt8188/lib/clk.c b/src/platform/mt8188/lib/clk.c index 8737ac98f237..5b115eabe134 100644 --- a/src/platform/mt8188/lib/clk.c +++ b/src/platform/mt8188/lib/clk.c @@ -38,7 +38,7 @@ static SHARED_DATA struct clock_info platform_clocks_info[NUM_CLOCKS]; static void clk_dsppll_enable(uint32_t value) { - tr_dbg(&clkdrv_tr, "clk_dsppll_enable %d\n", value); + tr_dbg("clk_dsppll_enable %d\n", value); switch (value) { case ADSP_CLK_PLL_400M: @@ -48,7 +48,7 @@ static void clk_dsppll_enable(uint32_t value) io_reg_write(MTK_ADSPPLL_CON1, MTK_PLL_DIV_RATIO_800M); break; default: - tr_err(&clkdrv_tr, "invalid dsppll: %d\n", value); + tr_err("invalid dsppll: %d\n", value); return; } @@ -62,7 +62,7 @@ static void 
clk_dsppll_enable(uint32_t value) static void clk_dsppll_disable(void) { - tr_dbg(&clkdrv_tr, "clk_dsppll_disable\n"); + tr_dbg("clk_dsppll_disable\n"); io_reg_update_bits(MTK_ADSPPLL_CON0, MTK_PLL_EN, 0); wait_delay_us(1); @@ -77,7 +77,7 @@ static void set_mux_adsp_sel(uint32_t value) io_reg_write(MTK_CLK_CFG_17_SET, value << MTK_CLK_ADSP_OFFSET); io_reg_write(MTK_CLK_CFG_UPDATE2, MTK_CLK_UPDATE_ADSK_CLK); - tr_dbg(&clkdrv_tr, "adsp_clk_mux=%x, CLK_CFG_17=0x%08x\n", + tr_dbg("adsp_clk_mux=%x, CLK_CFG_17=0x%08x\n", value, io_reg_read(MTK_CLK_CFG_17)); } @@ -88,7 +88,7 @@ static void set_mux_adsp_bus_sel(uint32_t value) io_reg_write(MTK_CLK_CFG_17_SET, value << MTK_CLK_AUDIO_LOCAL_BUS_OFFSET); io_reg_write(MTK_CLK_CFG_UPDATE2, MTK_CLK_UPDATE_AUDIO_LOCAL_BUS_CLK); - tr_dbg(&clkdrv_tr, "audio_local_bus_mux=%x, CLK_CFG_17=0x%08x\n", + tr_dbg("audio_local_bus_mux=%x, CLK_CFG_17=0x%08x\n", value, io_reg_read(MTK_CLK_CFG_17)); } @@ -96,7 +96,7 @@ static int clock_platform_set_dsp_freq(int clock, int freq_idx) { int freq = platform_cpu_freq[freq_idx].freq; - tr_info(&clkdrv_tr, "clock_platform_set_cpu_freq %d\n", freq); + tr_info("clock_platform_set_cpu_freq %d\n", freq); switch (freq_idx) { case ADSP_CLK_26M: diff --git a/src/platform/mt8195/lib/clk.c b/src/platform/mt8195/lib/clk.c index cbc1ef7618f2..62d228c3416a 100644 --- a/src/platform/mt8195/lib/clk.c +++ b/src/platform/mt8195/lib/clk.c @@ -78,7 +78,7 @@ static inline int dsp_clk_value_convert(int value) static void clk_dsppll_enable(void) { - tr_dbg(&clkdrv_tr, "clk_dsppll_enable\n"); + tr_dbg("clk_dsppll_enable\n"); io_reg_update_bits(AUDIODSP_CK_CG, 0x1 << RG_AUDIODSP_SW_CG, 0x0); clk_setl(DSPPLL_CON4, PLL_PWR_ON); @@ -92,7 +92,7 @@ static void clk_dsppll_enable(void) static void clk_dsppll_disable(void) { - tr_dbg(&clkdrv_tr, "clk_dsppll_disable\n"); + tr_dbg("clk_dsppll_disable\n"); clk_clrl(DSPPLL_CON0, PLL_EN); wait_delay_us(1); @@ -104,7 +104,7 @@ static void clk_dsppll_disable(void) static int 
dsppll_get_enable(void) { - tr_dbg(&clkdrv_tr, "dsppll_enable=%d.\n", dsppll_enable); + tr_dbg("dsppll_enable=%d.\n", dsppll_enable); return dsppll_enable; } @@ -117,7 +117,7 @@ static int set_mux_sel(enum mux_id_t mux_id, uint32_t value) io_reg_update_bits(CLK_CFG_22_SET, (0xF << 0), (value << 0)); io_reg_write(CLK_CFG_UPDATE2, 1 << CLK_UPDATE_ADSP_CK); - tr_dbg(&clkdrv_tr, "adspclk_mux=%x, CLK_CFG_22=0x%08x\n", + tr_dbg("adspclk_mux=%x, CLK_CFG_22=0x%08x\n", value, io_reg_read(CLK_CFG_22)); break; case MUX_CLK_AUDIO_LOCAL_BUS_SEL: @@ -125,11 +125,11 @@ static int set_mux_sel(enum mux_id_t mux_id, uint32_t value) io_reg_update_bits(CLK_CFG_28_SET, (0xF << 16), (value << 16)); io_reg_write(CLK_CFG_UPDATE3, 1 << CLK_UPDATE_AUDIO_LOCAL_BUS_CK); - tr_dbg(&clkdrv_tr, "audio_local_bus_clk_mux=%x, CLK_CFG_28=0x%08x\n", + tr_dbg("audio_local_bus_clk_mux=%x, CLK_CFG_28=0x%08x\n", value, io_reg_read(CLK_CFG_28)); break; default: - tr_dbg(&clkdrv_tr, "error: unknown mux_id (%d)\n", mux_id); + tr_dbg("error: unknown mux_id (%d)\n", mux_id); break; } @@ -145,7 +145,7 @@ static int clock_platform_set_cpu_freq(int clock, int freq_idx) if (adsp_clock == adsp_clk_req) return 0; - tr_info(&clkdrv_tr, "clock_platform_set_cpu_freq %d\n", adsp_clk_req); + tr_info("clock_platform_set_cpu_freq %d\n", adsp_clk_req); /* convert res manager value to driver map */ clk_mux = dsp_clk_value_convert(freq_idx); diff --git a/src/probe/probe.c b/src/probe/probe.c index 67f6520c663c..bd73125b5ee8 100644 --- a/src/probe/probe.c +++ b/src/probe/probe.c @@ -105,7 +105,7 @@ static int probe_dma_buffer_init(struct probe_dma_buf *buffer, uint32_t size, size, align); if (!buffer->addr) { - tr_err(&pr_tr, "probe_dma_buffer_init(): alloc failed"); + tr_err("probe_dma_buffer_init(): alloc failed"); return -ENOMEM; } @@ -148,7 +148,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) dma->dc.dmac = dma_get(direction, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); if (!dma->dc.dmac) { - 
tr_err(&pr_tr, "probe_dma_init(): dma->dc.dmac = NULL"); + tr_err("probe_dma_init(): dma->dc.dmac = NULL"); return -ENODEV; } dma->dc.dmac->priv_data = &dma->dc.dmac->chan->index; @@ -204,7 +204,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) dma->dc.dmac = dma_get(direction, 0, DMA_DEV_HOST, DMA_ACCESS_SHARED); if (!dma->dc.dmac) { - tr_err(&pr_tr, "probe_dma_init(): dma->dc.dmac = NULL"); + tr_err("probe_dma_init(): dma->dc.dmac = NULL"); return -ENODEV; } @@ -216,7 +216,7 @@ static int probe_dma_init(struct probe_dma_ext *dma, uint32_t direction) channel = dma_request_channel(dma->dc.dmac->z_dev, &channel); if (channel < 0) { - tr_err(&pr_tr, "probe_dma_init(): dma_request_channel() failed"); + tr_err("probe_dma_init(): dma_request_channel() failed"); return -EINVAL; } dma->dc.chan = &dma->dc.dmac->chan[channel]; @@ -265,7 +265,7 @@ static int probe_dma_deinit(struct probe_dma_ext *dma) err = dma_stop_legacy(dma->dc.chan); #endif if (err < 0) { - tr_err(&pr_tr, "probe_dma_deinit(): dma_stop() failed"); + tr_err("probe_dma_deinit(): dma_stop() failed"); return err; } #if CONFIG_ZEPHYR_NATIVE_DRIVERS @@ -305,7 +305,7 @@ static enum task_state probe_task(void *data) ©_align); #endif if (err < 0) { - tr_err(&pr_tr, "probe_task(): dma_get_attribute failed."); + tr_err("probe_task(): dma_get_attribute failed."); return SOF_TASK_STATE_COMPLETED; } @@ -327,7 +327,7 @@ static enum task_state probe_task(void *data) return SOF_TASK_STATE_RESCHEDULE; if (err < 0) { - tr_err(&pr_tr, "probe_task(): dma_copy_to_host() failed."); + tr_err("probe_task(): dma_copy_to_host() failed."); return err; } @@ -346,10 +346,10 @@ int probe_init(const struct probe_dma *probe_dma) uint32_t i; int err; - tr_dbg(&pr_tr, "probe_init()"); + tr_dbg("probe_init()"); if (_probe) { - tr_err(&pr_tr, "probe_init(): Probes already initialized."); + tr_err("probe_init(): Probes already initialized."); return -EINVAL; } @@ -359,13 +359,13 @@ int probe_init(const struct 
probe_dma *probe_dma) _probe = probe_get(); if (!_probe) { - tr_err(&pr_tr, "probe_init(): Alloc failed."); + tr_err("probe_init(): Alloc failed."); return -ENOMEM; } /* setup extraction dma if requested */ if (probe_dma) { - tr_dbg(&pr_tr, "\tstream_tag = %u, dma_buffer_size = %u", + tr_dbg("\tstream_tag = %u, dma_buffer_size = %u", probe_dma->stream_tag, probe_dma->dma_buffer_size); _probe->ext_dma.stream_tag = probe_dma->stream_tag; @@ -373,7 +373,7 @@ int probe_init(const struct probe_dma *probe_dma) err = probe_dma_init(&_probe->ext_dma, DMA_DIR_LMEM_TO_HMEM); if (err < 0) { - tr_err(&pr_tr, "probe_init(): probe_dma_init() failed"); + tr_err("probe_init(): probe_dma_init() failed"); _probe->ext_dma.stream_tag = PROBE_DMA_INVALID; return err; } @@ -383,7 +383,7 @@ int probe_init(const struct probe_dma *probe_dma) err = dma_start_legacy(_probe->ext_dma.dc.chan); #endif if (err < 0) { - tr_err(&pr_tr, "probe_init(): failed to start extraction dma"); + tr_err("probe_init(): failed to start extraction dma"); return -EBUSY; } @@ -393,7 +393,7 @@ int probe_init(const struct probe_dma *probe_dma) SOF_SCHEDULE_LL_TIMER, SOF_TASK_PRI_LOW, probe_task, _probe, 0, 0); } else { - tr_dbg(&pr_tr, "\tno extraction DMA setup"); + tr_dbg("\tno extraction DMA setup"); _probe->ext_dma.stream_tag = PROBE_DMA_INVALID; } @@ -415,10 +415,10 @@ int probe_deinit(void) uint32_t i; int err; - tr_dbg(&pr_tr, "probe_deinit()"); + tr_dbg("probe_deinit()"); if (!_probe) { - tr_err(&pr_tr, "probe_deinit(): Not initialized."); + tr_err("probe_deinit(): Not initialized."); return -EINVAL; } @@ -426,7 +426,7 @@ int probe_deinit(void) /* check for attached injection probe DMAs */ for (i = 0; i < CONFIG_PROBE_DMA_MAX; i++) { if (_probe->inject_dma[i].stream_tag != PROBE_DMA_INVALID) { - tr_err(&pr_tr, "probe_deinit(): Cannot deinitialize with injection DMAs attached."); + tr_err("probe_deinit(): Cannot deinitialize with injection DMAs attached."); return -EINVAL; } } @@ -434,13 +434,13 @@ int 
probe_deinit(void) /* check for connected probe points */ for (i = 0; i < CONFIG_PROBE_POINTS_MAX; i++) { if (_probe->probe_points[i].stream_tag != PROBE_POINT_INVALID) { - tr_err(&pr_tr, "probe_deinit(): Cannot deinitialize with probe points connected."); + tr_err("probe_deinit(): Cannot deinitialize with probe points connected."); return -EINVAL; } } if (_probe->ext_dma.stream_tag != PROBE_DMA_INVALID) { - tr_dbg(&pr_tr, "probe_deinit() Freeing task and extraction DMA."); + tr_dbg("probe_deinit() Freeing task and extraction DMA."); schedule_task_free(&_probe->dmap_work); err = probe_dma_deinit(&_probe->ext_dma); if (err < 0) @@ -462,10 +462,10 @@ int probe_dma_add(uint32_t count, const struct probe_dma *probe_dma) uint32_t first_free; int err; - tr_dbg(&pr_tr, "probe_dma_add() count = %u", count); + tr_dbg("probe_dma_add() count = %u", count); if (!_probe) { - tr_err(&pr_tr, "probe_dma_add(): Not initialized."); + tr_err("probe_dma_add(): Not initialized."); return -EINVAL; } @@ -473,7 +473,7 @@ int probe_dma_add(uint32_t count, const struct probe_dma *probe_dma) /* Iterate over all (DMA) fields if there are multiple of them */ /* add them if there is free place and they are not already attached */ for (i = 0; i < count; i++) { - tr_dbg(&pr_tr, "\tprobe_dma[%u] stream_tag = %u, dma_buffer_size = %u", + tr_dbg("\tprobe_dma[%u] stream_tag = %u, dma_buffer_size = %u", i, probe_dma[i].stream_tag, probe_dma[i].dma_buffer_size); @@ -491,14 +491,14 @@ int probe_dma_add(uint32_t count, const struct probe_dma *probe_dma) } if (stream_tag == probe_dma[i].stream_tag) { - tr_err(&pr_tr, "probe_dma_add(): Probe DMA %u already attached.", + tr_err("probe_dma_add(): Probe DMA %u already attached.", stream_tag); return -EINVAL; } } if (first_free == CONFIG_PROBE_DMA_MAX) { - tr_err(&pr_tr, "probe_dma_add(): Exceeded maximum number of DMAs attached = " + tr_err("probe_dma_add(): Exceeded maximum number of DMAs attached = " STRINGIFY(CONFIG_PROBE_DMA_MAX)); return -EINVAL; } @@ 
-511,7 +511,7 @@ int probe_dma_add(uint32_t count, const struct probe_dma *probe_dma) err = probe_dma_init(&_probe->inject_dma[first_free], DMA_DIR_HMEM_TO_LMEM); if (err < 0) { - tr_err(&pr_tr, "probe_dma_add(): probe_dma_init() failed"); + tr_err("probe_dma_add(): probe_dma_init() failed"); _probe->inject_dma[first_free].stream_tag = PROBE_DMA_INVALID; return err; @@ -546,17 +546,17 @@ int probe_dma_remove(uint32_t count, const uint32_t *stream_tag) uint32_t j; int err; - tr_dbg(&pr_tr, "probe_dma_remove() count = %u", count); + tr_dbg("probe_dma_remove() count = %u", count); if (!_probe) { - tr_err(&pr_tr, "probe_dma_remove(): Not initialized."); + tr_err("probe_dma_remove(): Not initialized."); return -EINVAL; } /* remove each DMA if they are not used */ for (i = 0; i < count; i++) { - tr_dbg(&pr_tr, "\tstream_tag[%u] = %u", i, stream_tag[i]); + tr_dbg("\tstream_tag[%u] = %u", i, stream_tag[i]); if (is_probe_stream_used(stream_tag[i])) return -EINVAL; @@ -604,7 +604,7 @@ static int copy_to_pbuffer(struct probe_dma_buf *pbuf, void *data, /* copy data to probe buffer */ if (memcpy_s((void *)pbuf->w_ptr, pbuf->end_addr - pbuf->w_ptr, data, head)) { - tr_err(&pr_tr, "copy_to_pbuffer(): memcpy_s() failed"); + tr_err("copy_to_pbuffer(): memcpy_s() failed"); return -EINVAL; } dcache_writeback_region((__sparse_force void __sparse_cache *)pbuf->w_ptr, head); @@ -614,7 +614,7 @@ static int copy_to_pbuffer(struct probe_dma_buf *pbuf, void *data, pbuf->w_ptr = pbuf->addr; if (memcpy_s((void *)pbuf->w_ptr, (char *)pbuf->end_addr - (char *)pbuf->w_ptr, (char *)data + head, tail)) { - tr_err(&pr_tr, "copy_to_pbuffer(): memcpy_s() failed"); + tr_err("copy_to_pbuffer(): memcpy_s() failed"); return -EINVAL; } dcache_writeback_region((__sparse_force void __sparse_cache *)pbuf->w_ptr, tail); @@ -661,7 +661,7 @@ static int copy_from_pbuffer(struct probe_dma_buf *pbuf, void *data, /* data from DMA so invalidate it */ dcache_invalidate_region((__sparse_force void __sparse_cache 
*)pbuf->r_ptr, head); if (memcpy_s(data, bytes, (void *)pbuf->r_ptr, head)) { - tr_err(&pr_tr, "copy_from_pbuffer(): memcpy_s() failed"); + tr_err("copy_from_pbuffer(): memcpy_s() failed"); return -EINVAL; } @@ -671,7 +671,7 @@ static int copy_from_pbuffer(struct probe_dma_buf *pbuf, void *data, pbuf->r_ptr = pbuf->addr; dcache_invalidate_region((__sparse_force void __sparse_cache *)pbuf->r_ptr, tail); if (memcpy_s((char *)data + head, tail, (void *)pbuf->r_ptr, tail)) { - tr_err(&pr_tr, "copy_from_pbuffer(): memcpy_s() failed"); + tr_err("copy_from_pbuffer(): memcpy_s() failed"); return -EINVAL; } pbuf->r_ptr = pbuf->r_ptr + tail; @@ -760,7 +760,7 @@ static uint32_t probe_gen_format(uint32_t frame_fmt, uint32_t rate, float_fmt = 1; break; default: - tr_err(&pr_tr, "probe_gen_format(): Invalid frame format specified = 0x%08x", + tr_err("probe_gen_format(): Invalid frame format specified = 0x%08x", frame_fmt); assert(false); return 0; @@ -896,7 +896,7 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) break; if (i == CONFIG_PROBE_POINTS_MAX) { - tr_err(&pr_tr, "probe_cb_produce(): probe not found for buffer id: %d", + tr_err("probe_cb_produce(): probe not found for buffer id: %d", buffer_id); return; } @@ -953,7 +953,7 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) } } if (j == CONFIG_PROBE_DMA_MAX) { - tr_err(&pr_tr, "probe_cb_produce(): dma not found"); + tr_err("probe_cb_produce(): dma not found"); return; } dma = &_probe->inject_dma[j]; @@ -970,7 +970,7 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) &free_bytes); #endif if (ret < 0) { - tr_err(&pr_tr, "probe_cb_produce(): dma_get_data_size() failed, ret = %u", + tr_err("probe_cb_produce(): dma_get_data_size() failed, ret = %u", ret); goto err; } @@ -1030,7 +1030,7 @@ static void probe_cb_produce(void *arg, enum notify_id type, void *data) } return; err: - tr_err(&pr_tr, "probe_cb_produce(): failed to generate probe data"); + 
tr_err("probe_cb_produce(): failed to generate probe data"); } /** @@ -1044,11 +1044,11 @@ static void probe_cb_free(void *arg, enum notify_id type, void *data) uint32_t buffer_id = *(int *)arg; int ret; - tr_dbg(&pr_tr, "probe_cb_free() buffer_id = %u", buffer_id); + tr_dbg("probe_cb_free() buffer_id = %u", buffer_id); ret = probe_point_remove(1, &buffer_id); if (ret < 0) - tr_err(&pr_tr, "probe_cb_free(): probe_point_remove() failed"); + tr_err("probe_cb_free(): probe_point_remove() failed"); } static bool probe_purpose_needs_ext_dma(uint32_t purpose) @@ -1125,10 +1125,10 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) #if CONFIG_IPC_MAJOR_4 struct comp_buffer *buf = NULL; #endif - tr_dbg(&pr_tr, "probe_point_add() count = %u", count); + tr_dbg("probe_point_add() count = %u", count); if (!_probe) { - tr_err(&pr_tr, "probe_point_add(): Not initialized."); + tr_err("probe_point_add(): Not initialized."); return -EINVAL; } @@ -1138,12 +1138,12 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) const probe_point_id_t *buf_id = &probe[i].buffer_id; uint32_t stream_tag; - tr_dbg(&pr_tr, "\tprobe[%u] buffer_id = %u, purpose = %u, stream_tag = %u", + tr_dbg("\tprobe[%u] buffer_id = %u, purpose = %u, stream_tag = %u", i, buf_id->full_id, probe[i].purpose, probe[i].stream_tag); if (!verify_purpose(probe[i].purpose)) { - tr_err(&pr_tr, "probe_point_add() error: invalid purpose %d", + tr_err("probe_point_add() error: invalid purpose %d", probe[i].purpose); return -EINVAL; @@ -1151,7 +1151,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) if (_probe->ext_dma.stream_tag == PROBE_DMA_INVALID && probe_purpose_needs_ext_dma(probe[i].purpose)) { - tr_err(&pr_tr, "probe_point_add(): extraction DMA not enabled."); + tr_err("probe_point_add(): extraction DMA not enabled."); return -EINVAL; } @@ -1167,7 +1167,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) #endif /* check if buffer exists */ 
if (!dev) { - tr_err(&pr_tr, "probe_point_add(): No device with ID %u found.", + tr_err("probe_point_add(): No device with ID %u found.", buf_id->full_id); return -EINVAL; @@ -1175,14 +1175,14 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) #if CONFIG_IPC_MAJOR_4 buf = ipc4_get_buffer(dev, *buf_id); if (!buf) { - tr_err(&pr_tr, "probe_point_add(): buffer %u not found.", + tr_err("probe_point_add(): buffer %u not found.", buf_id->full_id); return -EINVAL; } #else if (dev->type != COMP_TYPE_BUFFER) { - tr_err(&pr_tr, "probe_point_add(): Device ID %u is not a buffer.", + tr_err("probe_point_add(): Device ID %u is not a buffer.", buf_id->full_id); return -EINVAL; @@ -1206,7 +1206,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) if (buffer_id == buf_id->full_id) { if (_probe->probe_points[j].purpose == probe[i].purpose) { - tr_err(&pr_tr, "probe_point_add(): Probe already attached to buffer %u with purpose %u", + tr_err("probe_point_add(): Probe already attached to buffer %u with purpose %u", buffer_id, probe[i].purpose); @@ -1216,7 +1216,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) } if (first_free == CONFIG_PROBE_POINTS_MAX) { - tr_err(&pr_tr, "probe_point_add(): Maximum number of probe points connected aleady: " + tr_err("probe_point_add(): Maximum number of probe points connected already: " STRINGIFY(CONFIG_PROBE_POINTS_MAX)); return -EINVAL; @@ -1237,7 +1237,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) } if (!dma_found) { - tr_err(&pr_tr, "probe_point_add(): No DMA with stream tag %u found for injection.", + tr_err("probe_point_add(): No DMA with stream tag %u found for injection.", probe[i].stream_tag); return -EINVAL; @@ -1248,7 +1248,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) #else if (dma_start_legacy(_probe->inject_dma[j].dc.chan) < 0) { #endif - tr_err(&pr_tr, "probe_point_add(): failed to start dma"); + 
tr_err("probe_point_add(): failed to start dma"); return -EBUSY; } @@ -1263,7 +1263,7 @@ int probe_point_add(uint32_t count, const struct probe_point *probe) } if (j == CONFIG_PROBE_POINTS_MAX) { - tr_dbg(&pr_tr, "probe_point_add(): start probe task"); + tr_dbg("probe_point_add(): start probe task"); schedule_task(&_probe->dmap_work, 1000, 1000); } /* ignore probe stream tag for extraction probes */ @@ -1308,10 +1308,10 @@ int probe_dma_info(struct sof_ipc_probe_info_params *data, uint32_t max_size) uint32_t i = 0; uint32_t j = 0; - tr_dbg(&pr_tr, "probe_dma_info()"); + tr_dbg("probe_dma_info()"); if (!_probe) { - tr_err(&pr_tr, "probe_dma_info(): Not initialized."); + tr_err("probe_dma_info(): Not initialized."); return -EINVAL; } @@ -1346,10 +1346,10 @@ int probe_point_info(struct sof_ipc_probe_info_params *data, uint32_t max_size) uint32_t i = 0; uint32_t j = 0; - tr_dbg(&pr_tr, "probe_point_info()"); + tr_dbg("probe_point_info()"); if (!_probe) { - tr_err(&pr_tr, "probe_point_info(): Not initialized."); + tr_err("probe_point_info(): Not initialized."); return -EINVAL; } @@ -1388,15 +1388,15 @@ int probe_point_remove(uint32_t count, const uint32_t *buffer_id) struct comp_buffer *buf; #endif - tr_dbg(&pr_tr, "probe_point_remove() count = %u", count); + tr_dbg("probe_point_remove() count = %u", count); if (!_probe) { - tr_err(&pr_tr, "probe_point_remove(): Not initialized."); + tr_err("probe_point_remove(): Not initialized."); return -EINVAL; } /* remove each requested probe point */ for (i = 0; i < count; i++) { - tr_dbg(&pr_tr, "\tbuffer_id[%u] = %u", i, buffer_id[i]); + tr_dbg("\tbuffer_id[%u] = %u", i, buffer_id[i]); for (j = 0; j < CONFIG_PROBE_POINTS_MAX; j++) { probe_point_id_t *buf_id = &_probe->probe_points[j].buffer_id; @@ -1436,7 +1436,7 @@ int probe_point_remove(uint32_t count, const uint32_t *buffer_id) break; } if (j == CONFIG_PROBE_POINTS_MAX) { - tr_dbg(&pr_tr, "probe_point_remove(): cancel probe task"); + tr_dbg("probe_point_remove(): cancel probe 
task"); schedule_task_cancel(&_probe->dmap_work); } diff --git a/src/schedule/dma_multi_chan_domain.c b/src/schedule/dma_multi_chan_domain.c index 88b2e9c58e74..bcc102cf7e98 100644 --- a/src/schedule/dma_multi_chan_domain.c +++ b/src/schedule/dma_multi_chan_domain.c @@ -86,7 +86,7 @@ static int dma_multi_chan_domain_irq_register(struct dma_domain_data *data, { int ret; - tr_info(&ll_tr, "dma_multi_chan_domain_irq_register()"); + tr_info("dma_multi_chan_domain_irq_register()"); /* always go through dma_multi_chan_domain_irq_handler, * so we have different arg registered for every channel @@ -125,7 +125,7 @@ static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain, int i; int j; - tr_info(&ll_tr, "dma_multi_chan_domain_register()"); + tr_info("dma_multi_chan_domain_register()"); /* check if task should be registered */ if (!pipe_task->registrable) @@ -192,7 +192,7 @@ static int dma_multi_chan_domain_register(struct ll_schedule_domain *domain, */ static void dma_multi_chan_domain_irq_unregister(struct dma_domain_data *data) { - tr_info(&ll_tr, "dma_multi_chan_domain_irq_unregister()"); + tr_info("dma_multi_chan_domain_irq_unregister()"); interrupt_disable(data->irq, data); @@ -217,7 +217,7 @@ static int dma_multi_chan_domain_unregister(struct ll_schedule_domain *domain, int i; int j; - tr_info(&ll_tr, "dma_multi_chan_domain_unregister()"); + tr_info("dma_multi_chan_domain_unregister()"); /* check if task should be unregistered */ if (!task || !pipe_task->registrable) @@ -365,7 +365,7 @@ struct ll_schedule_domain *dma_multi_chan_domain_init(struct dma *dma_array, int i; int j; - tr_info(&ll_tr, "dma_multi_chan_domain_init(): num_dma %d, clk %d, aggregated_irq %d", + tr_info("dma_multi_chan_domain_init(): num_dma %d, clk %d, aggregated_irq %d", num_dma, clk, aggregated_irq); domain = domain_init(SOF_SCHEDULE_LL_DMA, clk, true, diff --git a/src/schedule/dma_single_chan_domain.c b/src/schedule/dma_single_chan_domain.c index 7016bc64688a..f6227611ab1a 
100644 --- a/src/schedule/dma_single_chan_domain.c +++ b/src/schedule/dma_single_chan_domain.c @@ -101,7 +101,7 @@ static struct dma_chan_data *dma_chan_min_period(struct dma_domain *dma_domain) */ static void dma_domain_notify_change(struct dma_chan_data *channel) { - tr_info(&ll_tr, "dma_domain_notify_change()"); + tr_info("dma_domain_notify_change()"); notifier_event(channel, NOTIFIER_ID_DMA_DOMAIN_CHANGE, NOTIFIER_TARGET_CORE_ALL_MASK & ~BIT(cpu_get_id()), @@ -124,7 +124,7 @@ static int dma_single_chan_domain_irq_register(struct dma_chan_data *channel, int irq = dma_chan_irq(channel->dma, channel->index); int ret; - tr_info(&ll_tr, "dma_single_chan_domain_irq_register()"); + tr_info("dma_single_chan_domain_irq_register()"); data->irq = interrupt_get_irq(irq, dma_irq_name(channel->dma)); if (data->irq < 0) { @@ -154,7 +154,7 @@ static int dma_single_chan_domain_irq_register(struct dma_chan_data *channel, */ static void dma_single_chan_domain_irq_unregister(struct dma_domain_data *data) { - tr_info(&ll_tr, "dma_single_chan_domain_irq_unregister()"); + tr_info("dma_single_chan_domain_irq_unregister()"); interrupt_disable(data->irq, data->arg); interrupt_unregister(data->irq, data->arg); @@ -188,7 +188,7 @@ static int dma_single_chan_domain_register(struct ll_schedule_domain *domain, bool register_needed = true; int ret = 0; - tr_info(&ll_tr, "dma_single_chan_domain_register()"); + tr_info("dma_single_chan_domain_register()"); /* check if task should be registered */ if (!pipe_task->registrable) @@ -206,7 +206,7 @@ static int dma_single_chan_domain_register(struct ll_schedule_domain *domain, if (data->channel->period == channel->period) goto out; - tr_info(&ll_tr, "dma_single_chan_domain_register(): lower period detected, registering again"); + tr_info("dma_single_chan_domain_register(): lower period detected, registering again"); /* unregister from current channel */ dma_single_chan_domain_irq_unregister(data); @@ -220,12 +220,10 @@ static int 
dma_single_chan_domain_register(struct ll_schedule_domain *domain, } if (channel->period <= UINT_MAX) - tr_info(&ll_tr, - "dma_single_chan_domain_register(): registering on channel with period %u", + tr_info("dma_single_chan_domain_register(): registering on channel with period %u", (unsigned int)channel->period); else - tr_info(&ll_tr, - "dma_single_chan_domain_register(): registering on channel with period > %u", + tr_info("dma_single_chan_domain_register(): registering on channel with period > %u", UINT_MAX); /* register for interrupt */ @@ -308,7 +306,7 @@ static void dma_domain_unregister_owner(struct ll_schedule_domain *domain, struct dma *dmas = dma_domain->dma_array; struct dma_chan_data *channel; - tr_info(&ll_tr, "dma_domain_unregister_owner()"); + tr_info("dma_domain_unregister_owner()"); /* transfers still scheduled on this channel */ if (data->channel->status == COMP_STATE_ACTIVE) @@ -317,7 +315,7 @@ static void dma_domain_unregister_owner(struct ll_schedule_domain *domain, channel = dma_chan_min_period(dma_domain); if (channel && dma_chan_is_any_running(dmas, dma_domain->num_dma)) { /* another channel is running */ - tr_info(&ll_tr, "dma_domain_unregister_owner(): domain in use, change owner"); + tr_info("dma_domain_unregister_owner(): domain in use, change owner"); /* change owner */ dma_domain->owner = channel->core; @@ -369,7 +367,7 @@ static int dma_single_chan_domain_unregister(struct ll_schedule_domain *domain, int core = cpu_get_id(); struct dma_domain_data *data = &dma_domain->data[core]; - tr_info(&ll_tr, "dma_single_chan_domain_unregister()"); + tr_info("dma_single_chan_domain_unregister()"); /* check if task should be unregistered */ if (!task || !pipe_task->registrable) @@ -505,7 +503,7 @@ static void dma_domain_changed(void *arg, enum notify_id type, void *data) int core = cpu_get_id(); struct dma_domain_data *domain_data = &dma_domain->data[core]; - tr_info(&ll_tr, "dma_domain_changed()"); + tr_info("dma_domain_changed()"); /* unregister 
from current DMA channel */ dma_single_chan_domain_irq_unregister(domain_data); @@ -538,7 +536,7 @@ struct ll_schedule_domain *dma_single_chan_domain_init(struct dma *dma_array, struct ll_schedule_domain *domain; struct dma_domain *dma_domain; - tr_info(&ll_tr, "dma_single_chan_domain_init(): num_dma %d, clk %d", + tr_info("dma_single_chan_domain_init(): num_dma %d, clk %d", num_dma, clk); domain = domain_init(SOF_SCHEDULE_LL_DMA, clk, false, diff --git a/src/schedule/edf_schedule.c b/src/schedule/edf_schedule.c index e64c8f379d28..38496defda92 100644 --- a/src/schedule/edf_schedule.c +++ b/src/schedule/edf_schedule.c @@ -65,7 +65,7 @@ static void edf_scheduler_run(void *data) uint64_t deadline; uint32_t flags; - tr_dbg(&edf_tr, "edf_scheduler_run()"); + tr_dbg("edf_scheduler_run()"); irq_local_disable(flags); @@ -112,7 +112,7 @@ static int schedule_edf_task(void *data, struct task *task, uint64_t start, /* not enough MCPS to complete */ if (task->state == SOF_TASK_STATE_QUEUED || task->state == SOF_TASK_STATE_RUNNING) { - tr_err(&edf_tr, "schedule_edf_task(), task already queued or running %d", + tr_err("schedule_edf_task(), task already queued or running %d", task->state); irq_local_enable(flags); return -EALREADY; @@ -148,7 +148,7 @@ int schedule_task_init_edf(struct task *task, const struct sof_uuid_entry *uid, edf_pdata = rzalloc(SOF_MEM_ZONE_SYS_RUNTIME, 0, SOF_MEM_CAPS_RAM, sizeof(*edf_pdata)); if (!edf_pdata) { - tr_err(&edf_tr, "schedule_task_init_edf(): alloc failed"); + tr_err("schedule_task_init_edf(): alloc failed"); return -ENOMEM; } @@ -171,7 +171,7 @@ int schedule_task_init_edf(struct task *task, const struct sof_uuid_entry *uid, return 0; error: - tr_err(&edf_tr, "schedule_task_init_edf(): init context failed"); + tr_err("schedule_task_init_edf(): init context failed"); if (edf_pdata->ctx) task_context_free(edf_pdata->ctx); rfree(edf_pdata); @@ -184,7 +184,7 @@ static int schedule_edf_task_running(void *data, struct task *task) struct 
edf_task_pdata *edf_pdata = edf_sch_get_pdata(task); uint32_t flags; - tr_dbg(&edf_tr, "schedule_edf_task_running()"); + tr_dbg("schedule_edf_task_running()"); irq_local_disable(flags); @@ -200,7 +200,7 @@ static int schedule_edf_task_complete(void *data, struct task *task) { uint32_t flags; - tr_dbg(&edf_tr, "schedule_edf_task_complete()"); + tr_dbg("schedule_edf_task_complete()"); irq_local_disable(flags); @@ -218,7 +218,7 @@ static int schedule_edf_task_cancel(void *data, struct task *task) { uint32_t flags; - tr_dbg(&edf_tr, "schedule_edf_task_cancel()"); + tr_dbg("schedule_edf_task_cancel()"); irq_local_disable(flags); @@ -256,7 +256,7 @@ int scheduler_init_edf(void) { struct edf_schedule_data *edf_sch; - tr_info(&edf_tr, "edf_scheduler_init()"); + tr_info("edf_scheduler_init()"); edf_sch = rzalloc(SOF_MEM_ZONE_SYS, 0, SOF_MEM_CAPS_RAM, sizeof(*edf_sch)); @@ -309,7 +309,7 @@ static int scheduler_restore_edf(void *data) PLATFORM_SCHEDULE_IRQ_NAME); if (edf_sch->irq < 0) { - tr_err(&edf_tr, "scheduler_restore_edf(): getting irq failed."); + tr_err("scheduler_restore_edf(): getting irq failed."); return edf_sch->irq; } diff --git a/src/schedule/ll_schedule.c b/src/schedule/ll_schedule.c index ea086150641c..660a5daa9f1d 100644 --- a/src/schedule/ll_schedule.c +++ b/src/schedule/ll_schedule.c @@ -81,14 +81,14 @@ static const struct scheduler_ops schedule_ll_ops; #if CONFIG_PERFORMANCE_COUNTERS static void perf_ll_sched_trace(struct perf_cnt_data *pcd, int ignored) { - tr_info(&ll_tr, "perf ll_work peak plat %u cpu %u", + tr_info("perf ll_work peak plat %u cpu %u", (uint32_t)((pcd)->plat_delta_peak), (uint32_t)((pcd)->cpu_delta_peak)); } static void perf_avg_ll_sched_trace(struct perf_cnt_data *pcd, int ignored) { - tr_info(&ll_tr, "perf ll_work cpu avg %u (current peak %u)", + tr_info("perf ll_work cpu avg %u (current peak %u)", (uint32_t)((pcd)->cpu_delta_sum), (uint32_t)((pcd)->cpu_delta_peak)); } @@ -154,8 +154,8 @@ static void schedule_ll_task_done(struct 
ll_schedule_data *sch, /* unregister the task */ domain_unregister(sch->domain, task, atomic_sub(&sch->num_tasks, 1) - 1); - tr_info(&ll_tr, "task complete %p %pU", task, task->uid); - tr_info(&ll_tr, "num_tasks %ld total_num_tasks %ld", + tr_info("task complete %p %pU", task, task->uid); + tr_info("num_tasks %ld total_num_tasks %ld", atomic_read(&sch->num_tasks), atomic_read(&sch->domain->total_num_tasks)); } @@ -180,7 +180,7 @@ static inline void dsp_load_check(struct task *task, uint32_t cycles0, uint32_t if (++task->cycles_cnt == 1 << CHECKS_WINDOW_SIZE) { task->cycles_sum >>= CHECKS_WINDOW_SIZE; - tr_info(&ll_tr, "task %p %pU avg %u, max %u", task, task->uid, + tr_info("task %p %pU avg %u, max %u", task, task->uid, task->cycles_sum, task->cycles_max); task->cycles_sum = 0; task->cycles_max = 0; @@ -215,7 +215,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) continue; } - tr_dbg(&ll_tr, "task %p %pU being started...", task, task->uid); + tr_dbg("task %p %pU being started...", task, task->uid); #ifdef CONFIG_SCHEDULE_LOG_CYCLE_STATISTICS cycles0 = (uint32_t)sof_cycle_get_64(); @@ -238,7 +238,7 @@ static void schedule_ll_tasks_execute(struct ll_schedule_data *sch) } else { /* update task's start time */ schedule_ll_task_update_start(sch, task); - tr_dbg(&ll_tr, "task %p uid %pU finished, next period ticks %u, domain->next_tick %u", + tr_dbg("task %p uid %pU finished, next period ticks %u, domain->next_tick %u", task, task->uid, (uint32_t)task->start, (uint32_t)domain->next_tick); } @@ -276,8 +276,7 @@ static void schedule_ll_client_reschedule(struct ll_schedule_data *sch) } } - tr_dbg(&ll_tr, - "schedule_ll_clients_reschedule next_tick %u task_take %p", + tr_dbg("schedule_ll_clients_reschedule next_tick %u task_take %p", (unsigned int)next_tick, task_take); /* update the target_tick */ @@ -295,7 +294,7 @@ static void schedule_ll_tasks_run(void *data) uint32_t flags; uint32_t core = cpu_get_id(); - tr_dbg(&ll_tr, "timer interrupt on core 
%d, at %u, previous next_tick %u", + tr_dbg("timer interrupt on core %d, at %u, previous next_tick %u", core, (unsigned int)sof_cycle_get_64_atomic(), (unsigned int)domain->next_tick); @@ -337,7 +336,7 @@ static void schedule_ll_tasks_run(void *data) /* set the next interrupt according to the new_target_tick */ if (domain->new_target_tick < domain->next_tick) { domain_set(domain, domain->new_target_tick); - tr_dbg(&ll_tr, "tasks on core %d done, new_target_tick %u set", + tr_dbg("tasks on core %d done, new_target_tick %u set", core, (unsigned int)domain->new_target_tick); } @@ -367,12 +366,12 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, ret = domain_register(domain, task, &schedule_ll_tasks_run, sch); if (ret < 0) { - tr_err(&ll_tr, "schedule_ll_domain_set: cannot register domain %d", + tr_err("schedule_ll_domain_set: cannot register domain %d", ret); goto done; } - tr_dbg(&ll_tr, "task->start %u next_tick %u", + tr_dbg("task->start %u next_tick %u", (unsigned int)task->start, (unsigned int)domain->next_tick); @@ -411,10 +410,10 @@ static int schedule_ll_domain_set(struct ll_schedule_data *sch, /* make sure enable domain on the core */ domain_enable(domain, core); - tr_info(&ll_tr, "new added task->start %u at %u", + tr_info("new added task->start %u at %u", (unsigned int)task->start, (unsigned int)sof_cycle_get_64_atomic()); - tr_info(&ll_tr, "num_tasks %ld total_num_tasks %ld", + tr_info("num_tasks %ld total_num_tasks %ld", atomic_read(&sch->num_tasks), atomic_read(&domain->total_num_tasks)); @@ -442,7 +441,7 @@ static void schedule_ll_domain_clear(struct ll_schedule_data *sch, /* unregister the task */ domain_unregister(domain, task, atomic_read(&sch->num_tasks)); - tr_info(&ll_tr, "num_tasks %ld total_num_tasks %ld", + tr_info("num_tasks %ld total_num_tasks %ld", atomic_read(&sch->num_tasks), atomic_read(&domain->total_num_tasks)); @@ -508,13 +507,13 @@ static int schedule_ll_task_common(struct ll_schedule_data *sch, struct task *ta pdata 
= ll_sch_get_pdata(task); - tr_info(&ll_tr, "task add %p %pU", task, task->uid); + tr_info("task add %p %pU", task, task->uid); if (start <= UINT_MAX && period <= UINT_MAX) - tr_info(&ll_tr, "task params pri %d flags %d start %u period %u", + tr_info("task params pri %d flags %d start %u period %u", task->priority, task->flags, (unsigned int)start, (unsigned int)period); else - tr_info(&ll_tr, "task params pri %d flags %d start or period > %u", + tr_info("task params pri %d flags %d start or period > %u", task->priority, task->flags, UINT_MAX); pdata->period = period; @@ -549,8 +548,7 @@ static int schedule_ll_task_common(struct ll_schedule_data *sch, struct task *ta * task has the smallest period */ if (pdata->period < reg_pdata->period) { - tr_err(&ll_tr, - "schedule_ll_task(): registrable task has a period longer than current task"); + tr_err("schedule_ll_task(): registrable task has a period longer than current task"); ret = -EINVAL; goto out; } @@ -627,7 +625,7 @@ int schedule_task_init_ll(struct task *task, sizeof(*ll_pdata)); if (!ll_pdata) { - tr_err(&ll_tr, "schedule_task_init_ll(): alloc failed"); + tr_err("schedule_task_init_ll(): alloc failed"); return -ENOMEM; } @@ -663,7 +661,7 @@ static int schedule_ll_task_cancel(void *data, struct task *task) irq_local_disable(flags); - tr_info(&ll_tr, "task cancel %p %pU", task, task->uid); + tr_info("task cancel %p %pU", task, task->uid); /* check to see if we are scheduled */ list_for_item(tlist, &sch->tasks) { @@ -711,7 +709,7 @@ static int reschedule_ll_task(void *data, struct task *task, uint64_t start) } } - tr_err(&ll_tr, "reschedule_ll_task(): task not found"); + tr_err("reschedule_ll_task(): task not found"); out: diff --git a/src/schedule/schedule.c b/src/schedule/schedule.c index aefd13ca9c6a..a40286f63ece 100644 --- a/src/schedule/schedule.c +++ b/src/schedule/schedule.c @@ -29,7 +29,7 @@ int schedule_task_init(struct task *task, void *data, uint16_t core, uint32_t flags) { if (type >= 
SOF_SCHEDULE_COUNT) { - tr_err(&sch_tr, "schedule_task_init(): invalid task type"); + tr_err("schedule_task_init(): invalid task type"); return -EINVAL; } diff --git a/src/schedule/timer_domain.c b/src/schedule/timer_domain.c index 25c37cfc4c3a..86e99cb7f9cb 100644 --- a/src/schedule/timer_domain.c +++ b/src/schedule/timer_domain.c @@ -33,10 +33,10 @@ static void timer_report_delay(int id, uint64_t delay) clock_ms_to_ticks(PLATFORM_DEFAULT_CLOCK, 1); if (delay <= UINT_MAX) - tr_err(&ll_tr, "timer_report_delay(): timer %d delayed by %d uS %d ticks", + tr_err("timer_report_delay(): timer %d delayed by %d uS %d ticks", id, ll_delay_us, (unsigned int)delay); else - tr_err(&ll_tr, "timer_report_delay(): timer %d delayed by %d uS, ticks > %u", + tr_err("timer_report_delay(): timer %d delayed by %d uS, ticks > %u", id, ll_delay_us, UINT_MAX); /* Fix compile error when traces are disabled */ @@ -50,7 +50,7 @@ static int timer_domain_register(struct ll_schedule_domain *domain, struct timer_domain *timer_domain = ll_sch_domain_get_pdata(domain); int core = cpu_get_id(); - tr_dbg(&ll_tr, "timer_domain_register()"); + tr_dbg("timer_domain_register()"); /* tasks already registered on this core */ if (timer_domain->arg[core]) @@ -58,7 +58,7 @@ static int timer_domain_register(struct ll_schedule_domain *domain, timer_domain->arg[core] = arg; - tr_info(&ll_tr, "timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d", + tr_info("timer_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d", domain->type, domain->clk, domain->ticks_per_ms); return timer_register(timer_domain->timer, handler, arg); @@ -73,13 +73,13 @@ static int timer_domain_unregister(struct ll_schedule_domain *domain, if (task) return 0; - tr_dbg(&ll_tr, "timer_domain_unregister()"); + tr_dbg("timer_domain_unregister()"); /* tasks still registered on this core */ if (!timer_domain->arg[core] || num_tasks) return 0; - tr_info(&ll_tr, "timer_domain_unregister domain->type %d 
domain->clk %d", + tr_info("timer_domain_unregister domain->type %d domain->clk %d", domain->type, domain->clk); timer_unregister(timer_domain->timer, timer_domain->arg[core]); @@ -113,7 +113,7 @@ static void timer_domain_set(struct ll_schedule_domain *domain, uint64_t start) ticks_set = platform_timer_set(timer_domain->timer, ticks_req); - tr_dbg(&ll_tr, "timer_domain_set(): ticks_set %u ticks_req %u current %u", + tr_dbg("timer_domain_set(): ticks_set %u ticks_req %u current %u", (unsigned int)ticks_set, (unsigned int)ticks_req, (unsigned int)platform_timer_get_atomic(timer_get())); diff --git a/src/schedule/zephyr_dma_domain.c b/src/schedule/zephyr_dma_domain.c index 45c26387c4fe..7d8c0dc27e11 100644 --- a/src/schedule/zephyr_dma_domain.c +++ b/src/schedule/zephyr_dma_domain.c @@ -381,7 +381,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, dt = zephyr_dma_domain->domain_thread + core; pipe_task = pipeline_task_get(task); - tr_info(&ll_tr, "zephyr_dma_domain_register()"); + tr_info("zephyr_dma_domain_register()"); /* don't even bother trying to register DMA IRQ for * non-registrable tasks. 
@@ -406,8 +406,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, core); if (ret < 0) { - tr_err(&ll_tr, - "failed to register DMA IRQ for pipe task %p on core %d", + tr_err("failed to register DMA IRQ for pipe task %p on core %d", pipe_task, core); return ret; @@ -471,8 +470,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, ret = enable_dma_irq(irq_data); irq_local_enable(flags); if (ret < 0) { - tr_err(&ll_tr, - "failed to enable DMA IRQ for pipe task %p on core %d", + tr_err("failed to enable DMA IRQ for pipe task %p on core %d", pipe_task, core); return ret; @@ -486,7 +484,7 @@ static int zephyr_dma_domain_register(struct ll_schedule_domain *domain, * already enabled even though the Zephyr thread wasn't started */ - tr_err(&ll_tr, "failed to register pipeline task %p on core %d", + tr_err("failed to register pipeline task %p on core %d", pipe_task, core); @@ -554,7 +552,7 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, core = cpu_get_id(); dt = zephyr_dma_domain->domain_thread + core; - tr_info(&ll_tr, "zephyr_dma_domain_unregister()"); + tr_info("zephyr_dma_domain_unregister()"); /* unregister the DMA IRQ only for PPL tasks marked as "registrable" * @@ -573,12 +571,12 @@ static int zephyr_dma_domain_unregister(struct ll_schedule_domain *domain, chan_data = fetch_channel_by_ptask(zephyr_dma_domain, pipe_task); if (!chan_data) { irq_local_enable(flags); - tr_err(&ll_tr, "pipeline task %p doesn't have an associated channel.", pipe_task); + tr_err("pipeline task %p doesn't have an associated channel.", pipe_task); return -EINVAL; } if (chan_data->channel->status == COMP_STATE_ACTIVE) { - tr_warn(&ll_tr, "trying to unregister ptask %p while channel still active.", + tr_warn("trying to unregister ptask %p while channel still active.", pipe_task); } diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c index 7fb9b4ae0465..fa0519f3ceb4 100644 --- 
a/src/schedule/zephyr_domain.c +++ b/src/schedule/zephyr_domain.c @@ -108,7 +108,7 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3) if (++runs == 1 << CYCLES_WINDOW_SIZE) { cycles_sum >>= CYCLES_WINDOW_SIZE; - tr_info(&ll_tr, "ll core %u timer avg %u, max %u, overruns %u", + tr_info("ll core %u timer avg %u, max %u, overruns %u", core, cycles_sum, cycles_max, overruns); cycles_sum = 0; cycles_max = 0; @@ -159,7 +159,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_tid_t thread; k_spinlock_key_t key; - tr_dbg(&ll_tr, "zephyr_domain_register()"); + tr_dbg("zephyr_domain_register()"); /* domain work only needs registered once on each core */ if (dt->handler) @@ -201,7 +201,7 @@ static int zephyr_domain_register(struct ll_schedule_domain *domain, k_spin_unlock(&domain->lock, key); - tr_info(&ll_tr, "zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d", + tr_info("zephyr_domain_register domain->type %d domain->clk %d domain->ticks_per_ms %d period %d", domain->type, domain->clk, domain->ticks_per_ms, (uint32_t)LL_TIMER_PERIOD_US); return 0; @@ -214,7 +214,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, int core = cpu_get_id(); k_spinlock_key_t key; - tr_dbg(&ll_tr, "zephyr_domain_unregister()"); + tr_dbg("zephyr_domain_unregister()"); /* tasks still registered on this core */ if (num_tasks) @@ -234,7 +234,7 @@ static int zephyr_domain_unregister(struct ll_schedule_domain *domain, k_spin_unlock(&domain->lock, key); - tr_info(&ll_tr, "zephyr_domain_unregister domain->type %d domain->clk %d", + tr_info("zephyr_domain_unregister domain->type %d domain->clk %d", domain->type, domain->clk); /* @@ -251,7 +251,7 @@ static void zephyr_domain_block(struct ll_schedule_domain *domain) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - tr_dbg(&ll_tr, "Blocking LL scheduler"); + tr_dbg("Blocking LL scheduler"); 
k_mutex_lock(&zephyr_domain->block_mutex, K_FOREVER); atomic_set(&zephyr_domain->block, 1); @@ -262,7 +262,7 @@ static void zephyr_domain_unblock(struct ll_schedule_domain *domain) { struct zephyr_domain *zephyr_domain = ll_sch_domain_get_pdata(domain); - tr_dbg(&ll_tr, "Unblocking LL scheduler"); + tr_dbg("Unblocking LL scheduler"); k_mutex_lock(&zephyr_domain->block_mutex, K_FOREVER); atomic_set(&zephyr_domain->block, 0); diff --git a/src/schedule/zephyr_dp_schedule.c b/src/schedule/zephyr_dp_schedule.c index 4e95692ed6cb..5d3a2a6af86f 100644 --- a/src/schedule/zephyr_dp_schedule.c +++ b/src/schedule/zephyr_dp_schedule.c @@ -393,7 +393,7 @@ static int scheduler_dp_task_shedule(void *data, struct task *task, uint64_t sta pdata->mod->dp_startup_delay = true; scheduler_dp_unlock(lock_key); - tr_dbg(&dp_tr, "DP task scheduled with period %u [us]", (uint32_t)period); + tr_dbg("DP task scheduled with period %u [us]", (uint32_t)period); return 0; } @@ -463,7 +463,7 @@ int scheduler_dp_task_init(struct task **task, task_memory = rzalloc(SOF_MEM_ZONE_RUNTIME_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*task_memory)); if (!task_memory) { - tr_err(&dp_tr, "zephyr_dp_task_init(): memory alloc failed"); + tr_err("zephyr_dp_task_init(): memory alloc failed"); return -ENOMEM; } @@ -472,7 +472,7 @@ int scheduler_dp_task_init(struct task **task, p_stack = (__sparse_force void __sparse_cache *) rballoc_align(0, SOF_MEM_CAPS_RAM, stack_size, Z_KERNEL_STACK_OBJ_ALIGN); if (!p_stack) { - tr_err(&dp_tr, "zephyr_dp_task_init(): stack alloc failed"); + tr_err("zephyr_dp_task_init(): stack alloc failed"); ret = -ENOMEM; goto err; } @@ -483,14 +483,14 @@ int scheduler_dp_task_init(struct task **task, task_priority, K_USER, K_FOREVER); if (!thread_id) { ret = -EFAULT; - tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr thread create failed"); + tr_err("zephyr_dp_task_init(): zephyr thread create failed"); goto err; } /* pin the thread to specific core */ ret = k_thread_cpu_pin(thread_id, core); if 
(ret < 0) { ret = -EFAULT; - tr_err(&dp_tr, "zephyr_dp_task_init(): zephyr task pin to core failed"); + tr_err("zephyr_dp_task_init(): zephyr task pin to core failed"); goto err; } @@ -498,7 +498,7 @@ int scheduler_dp_task_init(struct task **task, ret = schedule_task_init(&task_memory->task, uid, SOF_SCHEDULE_DP, 0, ops->run, mod, core, 0); if (ret < 0) { - tr_err(&dp_tr, "zephyr_dp_task_init(): schedule_task_init failed"); + tr_err("zephyr_dp_task_init(): schedule_task_init failed"); goto err; } diff --git a/src/schedule/zephyr_ll.c b/src/schedule/zephyr_ll.c index 2069bb6585c4..8ba0a65ec2f3 100644 --- a/src/schedule/zephyr_ll.c +++ b/src/schedule/zephyr_ll.c @@ -62,7 +62,7 @@ static void zephyr_ll_task_done(struct zephyr_ll *sch, list_item_del(&task->list); if (!sch->n_tasks) { - tr_info(&ll_tr, "task count underrun!"); + tr_info("task count underrun!"); k_panic(); } @@ -75,8 +75,8 @@ static void zephyr_ll_task_done(struct zephyr_ll *sch, */ k_sem_give(&pdata->sem); - tr_info(&ll_tr, "task complete %p %pU", task, task->uid); - tr_info(&ll_tr, "num_tasks %d total_num_tasks %ld", + tr_info("task complete %p %pU", task, task->uid); + tr_info("num_tasks %d total_num_tasks %ld", sch->n_tasks, atomic_read(&sch->ll_domain->total_num_tasks)); /* @@ -214,8 +214,7 @@ static void zephyr_ll_run(void *data) state = do_task_run(task); if (state != SOF_TASK_STATE_COMPLETED && state != SOF_TASK_STATE_RESCHEDULE) { - tr_err(&ll_tr, - "zephyr_ll_run: invalid return state %u", + tr_err("zephyr_ll_run: invalid return state %u", state); state = SOF_TASK_STATE_RESCHEDULE; } @@ -271,7 +270,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, struct task *ta zephyr_ll_assert_core(sch); - tr_info(&ll_tr, "task add %p %pU priority %d flags 0x%x", task, task->uid, + tr_info("task add %p %pU priority %d flags 0x%x", task, task->uid, task->priority, task->flags); zephyr_ll_lock(sch, &flags); @@ -302,7 +301,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, 
struct task *ta * Remove after verification */ zephyr_ll_unlock(sch, &flags); - tr_warn(&ll_tr, "task %p (%pU) already scheduled", + tr_warn("task %p (%pU) already scheduled", task, task->uid); return 0; } @@ -328,7 +327,7 @@ static int zephyr_ll_task_schedule_common(struct zephyr_ll *sch, struct task *ta ret = domain_register(sch->ll_domain, task, &zephyr_ll_run, sch); if (ret < 0) - tr_err(&ll_tr, "zephyr_ll_task_schedule: cannot register domain %d", + tr_err("zephyr_ll_task_schedule: cannot register domain %d", ret); return 0; @@ -372,8 +371,7 @@ static int zephyr_ll_task_free(void *data, struct task *task) zephyr_ll_assert_core(sch); if (k_is_in_isr()) { - tr_err(&ll_tr, - "zephyr_ll_task_free: cannot free tasks from interrupt context!"); + tr_err("zephyr_ll_task_free: cannot free tasks from interrupt context!"); return -EDEADLK; } @@ -467,7 +465,7 @@ static void zephyr_ll_scheduler_free(void *data, uint32_t flags) zephyr_ll_assert_core(sch); if (sch->n_tasks) - tr_err(&ll_tr, "zephyr_ll_scheduler_free: %u tasks are still active!", + tr_err("zephyr_ll_scheduler_free: %u tasks are still active!", sch->n_tasks); } @@ -499,7 +497,7 @@ int zephyr_ll_task_init(struct task *task, pdata = rzalloc(SOF_MEM_ZONE_SYS_SHARED, 0, SOF_MEM_CAPS_RAM, sizeof(*pdata)); if (!pdata) { - tr_err(&ll_tr, "zephyr_ll_task_init(): alloc failed"); + tr_err("zephyr_ll_task_init(): alloc failed"); return -ENOMEM; } diff --git a/src/trace/dma-trace.c b/src/trace/dma-trace.c index 3155c9f72f1c..d31b2aa9da55 100644 --- a/src/trace/dma-trace.c +++ b/src/trace/dma-trace.c @@ -100,7 +100,7 @@ static enum task_state trace_work(void *data) size = dma_copy_to_host(&d->dc, config, d->posn.host_offset, buffer->r_ptr, size); if (size < 0) { - tr_err(&dt_tr, "trace_work(): dma_copy_to_host() failed"); + tr_err("trace_work(): dma_copy_to_host() failed"); goto out; } @@ -185,7 +185,7 @@ int dma_trace_init_complete(struct dma_trace_data *d) { int ret = 0; - tr_info(&dt_tr, "dma_trace_init_complete()"); + 
tr_info("dma_trace_init_complete()"); if (!d) { mtrace_printf(LOG_LEVEL_ERROR, @@ -355,8 +355,7 @@ static int dma_trace_buffer_init(struct dma_trace_data *d) SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH); /* Use a different, DMA: prefix to ease identification of log files */ - tr_info(&dt_tr, - "DMA: " SOF_BANNER_COMMON, + tr_info("DMA: " SOF_BANNER_COMMON, SOF_ABI_VERSION, SOF_ABI_DBG_VERSION, SOF_SRC_HASH); return 0; @@ -661,7 +660,7 @@ static void dtrace_add_event(const char *e, uint32_t length) * so after it we have to recalculate margin and * overflow */ - tr_err(&dt_tr, "dtrace_add_event(): number of dropped logs = %u", + tr_err("dtrace_add_event(): number of dropped logs = %u", tmp_dropped_entries); margin = dtrace_calc_buf_margin(buffer); overflow = dtrace_calc_buf_overflow(buffer, length); diff --git a/src/trace/trace.c b/src/trace/trace.c index ff48bd21e057..0bdbe82071fa 100644 --- a/src/trace/trace.c +++ b/src/trace/trace.c @@ -303,7 +303,7 @@ struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_ filter->pipe_id = elem->value; break; default: - tr_err(&ipc_tr, "Invalid SOF_IPC_TRACE_FILTER_ELEM 0x%x", + tr_err("Invalid SOF_IPC_TRACE_FILTER_ELEM 0x%x", elem->key); return NULL; } @@ -311,7 +311,7 @@ struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_ /* each filter set must be terminated with FIN flag and have new log level */ if (elem->key & SOF_IPC_TRACE_FILTER_ELEM_FIN) { if (filter->log_level < 0) { - tr_err(&ipc_tr, "Each trace filter set must specify new log level"); + tr_err("Each trace filter set must specify new log level"); return NULL; } else { return elem + 1; @@ -321,7 +321,7 @@ struct sof_ipc_trace_filter_elem *trace_filter_fill(struct sof_ipc_trace_filter_ ++elem; } - tr_err(&ipc_tr, "Trace filter elements set is not properly terminated"); + tr_err("Trace filter elements set is not properly terminated"); return NULL; } @@ -370,7 +370,7 @@ static struct tr_ctx 
*trace_filter_ipc_comp_context(struct ipc_comp_dev *icd) return &icd->pipeline->tctx; /* each COMP_TYPE must be specified */ default: - tr_err(&ipc_tr, "Unknown trace context for ipc component type 0x%X", + tr_err("Unknown trace context for ipc component type 0x%X", icd->type); return NULL; } @@ -413,7 +413,7 @@ int trace_filter_update(const struct trace_filter *filter) if (!trace->user_filter_override) { trace->user_filter_override = true; - tr_info(&ipc_tr, "Adaptive filtering disabled by user"); + tr_info("Adaptive filtering disabled by user"); } #endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */ diff --git a/xtos/include/rtos/spinlock.h b/xtos/include/rtos/spinlock.h index 71faa503ccb1..aaf256dc1022 100644 --- a/xtos/include/rtos/spinlock.h +++ b/xtos/include/rtos/spinlock.h @@ -77,8 +77,6 @@ typedef uint32_t k_spinlock_key_t; extern uint32_t lock_dbg_atomic; extern uint32_t lock_dbg_user[DBG_LOCK_USERS]; -extern struct tr_ctx sl_tr; - /* panic on deadlock */ #define spin_try_lock_dbg(lock, line) \ do { \ @@ -115,14 +113,14 @@ extern struct tr_ctx sl_tr; #define spin_lock_dbg(line) \ do { \ - tr_info(&sl_tr, "LcE"); \ - tr_info(&sl_tr, "line: %d", line); \ + tr_info("LcE"); \ + tr_info("line: %d", line); \ } while (0) #define spin_unlock_dbg(line) \ do { \ - tr_info(&sl_tr, "LcX"); \ - tr_info(&sl_tr, "line: %d", line); \ + tr_info("LcX"); \ + tr_info("line: %d", line); \ } while (0) #else /* CONFIG_DEBUG_LOCKS_VERBOSE */ diff --git a/xtos/include/rtos/wait.h b/xtos/include/rtos/wait.h index d222bd5ca7bd..fefb2c9930d4 100644 --- a/xtos/include/rtos/wait.h +++ b/xtos/include/rtos/wait.h @@ -29,13 +29,13 @@ static inline void wait_for_interrupt(int level) { LOG_MODULE_DECLARE(wait, CONFIG_SOF_LOG_LEVEL); - tr_dbg(&wait_tr, "WFE"); + tr_dbg("WFE"); #if CONFIG_DEBUG_LOCKS if (lock_dbg_atomic) tr_err_atomic("atm"); #endif platform_wait_for_interrupt(level); - tr_dbg(&wait_tr, "WFX"); + tr_dbg("WFX"); } /** diff --git a/xtos/include/sof/lib/perf_cnt.h 
b/xtos/include/sof/lib/perf_cnt.h index 1bae17e14480..a4af636f4240 100644 --- a/xtos/include/sof/lib/perf_cnt.h +++ b/xtos/include/sof/lib/perf_cnt.h @@ -40,7 +40,7 @@ struct perf_cnt_data { #if CONFIG_PERFORMANCE_COUNTERS #define perf_cnt_trace(ctx, pcd) \ - tr_info(ctx, "perf plat last %u peak %u cpu last %u, peak %u", \ + tr_info("perf plat last %u peak %u cpu last %u, peak %u", \ (uint32_t)((pcd)->plat_delta_last), \ (uint32_t)((pcd)->plat_delta_peak), \ (uint32_t)((pcd)->cpu_delta_last), \ @@ -88,7 +88,7 @@ struct perf_cnt_data { /* perf measurement windows size 2^x */ #define PERF_CNT_CHECK_WINDOW_SIZE 10 #define task_perf_avg_info(pcd, task_p, class) \ - tr_info(task_p, "perf_cycle task %p, %pU cpu avg %u peak %u",\ + tr_info("perf_cycle task %p, %pU cpu avg %u peak %u",\ class, (class)->uid, \ (uint32_t)((pcd)->cpu_delta_sum), \ (uint32_t)((pcd)->cpu_delta_peak)) diff --git a/zephyr/include/rtos/interrupt.h b/zephyr/include/rtos/interrupt.h index b9559c89b431..53cd2191fa3a 100644 --- a/zephyr/include/rtos/interrupt.h +++ b/zephyr/include/rtos/interrupt.h @@ -33,8 +33,7 @@ static inline int interrupt_register(uint32_t irq, void(*handler)(void *arg), vo return arch_irq_connect_dynamic(irq, 0, (void (*)(const void *))handler, arg, 0); #else - tr_err(&zephyr_tr, "Cannot register handler for IRQ %u: dynamic IRQs are disabled", - irq); + tr_err("Cannot register handler for IRQ %u: dynamic IRQs are disabled", irq); return -EOPNOTSUPP; #endif } diff --git a/zephyr/lib/alloc.c b/zephyr/lib/alloc.c index 7a395397fe88..6cf6e2099a9a 100644 --- a/zephyr/lib/alloc.c +++ b/zephyr/lib/alloc.c @@ -33,8 +33,6 @@ LOG_MODULE_REGISTER(mem_allocator, CONFIG_SOF_LOG_LEVEL); -extern struct tr_ctx zephyr_tr; - /* * Memory - Create Zephyr HEAP for SOF. 
* @@ -157,7 +155,7 @@ static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t by struct sys_memory_stats stats; #endif if (!cpu_is_primary(arch_proc_id())) { - tr_err(&zephyr_tr, "L3_HEAP available only for primary core!"); + tr_err("L3_HEAP available only for primary core!"); return NULL; } @@ -167,7 +165,7 @@ static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t by #if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4 sys_heap_runtime_stats_get(&h->heap, &stats); - tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u", + tr_info("heap allocated: %u free: %u max allocated: %u", stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes); #endif @@ -177,7 +175,7 @@ static void *l3_heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t by static void l3_heap_free(struct k_heap *h, void *mem) { if (!cpu_is_primary(arch_proc_id())) { - tr_err(&zephyr_tr, "L3_HEAP available only for primary core!"); + tr_err("L3_HEAP available only for primary core!"); return; } @@ -203,7 +201,7 @@ static void *heap_alloc_aligned(struct k_heap *h, size_t min_align, size_t bytes #if CONFIG_SYS_HEAP_RUNTIME_STATS && CONFIG_IPC_MAJOR_4 sys_heap_runtime_stats_get(&h->heap, &stats); - tr_info(&zephyr_tr, "heap allocated: %u free: %u max allocated: %u", + tr_info("heap allocated: %u free: %u max allocated: %u", stats.allocated_bytes, stats.free_bytes, stats.max_allocated_bytes); #endif @@ -287,7 +285,7 @@ void *rmalloc(enum mem_zone zone, uint32_t flags, uint32_t caps, size_t bytes) heap = &l3_heap; /* Uncached L3_HEAP should be not used */ if (!zone_is_cached(zone)) { - tr_err(&zephyr_tr, "L3_HEAP available for cached zones only!"); + tr_err("L3_HEAP available for cached zones only!"); return NULL; } ptr = (__sparse_force void *)l3_heap_alloc_aligned(heap, 0, bytes); @@ -334,7 +332,7 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, /* Original version returns NULL without freeing 
this memory */ if (!bytes) { /* TODO: Should we call rfree(ptr); */ - tr_err(&zephyr_tr, "realloc failed for 0 bytes"); + tr_err("realloc failed for 0 bytes"); return NULL; } @@ -347,7 +345,7 @@ void *rbrealloc_align(void *ptr, uint32_t flags, uint32_t caps, size_t bytes, rfree(ptr); - tr_info(&zephyr_tr, "rbealloc: new ptr %p", new_ptr); + tr_info("rbealloc: new ptr %p", new_ptr); return new_ptr; } @@ -388,7 +386,7 @@ void *rballoc_align(uint32_t flags, uint32_t caps, size_t bytes, heap = &l3_heap; return (__sparse_force void *)l3_heap_alloc_aligned(heap, align, bytes); #else - tr_err(&zephyr_tr, "L3_HEAP not available."); + tr_err("L3_HEAP not available."); return NULL; #endif } else { diff --git a/zephyr/lib/cpu.c b/zephyr/lib/cpu.c index 87ca92b9d36f..488678086dc6 100644 --- a/zephyr/lib/cpu.c +++ b/zephyr/lib/cpu.c @@ -66,8 +66,6 @@ static FUNC_NORETURN void secondary_init(void *arg) LOG_MODULE_DECLARE(zephyr, CONFIG_SOF_LOG_LEVEL); -extern struct tr_ctx zephyr_tr; - /* address where zephyr PM will save memory during D3 transition */ #ifdef CONFIG_ADSP_IMR_CONTEXT_SAVE extern void *global_imr_ram_storage; @@ -102,7 +100,7 @@ void cpu_notify_state_entry(enum pm_state state) /* If no IMR buffer we can not recover */ if (!global_imr_ram_storage) { - tr_err(&zephyr_tr, "failed to allocate global_imr_ram_storage"); + tr_err("failed to allocate global_imr_ram_storage"); k_panic(); } @@ -178,13 +176,13 @@ void cpu_disable_core(int id) __ASSERT_NO_MSG(cpu_is_primary(arch_proc_id())); if (!arch_cpu_active(id)) { - tr_warn(&zephyr_tr, "core %d is already disabled", id); + tr_warn("core %d is already disabled", id); return; } #if defined(CONFIG_PM) /* TODO: before requesting core shut down check if it's not actively used */ if (!pm_state_force(id, &(struct pm_state_info){PM_STATE_SOFT_OFF, 0, 0})) { - tr_err(&zephyr_tr, "failed to set PM_STATE_SOFT_OFF on core %d", id); + tr_err("failed to set PM_STATE_SOFT_OFF on core %d", id); return; } @@ -203,12 +201,12 @@ void 
cpu_disable_core(int id) idelay(PLATFORM_DEFAULT_DELAY); if (arch_cpu_active(id)) { - tr_err(&zephyr_tr, "core %d did not enter idle state", id); + tr_err("core %d did not enter idle state", id); return; } if (soc_adsp_halt_cpu(id) != 0) - tr_err(&zephyr_tr, "failed to disable core %d", id); + tr_err("failed to disable core %d", id); #endif /* CONFIG_PM */ } diff --git a/zephyr/lib/pm_runtime.c b/zephyr/lib/pm_runtime.c index 99a23c549d30..5c583c1ca74d 100644 --- a/zephyr/lib/pm_runtime.c +++ b/zephyr/lib/pm_runtime.c @@ -59,7 +59,7 @@ const struct pm_state_info *pm_policy_next_state(uint8_t cpu, int32_t ticks) /* TODO: PM_STATE_RUNTIME_IDLE requires substates to be defined * to handle case with enabled PG andf disabled CG. */ - tr_dbg(&power_tr, "transition to state %x (min_residency = %u, exit_latency = %u)", + tr_dbg("transition to state %x (min_residency = %u, exit_latency = %u)", state->state, min_residency, exit_latency); return state; } @@ -74,7 +74,7 @@ void platform_pm_runtime_enable(uint32_t context, uint32_t index) switch (context) { case PM_RUNTIME_DSP: pm_policy_state_lock_put(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES); - tr_dbg(&power_tr, "removing prevent on d0i3 (lock is active=%d)", + tr_dbg("removing prevent on d0i3 (lock is active=%d)", pm_policy_state_lock_is_active(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES)); break; default: @@ -86,7 +86,7 @@ void platform_pm_runtime_disable(uint32_t context, uint32_t index) { switch (context) { case PM_RUNTIME_DSP: - tr_dbg(&power_tr, "putting prevent on d0i3"); + tr_dbg("putting prevent on d0i3"); pm_policy_state_lock_get(PM_STATE_RUNTIME_IDLE, PM_ALL_SUBSTATES); break; default: diff --git a/zephyr/wrapper.c b/zephyr/wrapper.c index e2fed0558a43..1616370c6e39 100644 --- a/zephyr/wrapper.c +++ b/zephyr/wrapper.c @@ -62,8 +62,7 @@ int interrupt_register(uint32_t irq, void(*handler)(void *arg), void *arg) return arch_irq_connect_dynamic(irq, 0, (void (*)(const void *))handler, arg, 0); #else - tr_err(&zephyr_tr, 
"Cannot register handler for IRQ %u: dynamic IRQs are disabled", - irq); + tr_err("Cannot register handler for IRQ %u: dynamic IRQs are disabled", irq); return -EOPNOTSUPP; #endif } From e89eafcd4de3b17b1c197b133b1979a2dabbd039 Mon Sep 17 00:00:00 2001 From: Baofeng Tian Date: Mon, 19 Feb 2024 10:26:25 +0800 Subject: [PATCH 13/13] sof-logger: remove component name print for sof logger Due to previous change, there is no valid component uuid for sof logger, so component name lookup will return unknown, remove this part. Signed-off-by: Baofeng Tian --- tools/logger/convert.c | 25 ++----------------------- 1 file changed, 2 insertions(+), 23 deletions(-) diff --git a/tools/logger/convert.c b/tools/logger/convert.c index 5571706439c4..6b3fd9770171 100644 --- a/tools/logger/convert.c +++ b/tools/logger/convert.c @@ -406,25 +406,6 @@ static const char *get_level_name(uint32_t level) } } -static const char *get_component_name(uint32_t trace_class, uint32_t uid_ptr) -{ - const struct snd_sof_uids_header *uids_dict = global_config->uids_dict; - const struct sof_uuid_entry *uid_entry; - - /* if uid_ptr is non-zero, find name in the ldc file */ - if (uid_ptr) { - if (uid_ptr < uids_dict->base_address || - uid_ptr >= uids_dict->base_address + - uids_dict->data_length) - return ""; - uid_entry = get_uuid_entry(uid_ptr); - return uid_entry->name; - } - - /* do not resolve legacy (deprecated) trace class name */ - return "unknown"; -} - /* remove superfluous leading file path and shrink to last 20 chars */ static char *format_file_name(char *file_name_raw, int full_name) { @@ -512,12 +493,11 @@ static void print_entry_params(const struct log_entry_header *dma_log, ids[0] = '\0'; if (raw_output) { /* "raw" means script-friendly (not all hex) */ - fprintf(out_fd, "%s%u %u %s%s%s ", + fprintf(out_fd, "%s%u %u %s%s ", entry->header.level == use_colors ? (LOG_LEVEL_CRITICAL ? 
KRED : KNRM) : "", dma_log->core_id, entry->header.level, - get_component_name(entry->header.component_class, 0), raw_output && strlen(ids) ? "-" : "", ids); @@ -546,9 +526,8 @@ static void print_entry_params(const struct log_entry_header *dma_log, fprintf(out_fd, "c%d ", dma_log->core_id); /* component name and id */ - fprintf(out_fd, "%s%-12s %-5s%s ", + fprintf(out_fd, "%s %-5s%s ", use_colors ? KYEL : "", - get_component_name(entry->header.component_class, 0), ids, use_colors ? KNRM : "");