3 changes: 2 additions & 1 deletion src/include/sof/lib/perf_cnt.h
@@ -62,7 +62,8 @@ struct perf_cnt_data {
*
* If current arch delta exceeds the previous peak value, trace_m is run.
* \param pcd Performance counters data.
* \param trace_m Trace macro trace_m(pcd, arg).
* \param trace_m Trace function trace_m(pcd, arg) or trace macro if a
* more precise line number is desired in the logs.
* \param arg Argument passed to trace_m as arg.
*/
#define perf_cnt_stamp(pcd, trace_m, arg) do { \
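A minimal sketch (not part of this patch; the names `my_tr`, `my_perf_trace`, `my_perf_trace_m` and `measured_work` are made up, and it assumes a .c file that already includes the perf_cnt and trace headers and declares the `my_tr` context) of the two shapes `trace_m` can take after this doc change:

```c
/* Function form: less duplicated code, but the line number recorded with
 * the log entry is the tr_info() line inside this helper, not the caller.
 */
static void my_perf_trace(struct perf_cnt_data *pcd, int ignored)
{
	tr_info(&my_tr, "perf peak plat %u cpu %u",
		(uint32_t)pcd->plat_delta_peak,
		(uint32_t)pcd->cpu_delta_peak);
}

/* Macro form: tr_info() expands at the perf_cnt_stamp() call site, so the
 * logged line number points at that exact line.
 */
#define my_perf_trace_m(pcd, arg) \
	tr_info(&my_tr, "perf peak plat %u cpu %u", \
		(uint32_t)((pcd)->plat_delta_peak), \
		(uint32_t)((pcd)->cpu_delta_peak))

static void measured_work(struct perf_cnt_data *pcd)
{
	/* ... the code being measured ... */

	perf_cnt_stamp(pcd, my_perf_trace, 0 /* ignored */);
	/* or, when a precise line number matters in the logs: */
	/* perf_cnt_stamp(pcd, my_perf_trace_m, 0); */
}
```

Both work because perf_cnt_stamp() is itself a macro, so `trace_m(pcd, arg)` expands whether `trace_m` names a function or another macro.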
16 changes: 10 additions & 6 deletions src/lib/agent.c
@@ -44,15 +44,19 @@ DECLARE_SOF_UUID("sa", sa_uuid, 0x5276b491, 0x5b64, 0x464e,

DECLARE_TR_CTX(sa_tr, SOF_UUID(sa_uuid), LOG_LEVEL_INFO);

#define perf_sa_trace(pcd, sa) \
Member: Using a macro preserves __func__ and __LINE__. Can you check that the output is the same as before with this change?

Collaborator (author): I considered this. It is still possible to use macros; I did not remove that feature. I even made that clearer, see the very first hunk in perf_cnt.h. However, I replaced these two specific macros because each is local to a single .c file and invoked only once in that .c file, so in these two particular cases __LINE__ would be completely useless: git grep is more than enough for these two.

BTW, we can't use __func__ because it's not compatible with the dictionary: __func__ is not a static string that can be concatenated, and only 32-bit parameters are supported.

tr_info(&sa_tr, "perf sys_load peak plat %u cpu %u", \
(uint32_t)((pcd)->plat_delta_peak), \
(uint32_t)((pcd)->cpu_delta_peak))

/* c63c4e75-8f61-4420-9319-1395932efa9e */
DECLARE_SOF_UUID("agent-work", agent_work_task_uuid, 0xc63c4e75, 0x8f61, 0x4420,
0x93, 0x19, 0x13, 0x95, 0x93, 0x2e, 0xfa, 0x9e);

#if CONFIG_PERFORMANCE_COUNTERS
static void perf_sa_trace(struct perf_cnt_data *pcd, int ignored)
{
tr_info(&sa_tr, "perf sys_load peak plat %u cpu %u",
(uint32_t)((pcd)->plat_delta_peak),
(uint32_t)((pcd)->cpu_delta_peak));
}
#endif

static enum task_state validate(void *data)
{
struct sa *sa = data;
@@ -62,7 +66,7 @@ static enum task_state validate(void *data)
current = platform_timer_get(timer_get());
delta = current - sa->last_check;

perf_cnt_stamp(&sa->pcd, perf_sa_trace, sa);
perf_cnt_stamp(&sa->pcd, perf_sa_trace, 0 /* ignored */);

#if CONFIG_AGENT_PANIC_ON_DELAY
/* panic timeout */
14 changes: 9 additions & 5 deletions src/schedule/ll_schedule.c
@@ -76,10 +76,14 @@ struct ll_schedule_data {

static const struct scheduler_ops schedule_ll_ops;

#define perf_ll_sched_trace(pcd, ll_sched) \
tr_info(&ll_tr, "perf ll_work peak plat %u cpu %u", \
(uint32_t)((pcd)->plat_delta_peak), \
(uint32_t)((pcd)->cpu_delta_peak))
#if CONFIG_PERFORMANCE_COUNTERS
static void perf_ll_sched_trace(struct perf_cnt_data *pcd, int ignored)
{
tr_info(&ll_tr, "perf ll_work peak plat %u cpu %u",
(uint32_t)((pcd)->plat_delta_peak),
(uint32_t)((pcd)->cpu_delta_peak));
}
#endif

static bool schedule_ll_is_pending(struct ll_schedule_data *sch)
{
@@ -265,7 +269,7 @@ static void schedule_ll_tasks_run(void *data)
notifier_event(sch, NOTIFIER_ID_LL_POST_RUN,
NOTIFIER_TARGET_CORE_LOCAL, NULL, 0);

perf_cnt_stamp(&sch->pcd, perf_ll_sched_trace, sch);
perf_cnt_stamp(&sch->pcd, perf_ll_sched_trace, 0 /* ignored */);

spin_lock(&domain->lock);

24 changes: 15 additions & 9 deletions src/trace/Kconfig
@@ -37,33 +37,38 @@ config TRACE_FILTERING
depends on TRACE
default y
help
Filtering of trace messages based on their verbosity level and frequency.
Filtering of trace messages based on their verbosity level and/or frequency.

config TRACE_FILTERING_VERBOSITY
bool "Filter by verbosity"
depends on TRACE_FILTERING
default y
help
Filtering by log verbosity level, where maximum verbosity allowed is specified for each
context and may be adjusted in runtime.
context and may be adjusted in runtime. Most basic feature found in every logger.

config TRACE_FILTERING_ADAPTIVE
bool "Adaptive rate limiting"
depends on TRACE_FILTERING
default y
help
Adaptive filtering of trace messages, tracking up to CONFIG_TRACE_RECENT_ENTRIES_COUNT,
suppressing all repeated messages for up to CONFIG_TRACE_RECENT_TIME_THRESHOLD cycles.
Adaptive filtering of repeating trace messages, tracking up to
CONFIG_TRACE_RECENT_ENTRIES_COUNT and suppressing messages repeated in less than
CONFIG_TRACE_RECENT_TIME_THRESHOLD microseconds. This filtering and suppression are
turned off as soon as the user makes any runtime configuration change using the
sof-logger -F flag. Warning: log message parameters (formatted with a conversion
specifier %_) are ignored when comparing repeated messages and they are dropped by
this filtering.

config TRACE_RECENT_ENTRIES_COUNT
int "Amount of entries considered recent"
int "How many recent log messages are stored"
depends on TRACE_FILTERING_ADAPTIVE
default 5
help
Recent entries are used to determine whether currently processed message was sent in the
past. Size of that filtering window affects effectiveness and performance. More recent
entries allow to better filter repetitive messeges out, but also slightly decrease
performance due to increased number of comparisions necessary.
The most recent log messages are stored to match the currently processed
message. The size of that window affects effectiveness and performance. More recent
entries allow to better filter repetitive messages out, but also slightly decrease
performance due to the increased number of comparisons necessary.

config TRACE_RECENT_TIME_THRESHOLD
int "Period of time considered recent (microseconds)"
@@ -72,6 +77,7 @@ config TRACE_RECENT_TIME_THRESHOLD
range 1 TRACE_RECENT_MAX_TIME
help
Period of time during which entries are tracked and will be suppressed if reported again.
Entries expire after this time.

config TRACE_RECENT_MAX_TIME
int "Maximum period of time that message can be suppressed (microseconds)"
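For a concrete feel of how the three values above interact, here is a stripped-down illustrative model in C of the suppression decision for a single tracked message ID. It is not the real implementation (that is in src/trace/trace.c, next in this diff); the `burst`/`allow_message` names are made up and the numeric values are examples only:

```c
#include <stdbool.h>
#include <stdint.h>

/* Example values; the real ones come from the Kconfig symbols above. */
#define RECENT_TIME_THRESHOLD	1500U		/* like CONFIG_TRACE_RECENT_TIME_THRESHOLD, us */
#define RECENT_MAX_TIME		5000000U	/* like CONFIG_TRACE_RECENT_MAX_TIME, us */
#define BURST_COUNT		4U		/* like CONFIG_TRACE_BURST_COUNT */

struct burst {
	uint64_t first_ts;	/* when the current burst started */
	uint64_t last_ts;	/* when this message was last seen */
	uint32_t count;		/* repetitions seen in this burst */
};

/* Return true if this repetition should still be printed. */
static bool allow_message(struct burst *b, uint64_t now)
{
	bool same_burst = now - b->first_ts < RECENT_MAX_TIME &&
			  now - b->last_ts < RECENT_TIME_THRESHOLD;

	if (!same_burst) {
		/* Burst over: the real code emits a "Suppressed N similar
		 * messages" summary here when count exceeded BURST_COUNT.
		 */
		b->first_ts = now;
		b->count = 0;
	}

	b->last_ts = now;
	b->count++;

	/* Only the first BURST_COUNT repetitions of a burst are printed. */
	return b->count <= BURST_COUNT;
}
```

With these example numbers, a message repeating every millisecond prints four times and is then suppressed; once the gap grows beyond 1.5 ms or the burst has lasted 5 s, the burst ends, a summary is emitted and printing resumes.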
43 changes: 26 additions & 17 deletions src/trace/trace.c
@@ -129,7 +129,8 @@ static inline bool trace_filter_verbosity(uint32_t lvl, const struct tr_ctx *ctx
#endif /* CONFIG_TRACE_FILTERING_VERBOSITY */

#if CONFIG_TRACE_FILTERING_ADAPTIVE
static void emit_recent_entry(struct recent_log_entry *entry)
/** Report how many times an entry was suppressed and clear it. */
static void emit_suppressed_entry(struct recent_log_entry *entry)
{
_log_message(trace_log_unfiltered, false, LOG_LEVEL_INFO, _TRACE_INV_CLASS, &dt_tr,
_TRACE_INV_ID, _TRACE_INV_ID, "Suppressed %u similar messages: %pQ",
@@ -139,6 +140,9 @@ static void emit_recent_entry(struct recent_log_entry *entry)
memset(entry, 0, sizeof(*entry));
}

/** Flush entries that have not been seen again in the last
* CONFIG_TRACE_RECENT_TIME_THRESHOLD microseconds before current_ts.
*/
static void emit_recent_entries(uint64_t current_ts)
{
struct trace *trace = trace_get();
@@ -152,7 +156,7 @@ static void emit_recent_entries(uint64_t current_ts)
if (current_ts - recent_entries[i].message_ts >
CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
if (recent_entries[i].trigger_count > CONFIG_TRACE_BURST_COUNT)
emit_recent_entry(&recent_entries[i]);
emit_suppressed_entry(&recent_entries[i]);
else
memset(&recent_entries[i], 0, sizeof(recent_entries[i]));
}
@@ -166,7 +170,7 @@
* \param log_level log verbosity level
* \param entry ID of log entry
* \param message_ts message timestamp
* \return false when trace is filtered out, true otherwise
* \return true when the message must be printed by the caller because it was not filtered
*/
static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t message_ts)
{
Expand All @@ -175,44 +179,50 @@ static bool trace_filter_flood(uint32_t log_level, uint32_t entry, uint64_t mess
trace->trace_core_context[cpu_get_id()].recent_entries;
struct recent_log_entry *oldest_entry = recent_entries;
int i;
bool ret;

/* don't attempt to suppress debug messages using this method, it would be ineffective */
if (log_level >= LOG_LEVEL_DEBUG)
return true;

/* check if same log entry was sent recently */
for (i = 0; i < CONFIG_TRACE_RECENT_ENTRIES_COUNT; i++) {
/* in the meantime identify the oldest or an empty entry, for reuse later */
/* Identify the oldest (or empty) entry to evict if we have no match */
if (oldest_entry->first_suppression_ts > recent_entries[i].first_suppression_ts)
oldest_entry = &recent_entries[i];

if (recent_entries[i].entry_id == entry) {
/* We have a match but include this message in this burst only if the
* burst:
- 1. hasn't lasted for too long;
- 2. hasn't been quiet for too long.
*/
if (message_ts - recent_entries[i].first_suppression_ts <
CONFIG_TRACE_RECENT_MAX_TIME &&
message_ts - recent_entries[i].message_ts <
CONFIG_TRACE_RECENT_TIME_THRESHOLD) {
recent_entries[i].trigger_count++;
/* refresh suppression timer */
/* Refresh last seen time */
recent_entries[i].message_ts = message_ts;

ret = recent_entries[i].trigger_count <= CONFIG_TRACE_BURST_COUNT;

return ret;
/* Allow the start of a burst to be printed normally */
return recent_entries[i].trigger_count <= CONFIG_TRACE_BURST_COUNT;
}

/* Emit and clear this burst */
if (recent_entries[i].trigger_count > CONFIG_TRACE_BURST_COUNT)
emit_recent_entry(&recent_entries[i]);
emit_suppressed_entry(&recent_entries[i]);
else
memset(&recent_entries[i], 0, sizeof(recent_entries[i]));

return true;
}
}

/* Make room for tracking new entry, by emitting the oldest one in the filter */
if (oldest_entry->entry_id && oldest_entry->trigger_count > CONFIG_TRACE_BURST_COUNT)
emit_recent_entry(oldest_entry);
emit_suppressed_entry(oldest_entry);

/* Start a new burst */
oldest_entry->entry_id = entry;
oldest_entry->message_ts = message_ts;
oldest_entry->first_suppression_ts = message_ts;
@@ -265,10 +275,6 @@ void trace_log_filtered(bool send_atomic, const void *log_entry, const struct tr
{
struct trace *trace = trace_get();

#if CONFIG_TRACE_FILTERING_ADAPTIVE
uint64_t current_ts;
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

if (!trace->enable) {
return;
}
@@ -280,7 +286,7 @@

#if CONFIG_TRACE_FILTERING_ADAPTIVE
if (!trace->user_filter_override) {
current_ts = platform_safe_get_time(timer_get());
const uint64_t current_ts = platform_safe_get_time(timer_get());

emit_recent_entries(current_ts);

@@ -424,7 +430,10 @@ int trace_filter_update(const struct trace_filter *filter)
#if CONFIG_TRACE_FILTERING_ADAPTIVE
struct trace *trace = trace_get();

trace->user_filter_override = true;
if (!trace->user_filter_override) {
trace->user_filter_override = true;
tr_info(&ipc_tr, "Adaptive filtering disabled by user");
}
#endif /* CONFIG_TRACE_FILTERING_ADAPTIVE */

/* validate log level, LOG_LEVEL_CRITICAL has low value, LOG_LEVEL_VERBOSE high */