diff --git a/src/schedule/Kconfig b/src/schedule/Kconfig
index 18867f3abdf2..40e077101686 100644
--- a/src/schedule/Kconfig
+++ b/src/schedule/Kconfig
@@ -11,3 +11,32 @@ config SCHEDULE_DMA_MULTI_CHANNEL
 	default n
 	help
 	  Enable multi-channel DMA scheduler
+
+config SCHEDULE_LL_STATS_LOG
+	bool "Log low-latency scheduler statistics"
+	default y
+	help
+	  Log statistics from the low-latency scheduler. This is a low-overhead
+	  mechanism to gather average and worst-case execution times of
+	  low-latency scheduler invocations. A report is printed to the
+	  logging subsystem (rate defined via SCHEDULE_LL_STATS_LOG_WINDOW_SIZE).
+
+config SCHEDULE_LL_STATS_LOG_EVERY_OTHER_WINDOW
+	bool "Log only every other low-latency report"
+	depends on SCHEDULE_LL_STATS_LOG
+	help
+	  Output statistics only for every other statistics-gathering window.
+	  This is useful to filter out the impact of the reporting itself. With
+	  many logging implementations, the first iteration of a window shows a
+	  spike in execution time caused by logging the results of the previous
+	  window. By skipping every other window, this reporting overhead can
+	  be excluded from the reported figures.
+
+config SCHEDULE_LL_STATS_LOG_WINDOW_SIZE
+	int "Low-latency statistics window size"
+	default 10
+	depends on SCHEDULE_LL_STATS_LOG
+	help
+	  Size of the statistics window as a power of two. The window size
+	  also sets the reporting rate. With a 1 ms scheduler tick, the
+	  default of 10 results in a 1024 ms window.
diff --git a/src/schedule/zephyr_domain.c b/src/schedule/zephyr_domain.c
index ff5dbc018921..6413976e6017 100644
--- a/src/schedule/zephyr_domain.c
+++ b/src/schedule/zephyr_domain.c
@@ -58,21 +58,38 @@ struct zephyr_domain {
 };
 
 /* perf measurement windows size 2^x */
-#define CYCLES_WINDOW_SIZE	10
+#define CYCLES_WINDOW_SIZE	CONFIG_SCHEDULE_LL_STATS_LOG_WINDOW_SIZE
+
+#ifdef CONFIG_SCHEDULE_LL_STATS_LOG
+static inline void stats_report(unsigned int runs, int core, unsigned int cycles_sum,
+				unsigned int cycles_max, unsigned int overruns)
+{
+#ifdef CONFIG_SCHEDULE_LL_STATS_LOG_EVERY_OTHER_WINDOW
+	if (runs & BIT(CYCLES_WINDOW_SIZE))
+		return;
+#endif
+	tr_info(&ll_tr, "ll core %u timer avg %u, max %u, overruns %u",
+		core, cycles_sum, cycles_max, overruns);
+}
+#endif /* CONFIG_SCHEDULE_LL_STATS_LOG */
 
 static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 {
 	struct zephyr_domain *zephyr_domain = p1;
 	int core = cpu_get_id();
 	struct zephyr_domain_thread *dt = zephyr_domain->domain_thread + core;
+#ifdef CONFIG_SCHEDULE_LL_STATS_LOG
 	unsigned int runs = 0, overruns = 0, cycles_sum = 0, cycles_max = 0;
 	unsigned int cycles0, cycles1, diff, timer_fired;
+#endif
 
 	for (;;) {
 		/* immediately go to sleep, waiting to be woken up by the timer */
 		k_sem_take(&dt->sem, K_FOREVER);
 
+#ifdef CONFIG_SCHEDULE_LL_STATS_LOG
 		cycles0 = k_cycle_get_32();
+#endif
 
 #if CONFIG_CROSS_CORE_STREAM
 		/*
@@ -95,6 +112,8 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 #endif
 
 		dt->handler(dt->arg);
+
+#ifdef CONFIG_SCHEDULE_LL_STATS_LOG
 		cycles1 = k_cycle_get_32();
 
 		/* This handles wrapping correctly too */
@@ -107,15 +126,14 @@ static void zephyr_domain_thread_fn(void *p1, void *p2, void *p3)
 		cycles_sum += diff;
 		cycles_max = diff > cycles_max ? diff : cycles_max;
 
-		if (++runs == 1 << CYCLES_WINDOW_SIZE) {
+		if (!(++runs & MASK(CYCLES_WINDOW_SIZE - 1, 0))) {
 			cycles_sum >>= CYCLES_WINDOW_SIZE;
-			tr_info(&ll_tr, "ll core %u timer avg %u, max %u, overruns %u",
-				core, cycles_sum, cycles_max, overruns);
+			stats_report(runs, core, cycles_sum, cycles_max, overruns);
 			cycles_sum = 0;
 			cycles_max = 0;
-			runs = 0;
 			overruns = 0;
 		}
+#endif /* CONFIG_SCHEDULE_LL_STATS_LOG */
 
 		/* Feed the watchdog */
 		watchdog_feed(core);
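
Note on the window arithmetic: the patch stops resetting runs each window and instead lets it free-run, so the low bits of runs mark the window boundary and the next bit up carries the window parity used by the EVERY_OTHER_WINDOW filter. The standalone sketch below illustrates that behaviour; it is not part of the patch, its BIT()/MASK() macros are simplified local stand-ins for the SOF definitions, and CYCLES_WINDOW_SIZE is shrunk to 3 only so the trace stays short (the patch defaults to 10).

/*
 * Standalone sketch, not part of the patch: BIT()/MASK() here are
 * simplified stand-ins for the SOF macros of the same name.
 */
#include <stdio.h>

#define BIT(b)			(1u << (b))
#define MASK(b_hi, b_lo)	((BIT((b_hi) - (b_lo) + 1) - 1) << (b_lo))

/* small window (2^3 = 8 ticks) to keep the output short */
#define CYCLES_WINDOW_SIZE	3

int main(void)
{
	unsigned int runs = 0;
	int tick;

	for (tick = 0; tick < 4 * (1 << CYCLES_WINDOW_SIZE); tick++) {
		/* same window-boundary test as the patched loop: true once per 2^W runs */
		if (!(++runs & MASK(CYCLES_WINDOW_SIZE - 1, 0))) {
			/*
			 * EVERY_OTHER_WINDOW filter: the bit just above the
			 * window mask alternates each window, so every other
			 * window boundary is skipped.
			 */
			int skipped = !!(runs & BIT(CYCLES_WINDOW_SIZE));

			printf("runs=%u: window boundary, %s\n",
			       runs, skipped ? "skipped" : "reported");
		}
	}
	return 0;
}

At each boundary runs equals k * 2^CYCLES_WINDOW_SIZE, so the tested bit is just the parity of k (here: runs = 8 skipped, 16 reported, 24 skipped, 32 reported). Dropping the old runs = 0 reset is what makes this parity check possible.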
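
To exercise the new options, a config fragment along these lines should work; the option names come from this patch, but which overlay or defconfig file it belongs in depends on the build setup, so the placement is only illustrative:

CONFIG_SCHEDULE_LL_STATS_LOG=y
CONFIG_SCHEDULE_LL_STATS_LOG_EVERY_OTHER_WINDOW=y
# 2^11 ticks per window, i.e. ~2048 ms at a 1 ms scheduler tick
CONFIG_SCHEDULE_LL_STATS_LOG_WINDOW_SIZE=11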