diff --git a/arch/x86/core/intel64/cpu.c b/arch/x86/core/intel64/cpu.c
index 16d39ea1f1a3b5..2a845960b87b91 100644
--- a/arch/x86/core/intel64/cpu.c
+++ b/arch/x86/core/intel64/cpu.c
@@ -208,4 +208,6 @@ FUNC_NORETURN void z_x86_cpu_init(struct x86_cpuboot *cpuboot)
 	/* Enter kernel, never return */
 	cpuboot->ready++;
 	cpuboot->fn(cpuboot->arg);
+
+	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
diff --git a/boards/x86/qemu_x86/qemu_x86_64.dts b/boards/x86/qemu_x86/qemu_x86_64.dts
index 83bf2c7fd0f2a7..fc2521047735f3 100644
--- a/boards/x86/qemu_x86/qemu_x86_64.dts
+++ b/boards/x86/qemu_x86/qemu_x86_64.dts
@@ -5,6 +5,17 @@
 
 #include "qemu_x86.dts"
 
+/ {
+	cpus {
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "intel,x86";
+			d-cache-line-size = <64>;
+			reg = <1>;
+		};
+	};
+};
+
 &pcie0 {
 	smbus0: smbus0 {
 		compatible = "intel,pch-smbus";
diff --git a/boards/x86/qemu_x86/qemu_x86_64_nokpti.dts b/boards/x86/qemu_x86/qemu_x86_64_nokpti.dts
index 92522ab8a4ea1c..8a5f2d51154585 100644
--- a/boards/x86/qemu_x86/qemu_x86_64_nokpti.dts
+++ b/boards/x86/qemu_x86/qemu_x86_64_nokpti.dts
@@ -4,3 +4,14 @@
  */
 
 #include "qemu_x86.dts"
+
+/ {
+	cpus {
+		cpu@1 {
+			device_type = "cpu";
+			compatible = "intel,x86";
+			d-cache-line-size = <64>;
+			reg = <1>;
+		};
+	};
+};
diff --git a/include/zephyr/kernel/internal/smp.h b/include/zephyr/kernel/internal/smp.h
index d4f70d3c07b6dc..e2b3ae15aec2a3 100644
--- a/include/zephyr/kernel/internal/smp.h
+++ b/include/zephyr/kernel/internal/smp.h
@@ -6,18 +6,6 @@
 #ifndef ZEPHYR_INCLUDE_KERNEL_INTERNAL_SMP_H_
 #define ZEPHYR_INCLUDE_KERNEL_INTERNAL_SMP_H_
 
-struct k_thread;
-
-/**
- * @internal
- */
-#ifdef CONFIG_SOF
-void z_smp_thread_init(void *arg, struct k_thread *thread);
-void z_smp_thread_swap(void);
-#endif
-
-void z_init_cpu(int id);
 void z_sched_ipi(void);
-void z_smp_start_cpu(int id);
 
 #endif
diff --git a/include/zephyr/kernel/smp.h b/include/zephyr/kernel/smp.h
new file mode 100644
index 00000000000000..883f4820a749f1
--- /dev/null
+++ b/include/zephyr/kernel/smp.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2023 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+#ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
+#define ZEPHYR_INCLUDE_KERNEL_SMP_H_
+
+#include <stdbool.h>
+
+typedef void (*smp_init_fn)(void *arg);
+
+/**
+ * @brief Start a CPU.
+ *
+ * This routine is used to manually start the CPU specified
+ * by @a id. It may be called to restart a CPU that had been
+ * stopped or powered down, among other scenarios.
+ * After the CPU has finished initialization, the CPU will be
+ * ready to participate in thread scheduling and execution.
+ *
+ * @note This function must not be used on the currently running
+ *       CPU. The target CPU must be in an off state, or in
+ *       certain architectural state(s) where the CPU is
+ *       permitted to go through the power-up process.
+ *       Detection of such state(s) must be provided by
+ *       the platform layers.
+ *
+ * @note This initializes per-CPU kernel structs and also
+ *       initializes timers needed for MP operations.
+ *       Use @ref k_smp_cpu_resume if these are not
+ *       desired.
+ *
+ * @param id ID of target CPU.
+ * @param fn Function to be called before letting the scheduler
+ *           run.
+ * @param arg Argument to @a fn.
+ */
+void k_smp_cpu_start(int id, smp_init_fn fn, void *arg);
+
+/**
+ * @brief Resume a previously suspended CPU.
+ *
+ * This function works like @ref k_smp_cpu_start, but does not
+ * re-initialize the kernel's internal tracking data for
+ * the target CPU. Therefore, @ref k_smp_cpu_start must have
+ * previously been called for the target CPU, and it must have
+ * verifiably reached an idle/off state (detection of which
+ * must be provided by the platform layers). It may be used
+ * in cases where platform layers require, for example, that
+ * data on the interrupt or idle stack be preserved.
+ *
+ * @note This function must not be used on the currently running
+ *       CPU. The target CPU must be in a suspended state, or
+ *       in certain architectural state(s) where the CPU is
+ *       permitted to go through the resume process.
+ *       Detection of such state(s) must be provided by
+ *       the platform layers.
+ *
+ * @param id ID of target CPU.
+ * @param fn Function to be called before resuming context.
+ * @param arg Argument to @a fn.
+ * @param reinit_timer True if timer needs to be re-initialized.
+ * @param invoke_sched True if scheduler is invoked after the CPU
+ *                     has started.
+ */
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched);
+
+#endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
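For illustration only (not part of the change): a minimal sketch of how an application might call the new k_smp_cpu_start() API. The callback name, flag, and CPU number are invented for this example, and it assumes CONFIG_SMP_BOOT_DELAY=y so that CPU 1 was not already booted at startup.

    #include <zephyr/kernel.h>
    #include <zephyr/kernel/smp.h>

    static volatile bool cpu1_ready;

    /* Runs on CPU 1 after power-up, before the scheduler takes over. */
    static void cpu1_init(void *arg)
    {
            *(volatile bool *)arg = true;
    }

    void bring_up_cpu1(void)
    {
            k_smp_cpu_start(1, cpu1_init, (void *)&cpu1_ready);

            /* k_smp_cpu_start() returns once the target CPU has powered
             * up; poll the flag to know the callback itself has run.
             */
            while (!cpu1_ready) {
                    k_busy_wait(100);
            }
    }
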
diff --git a/include/zephyr/sys/arch_interface.h b/include/zephyr/sys/arch_interface.h
index dd5755c1e231fb..0ffc95c663bc45 100644
--- a/include/zephyr/sys/arch_interface.h
+++ b/include/zephyr/sys/arch_interface.h
@@ -216,7 +216,7 @@ void arch_cpu_atomic_idle(unsigned int key);
  *
  * @param data context parameter, implementation specific
  */
-typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);
+typedef void (*arch_cpustart_t)(void *data);
 
 /**
  * @brief Start a numbered CPU on a MP-capable system
diff --git a/kernel/Kconfig b/kernel/Kconfig
index a01f40551e14e9..1620a3c9aa4c69 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -1177,8 +1177,9 @@ config SMP_BOOT_DELAY
 	depends on SMP
 	help
 	  By default Zephyr will boot all available CPUs during start up.
-	  Select this option to skip this and allow architecture code boot
-	  secondary CPUs at a later time.
+	  Select this option to skip this and allow custom code
+	  (architecture/SoC/board/application) to boot secondary CPUs at
+	  a later time.
 
 config MP_NUM_CPUS
 	int "Number of CPUs/cores [DEPRECATED]"
diff --git a/kernel/include/kernel_internal.h b/kernel/include/kernel_internal.h
index 711313b5ce7969..16efb3b54e8ae0 100644
--- a/kernel/include/kernel_internal.h
+++ b/kernel/include/kernel_internal.h
@@ -24,6 +24,9 @@
 extern "C" {
 #endif
 
+/* Initialize per-CPU kernel data */
+void z_init_cpu(int id);
+
 /* Initialize a thread */
 void z_init_thread_base(struct _thread_base *thread_base, int priority,
 			uint32_t initial_state, unsigned int options);
diff --git a/kernel/smp.c b/kernel/smp.c
index dc30bd5f9edf17..45ec956dff92ac 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -4,14 +4,56 @@
 
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/spinlock.h>
 #include <kswap.h>
 #include <kernel_internal.h>
 
 static atomic_t global_lock;
+
+/**
+ * Flag to tell recently powered up CPU to start
+ * initialization routine.
+ *
+ * 0 to tell powered up CPU to wait.
+ * 1 to tell powered up CPU to continue initialization.
+ */
 static atomic_t cpu_start_flag;
+
+/**
+ * Flag to tell caller that the target CPU is now
+ * powered up and ready to be initialized.
+ *
+ * 0 if target CPU is not yet ready.
+ * 1 if target CPU has powered up and is ready to be initialized.
+ */
 static atomic_t ready_flag;
 
+/**
+ * Struct holding the function to be called before handing off
+ * to the scheduler, and its argument.
+ */
+static struct cpu_start_cb {
+	/**
+	 * Function to be called before handing off to scheduler.
+	 * Can be NULL.
+	 */
+	smp_init_fn fn;
+
+	/** Argument to @ref cpu_start_fn.fn. */
+	void *arg;
+
+	/** Invoke scheduler after CPU has started if true. */
+	bool invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	/** True if smp_timer_init() needs to be called. */
+	bool reinit_timer;
+#endif
+} cpu_start_fn;
+
+static struct k_spinlock cpu_start_lock;
+
 unsigned int z_smp_global_lock(void)
 {
 	unsigned int key = arch_irq_lock();
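Two asides on the hunks above. First, arch_cpustart_t drops FUNC_NORETURN because the entry routine may now legitimately return: smp_init_top() in the next hunk returns instead of entering the scheduler when invoke_sched is false. Second, cpu_start_flag and ready_flag form a simple two-flag rendezvous. Stripped of the kernel details, the protocol behaves roughly like the sketch below (all names are local to the sketch; when starting a single CPU the kernel actually sets the release flag up front):

    #include <zephyr/kernel.h>
    #include <zephyr/sys/atomic.h>

    static atomic_t ready; /* set by the target CPU once it is alive */
    static atomic_t go;    /* set by the caller to release the target CPU */

    void target_cpu_entry(void *arg) /* runs on the freshly powered CPU */
    {
            (void)atomic_set(&ready, 1);   /* "I have powered up" */
            while (atomic_get(&go) == 0) { /* wait for the release signal */
            }
            /* ...per-CPU init, then optionally hand off to the scheduler... */
    }

    void caller(void) /* runs on an already-running CPU */
    {
            (void)atomic_clear(&ready);
            /* arch_start_cpu() points the target CPU at target_cpu_entry() */
            while (atomic_get(&ready) == 0) {
                    /* spin until the target CPU checks in */
            }
            (void)atomic_set(&go, 1);      /* release it */
    }
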
@@ -64,62 +106,135 @@ static void wait_for_start_signal(atomic_t *start_flag)
 	}
 }
 
-/* Legacy interfaces for early-version SOF CPU bringup. To be removed */
-#ifdef CONFIG_SOF
-void z_smp_thread_init(void *arg, struct k_thread *thread)
-{
-	z_dummy_thread_init(thread);
-	wait_for_start_signal(arg);
-}
-
-void z_smp_thread_swap(void)
-{
-	z_swap_unlocked();
-}
-#endif
-
-static inline FUNC_NORETURN void smp_init_top(void *arg)
+static inline void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
+	struct cpu_start_cb *csc = arg;
 
+	/* Let start_cpu() know that this CPU has powered up. */
 	(void)atomic_set(&ready_flag, 1);
-	wait_for_start_signal(arg);
-	z_dummy_thread_init(&dummy_thread);
+
+	/* Wait for the CPU start caller to signal that
+	 * we can start initialization.
+	 */
+	wait_for_start_signal(&cpu_start_flag);
+
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	smp_timer_init();
+	if ((csc == NULL) || csc->reinit_timer) {
+		smp_timer_init();
+	}
 #endif
 
+	/* Do additional initialization steps if needed. */
+	if ((csc != NULL) && (csc->fn != NULL)) {
+		csc->fn(csc->arg);
+	}
+
+	if ((csc != NULL) && !csc->invoke_sched) {
+		/* Don't invoke scheduler. */
+		return;
+	}
+
+	/* Initialize the dummy thread struct so that
+	 * the scheduler can schedule actual threads to run.
+	 */
+	z_dummy_thread_init(&dummy_thread);
+
+	/* Let scheduler decide what thread to run next. */
 	z_swap_unlocked();
 
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 
-static void start_cpu(int id, atomic_t *start_flag)
+static void start_cpu(int id, struct cpu_start_cb *csc)
 {
-	z_init_cpu(id);
+	/* Clear the ready flag so the newly powered up CPU can
+	 * signal that it has powered up.
+	 */
 	(void)atomic_clear(&ready_flag);
+
+	/* Power up the CPU */
 	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, start_flag);
+		       smp_init_top, csc);
+
+	/* Wait until the newly powered up CPU signals that
+	 * it has powered up.
+	 */
 	while (!atomic_get(&ready_flag)) {
 		local_delay();
 	}
 }
 
-void z_smp_start_cpu(int id)
+void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
 {
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = true;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = true;
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
 	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
-	start_cpu(id, &cpu_start_flag);
+
+	/* Initialize various CPU structs related to this CPU. */
+	z_init_cpu(id);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
+}
+
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched)
+{
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = reinit_timer;
+#else
+	ARG_UNUSED(reinit_timer);
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
+	(void)atomic_set(&cpu_start_flag, 1);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
 }
 
 void z_smp_init(void)
 {
+	/* We are powering up all CPUs and we want to synchronize their
+	 * entry into the scheduler. So set the start flag to 0 here.
+	 */
 	(void)atomic_clear(&cpu_start_flag);
 
+	/* Just start CPUs one by one. */
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
-		start_cpu(i, &cpu_start_flag);
+		z_init_cpu(i);
+		start_cpu(i, NULL);
 	}
+
+	/* Let loose those CPUs so they can start scheduling
+	 * threads to run.
+	 */
 	(void)atomic_set(&cpu_start_flag, 1);
 }
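To make the start/resume split concrete, a hypothetical suspend/resume cycle on a platform that power-gates a secondary core might look like the sketch below. platform_power_gate_core() is a placeholder, not a real Zephyr API; as the doc comments stress, detecting that the core has verifiably reached its suspended state is the platform layer's job.

    #include <zephyr/kernel/smp.h>

    extern void platform_power_gate_core(int id); /* placeholder */

    void example_core_power_cycle(int id)
    {
            /* Platform-specific: gate power to the core and wait until
             * it has verifiably stopped.
             */
            platform_power_gate_core(id);

            /* Bring the core back without re-running z_init_cpu(): keep
             * the existing timer state (reinit_timer = false) and hand
             * the core back to the scheduler (invoke_sched = true).
             */
            k_smp_cpu_resume(id, NULL, NULL, false, true);
    }
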
diff --git a/soc/xtensa/intel_adsp/ace/power.c b/soc/xtensa/intel_adsp/ace/power.c
index e35443d4cf8b7a..4ef8575645a702 100644
--- a/soc/xtensa/intel_adsp/ace/power.c
+++ b/soc/xtensa/intel_adsp/ace/power.c
@@ -150,6 +150,7 @@ static ALWAYS_INLINE void _restore_core_context(void)
 }
 
 void dsp_restore_vector(void);
+void mp_resume_entry(void);
 
 void power_gate_entry(uint32_t core_id)
 {
@@ -180,6 +181,11 @@ void power_gate_exit(void)
 	cpu_early_init();
 	sys_cache_data_flush_and_invd_all();
 	_restore_core_context();
+
+	/* Secondary core is resumed by set_dx */
+	if (arch_proc_id()) {
+		mp_resume_entry();
+	}
 }
 
 __asm__(".align 4\n\t"
diff --git a/soc/xtensa/intel_adsp/cavs/power.c b/soc/xtensa/intel_adsp/cavs/power.c
index c0a75f5c0ecec4..3ae758ced617a1 100644
--- a/soc/xtensa/intel_adsp/cavs/power.c
+++ b/soc/xtensa/intel_adsp/cavs/power.c
@@ -52,6 +52,7 @@ LOG_MODULE_REGISTER(soc);
  * @biref FW entry point called by ROM during normal boot flow
  */
 extern void rom_entry(void);
+void mp_resume_entry(void);
 
 struct core_state {
 	uint32_t a0;
@@ -104,6 +105,11 @@ void power_gate_exit(void)
 	cpu_early_init();
 	sys_cache_data_flush_and_invd_all();
 	_restore_core_context();
+
+	/* Secondary core is resumed by set_dx */
+	if (arch_proc_id()) {
+		mp_resume_entry();
+	}
 }
 
 __asm__(".align 4\n\t"
diff --git a/soc/xtensa/intel_adsp/common/multiprocessing.c b/soc/xtensa/intel_adsp/common/multiprocessing.c
index de76fa37eac0cb..79d7d1883e0651 100644
--- a/soc/xtensa/intel_adsp/common/multiprocessing.c
+++ b/soc/xtensa/intel_adsp/common/multiprocessing.c
@@ -110,6 +110,11 @@ __imr void z_mp_entry(void)
 	__ASSERT(false, "arch_start_cpu() handler should never return");
 }
 
+void mp_resume_entry(void)
+{
+	start_rec.fn(start_rec.arg);
+}
+
 bool arch_cpu_active(int cpu_num)
 {
 	return soc_cpus_active[cpu_num];
diff --git a/submanifests/optional.yaml b/submanifests/optional.yaml
index b50e9479933cc5..d1b3157722548a 100644
--- a/submanifests/optional.yaml
+++ b/submanifests/optional.yaml
@@ -34,7 +34,7 @@ manifest:
       groups:
         - optional
     - name: sof
-      revision: e7cb489d430dc2181e4a5f7f953ed1eaeec6668d
+      revision: 37dd5e62663fcedca659c2104043529b3855a1b0
      path: modules/audio/sof
       remote: upstream
       groups:
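For context: mp_resume_entry() re-enters the callback that arch_start_cpu() recorded when the core was first started, which is how the kernel regains control of the core after a power-gate exit. A condensed sketch of that mechanism follows; the record struct here is simplified (the real one in multiprocessing.c carries more state, and its exact name may differ), with only the fn/arg usage taken from the diff.

    #include <zephyr/sys/arch_interface.h>

    /* Simplified sketch of the bookkeeping behind mp_resume_entry(). */
    struct cpustart_rec {
            arch_cpustart_t fn; /* kernel entry, e.g. smp_init_top() */
            void *arg;          /* its argument, e.g. &cpu_start_fn */
    };

    static struct cpustart_rec start_rec;

    /* Cold boot: z_mp_entry() calls start_rec.fn(start_rec.arg) once.
     * Warm resume: power_gate_exit() reaches the same callback again via
     * mp_resume_entry(), so the core re-enters the kernel's entry path.
     */
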
diff --git a/tests/boards/intel_adsp/smoke/src/cpus.c b/tests/boards/intel_adsp/smoke/src/cpus.c
index c3852bfb45940c..2c3f30af513e15 100644
--- a/tests/boards/intel_adsp/smoke/src/cpus.c
+++ b/tests/boards/intel_adsp/smoke/src/cpus.c
@@ -3,6 +3,7 @@
  */
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/ztest.h>
 #include <intel_adsp_ipc.h>
 
@@ -188,7 +189,7 @@ static void halt_and_restart(int cpu)
 		k_msleep(50);
 	}
 
-	z_smp_start_cpu(cpu);
+	k_smp_cpu_start(cpu, NULL, NULL);
 
 	/* Startup can be slow */
 	k_msleep(50);
diff --git a/tests/boards/intel_adsp/smoke/src/smpboot.c b/tests/boards/intel_adsp/smoke/src/smpboot.c
index bd9aad34be9a75..c54110e8b2d425 100644
--- a/tests/boards/intel_adsp/smoke/src/smpboot.c
+++ b/tests/boards/intel_adsp/smoke/src/smpboot.c
@@ -3,6 +3,7 @@
  */
 
 #include <zephyr/kernel.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/ztest.h>
 #include "tests.h"
 
@@ -25,8 +26,6 @@ volatile bool mp_flag;
 struct k_thread cpu_thr;
 K_THREAD_STACK_DEFINE(thr_stack, STACKSZ);
 
-extern void z_smp_start_cpu(int id);
-
 static void thread_fn(void *a, void *b, void *c)
 {
 	int cpuid = (int) a;
@@ -62,7 +61,7 @@ ZTEST(intel_adsp_boot, test_1st_smp_boot_delay)
 	zassert_false(mp_flag, "cpu %d must not be running yet", i);
 
 	/* Start the second CPU */
-	z_smp_start_cpu(i);
+	k_smp_cpu_start(i, NULL, NULL);
 
 	/* Verify the thread ran */
 	k_busy_wait(CPU_START_DELAY);
diff --git a/tests/kernel/smp_boot_delay/boards/qemu_x86_64.conf b/tests/kernel/smp_boot_delay/boards/qemu_x86_64.conf
new file mode 100644
index 00000000000000..b4360eba46feb8
--- /dev/null
+++ b/tests/kernel/smp_boot_delay/boards/qemu_x86_64.conf
@@ -0,0 +1,5 @@
+# Copyright (c) 2023 Intel Corporation
+#
+# SPDX-License-Identifier: Apache-2.0
+
+CONFIG_MP_MAX_NUM_CPUS=4
diff --git a/tests/kernel/smp_boot_delay/boards/qemu_x86_64.overlay b/tests/kernel/smp_boot_delay/boards/qemu_x86_64.overlay
new file mode 100644
index 00000000000000..0c9b50c57b0644
--- /dev/null
+++ b/tests/kernel/smp_boot_delay/boards/qemu_x86_64.overlay
@@ -0,0 +1,23 @@
+/*
+ * Copyright (c) 2023 Intel Corporation
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ */
+
+/ {
+	cpus {
+		cpu@2 {
+			device_type = "cpu";
+			compatible = "intel,x86";
+			d-cache-line-size = <64>;
+			reg = <2>;
+		};
+
+		cpu@3 {
+			device_type = "cpu";
+			compatible = "intel,x86";
+			d-cache-line-size = <64>;
+			reg = <3>;
+		};
+	};
+};
diff --git a/tests/kernel/smp_boot_delay/prj.conf b/tests/kernel/smp_boot_delay/prj.conf
index a7c6e63161a855..30b23341a23d5e 100644
--- a/tests/kernel/smp_boot_delay/prj.conf
+++ b/tests/kernel/smp_boot_delay/prj.conf
@@ -1,3 +1,4 @@
 CONFIG_ZTEST=y
 CONFIG_SMP=y
 CONFIG_SMP_BOOT_DELAY=y
+CONFIG_SCHED_CPU_MASK=y
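CONFIG_SCHED_CPU_MASK is enabled because the new test case in main.c (below) pins its worker thread to CPU 2 before that CPU is started. The pattern relies on creating the thread with a K_FOREVER delay so the pin is applied before the thread ever runs:

    k_tid_t tid = k_thread_create(&cpu_thr, thr_stack,
                                  K_THREAD_STACK_SIZEOF(thr_stack),
                                  thread_fn, NULL, NULL, NULL,
                                  1, 0, K_FOREVER); /* created, not started */

    (void)k_thread_cpu_pin(tid, 2); /* requires CONFIG_SCHED_CPU_MASK */
    k_thread_start(tid);            /* now runnable, but only on CPU 2 */
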
diff --git a/tests/kernel/smp_boot_delay/src/main.c b/tests/kernel/smp_boot_delay/src/main.c
index dfa0572f1faf6f..4a1e52559151c0 100644
--- a/tests/kernel/smp_boot_delay/src/main.c
+++ b/tests/kernel/smp_boot_delay/src/main.c
@@ -3,6 +3,7 @@
  */
 
 #include <zephyr/ztest.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/kernel.h>
 
 /* Experimentally 10ms is enough time to get the second CPU to run on
@@ -11,11 +12,10 @@
 #define CPU_START_DELAY 10000
 
 /* IPIs happen much faster than CPU startup */
-#define CPU_IPI_DELAY 100
+#define CPU_IPI_DELAY 1000
 
 BUILD_ASSERT(CONFIG_SMP);
 BUILD_ASSERT(CONFIG_SMP_BOOT_DELAY);
-BUILD_ASSERT(CONFIG_KERNEL_COHERENCE);
 BUILD_ASSERT(CONFIG_MP_MAX_NUM_CPUS > 1);
 
 #define STACKSZ 2048
@@ -23,11 +23,9 @@ char stack[STACKSZ];
 
 volatile bool mp_flag;
 
-struct k_thread cpu1_thr;
+struct k_thread cpu_thr;
 K_THREAD_STACK_DEFINE(thr_stack, STACKSZ);
 
-extern void z_smp_start_cpu(int id);
-
 static void thread_fn(void *a, void *b, void *c)
 {
 	mp_flag = true;
@@ -39,7 +37,7 @@ ZTEST(smp_boot_delay, test_smp_boot_delay)
 	 * another CPU if it was available, but will not preempt us
 	 * unless we block (which we do not).
 	 */
-	k_thread_create(&cpu1_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),
+	k_thread_create(&cpu_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),
 			thread_fn, NULL, NULL, NULL,
 			1, 0, K_NO_WAIT);
 
@@ -48,13 +46,14 @@ ZTEST(smp_boot_delay, test_smp_boot_delay)
 	zassert_false(mp_flag, "CPU1 must not be running yet");
 
 	/* Start the second CPU */
-	z_smp_start_cpu(1);
+	k_smp_cpu_start(1, NULL, NULL);
 
 	/* Verify the thread ran */
 	k_busy_wait(CPU_START_DELAY);
 	zassert_true(mp_flag, "CPU1 did not start");
 
-	k_thread_abort(&cpu1_thr);
+	k_thread_abort(&cpu_thr);
+	k_thread_join(&cpu_thr, K_FOREVER);
 
 	/* Spawn the same thread to do the same thing, but this time
 	 * expect that the thread is going to run synchronously on the
@@ -62,12 +61,67 @@ ZTEST(smp_boot_delay, test_smp_boot_delay)
 	 * IPIs were correctly set up on the runtime-launched CPU.
 	 */
 	mp_flag = false;
-	k_thread_create(&cpu1_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),
+	k_thread_create(&cpu_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),
 			thread_fn, NULL, NULL, NULL,
 			1, 0, K_NO_WAIT);
 	k_busy_wait(CPU_IPI_DELAY);
+
+	k_thread_abort(&cpu_thr);
+	k_thread_join(&cpu_thr, K_FOREVER);
+
 	zassert_true(mp_flag, "CPU1 did not start thread via IPI");
 }
 
+volatile bool custom_init_flag;
+
+void custom_init_fn(void *arg)
+{
+	volatile bool *flag = (void *)arg;
+
+	*flag = true;
+}
+
+ZTEST(smp_boot_delay, test_smp_custom_start)
+{
+	k_tid_t thr;
+
+	if (CONFIG_MP_MAX_NUM_CPUS <= 2) {
+		/* CPU#1 has been started in test_smp_boot_delay,
+		 * so we need another CPU for this test.
+		 */
+		ztest_test_skip();
+	}
+
+	mp_flag = false;
+	custom_init_flag = false;
+
+	/* Create a thread pinned on CPU#2 so that it will not
+	 * run on other CPUs.
+	 */
+	thr = k_thread_create(&cpu_thr, thr_stack, K_THREAD_STACK_SIZEOF(thr_stack),
+			      thread_fn, NULL, NULL, NULL,
+			      1, 0, K_FOREVER);
+	(void)k_thread_cpu_pin(thr, 2);
+	k_thread_start(thr);
+
+	/* Make sure that thread has not run (because the CPU is halted). */
+	k_busy_wait(CPU_START_DELAY);
+	zassert_false(mp_flag, "CPU2 must not be running yet");
+
+	/* Start the third CPU */
+	k_smp_cpu_start(2, custom_init_fn, (void *)&custom_init_flag);
+
+	/* Verify the thread ran */
+	k_busy_wait(CPU_START_DELAY);
+	zassert_true(mp_flag, "CPU2 did not start");
+
+	/* Verify that the custom init function has been called. */
+	zassert_true(custom_init_flag, "Custom init function has not been called.");
+
+	k_thread_abort(&cpu_thr);
+	k_thread_join(&cpu_thr, K_FOREVER);
+}
+
+
 ZTEST_SUITE(smp_boot_delay, NULL, NULL, NULL, NULL, NULL);
diff --git a/tests/kernel/smp_boot_delay/testcase.yaml b/tests/kernel/smp_boot_delay/testcase.yaml
index 4b4954aef37d22..3aef08bc67e099 100644
--- a/tests/kernel/smp_boot_delay/testcase.yaml
+++ b/tests/kernel/smp_boot_delay/testcase.yaml
@@ -3,17 +3,17 @@ tests:
     tags:
       - kernel
       - smp
-    platform_allow: intel_adsp_cavs25
+    platform_allow: intel_adsp_cavs25 qemu_x86_64
     integration_platforms:
-      - intel_adsp_cavs25
+      - qemu_x86_64
   kernel.multiprocessing.smp_boot_delay.minimallibc:
     filter: CONFIG_MINIMAL_LIBC_SUPPORTED
     tags:
       - kernel
       - smp
       - libc
-    platform_allow: intel_adsp_cavs25
+    platform_allow: intel_adsp_cavs25 qemu_x86_64
     integration_platforms:
-      - intel_adsp_cavs25
+      - qemu_x86_64
     extra_configs:
       - CONFIG_MINIMAL_LIBC=y