diff --git a/include/zephyr/kernel/smp.h b/include/zephyr/kernel/smp.h
index 7cad1d0f5e9ea02..c6db32e3709d5b1 100644
--- a/include/zephyr/kernel/smp.h
+++ b/include/zephyr/kernel/smp.h
@@ -7,6 +7,8 @@
 #ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
 #define ZEPHYR_INCLUDE_KERNEL_SMP_H_
 
+typedef void (*smp_custom_init_fn)(void *arg);
+
 /**
  * @brief Start one CPU.
  *
@@ -28,4 +30,30 @@
  */
 void k_smp_cpu_start(int id);
 
+/**
+ * @brief Start one CPU with an additional initialization callback.
+ *
+ * This starts (powers up/starts up) a CPU that has stopped
+ * or powered down. After the CPU is started, the provided
+ * function (@a fn) will be called to perform any additional
+ * initialization (e.g. setting up data structures). After
+ * that, the scheduler will run, and the CPU will participate
+ * in the execution of threads.
+ *
+ * @note This unconditionally starts a CPU regardless of
+ *       its current state. Depending on the architecture,
+ *       it may have no effect or very bad side effects.
+ *       Use this with caution.
+ *
+ * @warning Parallel calls to this function will result
+ *          in undesired behavior. Please make sure there
+ *          is only one call to this function at a time.
+ *
+ * @param id ID of target CPU.
+ * @param fn Function to be called before letting the scheduler
+ *           run.
+ * @param arg Argument to @a fn.
+ */
+void k_smp_cpu_custom_start(int id, smp_custom_init_fn fn, void *arg);
+
 #endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
diff --git a/kernel/Kconfig b/kernel/Kconfig
index e282737ae1fca0f..05a5b2944250678 100644
--- a/kernel/Kconfig
+++ b/kernel/Kconfig
@@ -1142,6 +1142,12 @@ config SMP_BOOT_DELAY
 	  (architecture/SoC/board/application) to boot secondary CPUs
 	  at a later time.
 
+config SMP_NEED_CUSTOM_START_FUNC
+	bool "Allow use of k_smp_cpu_custom_start()"
+	depends on SMP_BOOT_DELAY
+	help
+	  This allows k_smp_cpu_custom_start() to be used.
+
 config MP_NUM_CPUS
 	int "Number of CPUs/cores [DEPRECATED]"
 	default MP_MAX_NUM_CPUS
diff --git a/kernel/smp.c b/kernel/smp.c
index a7a298f544291ef..f26cf57287c0556 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -10,8 +10,40 @@
 #include
 
 static atomic_t global_lock;
-static atomic_t cpu_start_flag;
-static atomic_t ready_flag;
+
+/**
+ * Struct related to powering up one CPU.
+ */
+static struct cpu_start {
+	/**
+	 * Flag to tell the recently powered-up CPU to start
+	 * its initialization routine.
+	 *
+	 * 0 to tell the powered-up CPU to wait.
+	 * 1 to tell the powered-up CPU to continue initialization.
+	 */
+	atomic_t start_flag;
+
+	/**
+	 * Flag to tell the caller that the target CPU is now
+	 * powered up and ready to be initialized.
+	 *
+	 * 0 if the target CPU is not yet ready.
+	 * 1 if the target CPU has powered up and is ready to be initialized.
+	 */
+	atomic_t ready_flag;
+
+#ifdef CONFIG_SMP_NEED_CUSTOM_START_FUNC
+	/**
+	 * Function to be called before handing off to the scheduler.
+	 * Can be NULL.
+	 */
+	smp_custom_init_fn fn;
+
+	/** Argument to @a fn. */
+	void *arg;
+#endif
+} cpu_start_data;
 
 unsigned int z_smp_global_lock(void)
 {
@@ -81,47 +113,71 @@ void z_smp_thread_swap(void)
 static inline FUNC_NORETURN void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
+	struct cpu_start *csd = arg;
 
-	(void)atomic_set(&ready_flag, 1);
+	(void)atomic_set(&cpu_start_data.ready_flag, 1);
 
-	wait_for_start_signal(arg);
+	wait_for_start_signal(&csd->start_flag);
 	z_dummy_thread_init(&dummy_thread);
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	smp_timer_init();
 #endif
 
+#ifdef CONFIG_SMP_NEED_CUSTOM_START_FUNC
+	/* Do additional initialization steps if needed. */
+	if (csd->fn != NULL) {
+		csd->fn(csd->arg);
+	}
+#endif
+
 	z_swap_unlocked();
 
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 
-static void start_cpu(int id, atomic_t *start_flag)
+static void start_cpu(int id, smp_custom_init_fn fn, void *arg)
 {
+#ifdef CONFIG_SMP_NEED_CUSTOM_START_FUNC
+	cpu_start_data.fn = fn;
+	cpu_start_data.arg = arg;
+#else
+	ARG_UNUSED(fn);
+	ARG_UNUSED(arg);
+#endif
+
 	z_init_cpu(id);
-	(void)atomic_clear(&ready_flag);
+	(void)atomic_clear(&cpu_start_data.ready_flag);
 	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, start_flag);
+		       smp_init_top, &cpu_start_data);
-	while (!atomic_get(&ready_flag)) {
+	while (!atomic_get(&cpu_start_data.ready_flag)) {
 		local_delay();
 	}
 }
 
+#ifdef CONFIG_SMP_NEED_CUSTOM_START_FUNC
+void k_smp_cpu_custom_start(int id, smp_custom_init_fn fn, void *arg)
+{
+	(void)atomic_set(&cpu_start_data.start_flag, 1); /* async, don't care */
+	start_cpu(id, fn, arg);
+}
+#endif
+
 void k_smp_cpu_start(int id)
 {
-	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
-	start_cpu(id, &cpu_start_flag);
+	(void)atomic_set(&cpu_start_data.start_flag, 1); /* async, don't care */
+	start_cpu(id, NULL, NULL);
 }
 
 void z_smp_init(void)
 {
-	(void)atomic_clear(&cpu_start_flag);
+	(void)atomic_clear(&cpu_start_data.start_flag);
 
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
-		start_cpu(i, &cpu_start_flag);
+		start_cpu(i, NULL, NULL);
 	}
-	(void)atomic_set(&cpu_start_flag, 1);
+	(void)atomic_set(&cpu_start_data.start_flag, 1);
 }
 
 bool z_smp_cpu_mobile(void)
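As an illustration of the new API, below is a minimal sketch of how board or application code might use k_smp_cpu_custom_start() with CONFIG_SMP_BOOT_DELAY and CONFIG_SMP_NEED_CUSTOM_START_FUNC enabled. The callback name, the context structure, and the choice of CPU 1 are hypothetical and not part of this patch; the callback runs on the newly started CPU before the scheduler begins executing threads there, as described in the header comment above.

#include <zephyr/kernel.h>
#include <zephyr/kernel/smp.h>

/* Hypothetical per-CPU context to be set up before the scheduler runs. */
struct cpu_early_ctx {
	int cpu_id;
};

static struct cpu_early_ctx cpu1_ctx = { .cpu_id = 1 };

/* Runs on the newly started CPU, after it reports ready and before
 * the scheduler takes over on that CPU.
 */
static void cpu1_early_init(void *arg)
{
	struct cpu_early_ctx *ctx = arg;

	/* Initialize any data structures that must exist before the
	 * first thread is scheduled on this CPU.
	 */
	ARG_UNUSED(ctx);
}

/* Called later from application code to bring CPU 1 online. */
void app_bring_up_cpu1(void)
{
	k_smp_cpu_custom_start(1, cpu1_early_init, &cpu1_ctx);
}

Per the @warning in the header, only one such start call should be in progress at a time.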