kernel: introduce k_smp_cpu_start() and k_smp_cpu_resume() #64755
@@ -0,0 +1,71 @@
/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
#define ZEPHYR_INCLUDE_KERNEL_SMP_H_

#include <stdbool.h>

typedef void (*smp_init_fn)(void *arg);

/**
 * @brief Start a CPU.
 *
 * This routine is used to manually start the CPU specified
 * by @a id. It may be called to restart a CPU that had been
 * stopped or powered down, as well as in certain other
 * scenarios. After the CPU has finished initialization,
 * the CPU will be ready to participate in thread scheduling
 * and execution.
 *
 * @note This function must not be used on a currently running
 *       CPU. The target CPU must be in an off state, or in
 *       certain architectural state(s) where the CPU is
 *       permitted to go through the power-up process.
 *       Detection of such state(s) must be provided by
 *       the platform layers.
 *
 * @note This initializes per-CPU kernel structs and also
 *       initializes timers needed for MP operations.
 *       Use @ref k_smp_cpu_resume if these are not desired.
 *
 * @param id ID of target CPU.
 * @param fn Function to be called before letting the scheduler run.
 * @param arg Argument to @a fn.
 */
void k_smp_cpu_start(int id, smp_init_fn fn, void *arg);

/**
 * @brief Resume a previously suspended CPU.
 *
 * This function works like @ref k_smp_cpu_start, but does not
 * re-initialize the kernel's internal tracking data for
 * the target CPU. Therefore, @ref k_smp_cpu_start must have
 * previously been called for the target CPU, and it must have
 * verifiably reached an idle/off state (detection of which
 * must be provided by the platform layers). It may be used
 * in cases where platform layers require, for example, that
 * data on the interrupt or idle stack be preserved.
 *
 * @note This function must not be used on a currently running
 *       CPU. The target CPU must be in a suspended state, or
 *       in certain architectural state(s) where the CPU is
 *       permitted to go through the resume process.
 *       Detection of such state(s) must be provided by
 *       the platform layers.
 *
 * @param id ID of target CPU.
 * @param fn Function to be called before resuming context.
 * @param arg Argument to @a fn.
 * @param reinit_timer True if the timer needs to be re-initialized.
 * @param invoke_sched True if the scheduler should be invoked after
 *                     the CPU has started.
 */
void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
		      bool reinit_timer, bool invoke_sched);

#endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
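As a usage illustration, a platform could bring up a secondary CPU with an early-init hook as sketched below. This is not code from the PR: the callback, the CPU ID, and the function names are hypothetical; it only assumes the header is included as <zephyr/kernel/smp.h>, the path kernel/smp.c itself uses in this diff.

/* Hypothetical caller of k_smp_cpu_start(); not part of the PR. */
#include <zephyr/kernel.h>
#include <zephyr/kernel/smp.h>

/* Runs on the target CPU before the scheduler takes over there,
 * e.g. to set up CPU-local peripherals or interrupt routing.
 */
static void secondary_init(void *arg)
{
	ARG_UNUSED(arg);
}

void bring_up_second_cpu(void)
{
	/* Power up CPU 1, call secondary_init(NULL) on it, then let
	 * the scheduler start running threads on that CPU.
	 */
	k_smp_cpu_start(1, secondary_init, NULL);
}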
@@ -4,14 +4,56 @@
 
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/spinlock.h>
 #include <kswap.h>
 #include <kernel_internal.h>
 
 static atomic_t global_lock;
 
+/**
+ * Flag to tell a recently powered up CPU to start
+ * its initialization routine.
+ *
+ * 0 to tell the powered up CPU to wait.
+ * 1 to tell the powered up CPU to continue initialization.
+ */
 static atomic_t cpu_start_flag;
 
+/**
+ * Flag to tell the caller that the target CPU is now
+ * powered up and ready to be initialized.
+ *
+ * 0 if the target CPU is not yet ready.
+ * 1 if the target CPU has powered up and is ready to be initialized.
+ */
 static atomic_t ready_flag;
 
+/**
+ * Struct holding the function to be called before handing off
+ * to the scheduler, along with its argument.
+ */
+static struct cpu_start_cb {
+	/**
+	 * Function to be called before handing off to the scheduler.
+	 * Can be NULL.
+	 */
+	smp_init_fn fn;
+
+	/** Argument to @ref cpu_start_fn.fn. */
+	void *arg;
+
+	/** Invoke scheduler after CPU has started if true. */
+	bool invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	/** True if smp_timer_init() needs to be called. */
+	bool reinit_timer;
+#endif
+} cpu_start_fn;
+
+static struct k_spinlock cpu_start_lock;
+
 unsigned int z_smp_global_lock(void)
 {
 	unsigned int key = arch_irq_lock();
@@ -64,62 +106,135 @@ static void wait_for_start_signal(atomic_t *start_flag)
 	}
 }
 
 /* Legacy interfaces for early-version SOF CPU bringup. To be removed */
 #ifdef CONFIG_SOF
 void z_smp_thread_init(void *arg, struct k_thread *thread)
 {
 	z_dummy_thread_init(thread);
 	wait_for_start_signal(arg);
 }
 void z_smp_thread_swap(void)
 {
 	z_swap_unlocked();
 }
 #endif
 
-static inline FUNC_NORETURN void smp_init_top(void *arg)
+static inline void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
+	struct cpu_start_cb *csc = arg;
 
+	/* Let start_cpu() know that this CPU has powered up. */
 	(void)atomic_set(&ready_flag, 1);
 
-	wait_for_start_signal(arg);
-	z_dummy_thread_init(&dummy_thread);
+	/* Wait for the CPU start caller to signal that
+	 * we can start initialization.
+	 */
+	wait_for_start_signal(&cpu_start_flag);
 
 #ifdef CONFIG_SYS_CLOCK_EXISTS
-	smp_timer_init();
+	if ((csc == NULL) || csc->reinit_timer) {
+		smp_timer_init();
+	}
 #endif
 
+	/* Do additional initialization steps if needed. */
+	if ((csc != NULL) && (csc->fn != NULL)) {
+		csc->fn(csc->arg);
+	}
+
+	if ((csc != NULL) && !csc->invoke_sched) {
+		/* Don't invoke scheduler. */
+		return;
+	}
+
+	/* Initialize the dummy thread struct so that
+	 * the scheduler can schedule actual threads to run.
+	 */
+	z_dummy_thread_init(&dummy_thread);
+
+	/* Let the scheduler decide what thread to run next. */
 	z_swap_unlocked();
 
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 
-static void start_cpu(int id, atomic_t *start_flag)
+static void start_cpu(int id, struct cpu_start_cb *csc)
 {
-	z_init_cpu(id);
+	/* Clear the ready flag so the newly powered up CPU can
+	 * signal that it has powered up.
+	 */
 	(void)atomic_clear(&ready_flag);
 
+	/* Power up the CPU. */
 	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, start_flag);
+		       smp_init_top, csc);
 
+	/* Wait until the newly powered up CPU signals that
+	 * it has powered up.
+	 */
 	while (!atomic_get(&ready_flag)) {
 		local_delay();
 	}
}
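The two flags implement a small handshake between the initiating CPU and the target CPU: the initiator clears ready_flag, powers the target on, and spins until the target announces itself; the target, in turn, spins on cpu_start_flag until the initiator releases it. The following host-side model of that handshake, using C11 atomics and POSIX threads, is an illustration only, not Zephyr code; it swaps local_delay() for sched_yield() and arch_start_cpu() for pthread_create().

/* Host-side model of the start_cpu()/smp_init_top() handshake.
 * Build with: cc -std=c11 -pthread handshake.c
 */
#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int cpu_start_flag; /* initiator -> target: "continue init" */
static atomic_int ready_flag;     /* target -> initiator: "powered up" */

/* Models smp_init_top() on the newly powered up CPU. */
static void *target_cpu(void *arg)
{
	(void)arg;
	atomic_store(&ready_flag, 1);               /* announce power-up */
	while (atomic_load(&cpu_start_flag) == 0) {
		sched_yield();                      /* models local_delay() */
	}
	puts("target: released, running initialization");
	return NULL;
}

/* Models start_cpu() plus the caller releasing cpu_start_flag. */
int main(void)
{
	pthread_t t;

	atomic_store(&cpu_start_flag, 0); /* hold the target in its wait loop */
	atomic_store(&ready_flag, 0);     /* models atomic_clear(&ready_flag) */
	pthread_create(&t, NULL, target_cpu, NULL); /* models arch_start_cpu() */

	while (atomic_load(&ready_flag) == 0) {     /* wait for power-up */
		sched_yield();
	}
	puts("initiator: target CPU is up, releasing it");
	atomic_store(&cpu_start_flag, 1);

	pthread_join(t, NULL);
	return 0;
}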
 
-void z_smp_start_cpu(int id)
+void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
 {
-	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
-	start_cpu(id, &cpu_start_flag);
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = true;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = true;
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
+	(void)atomic_set(&cpu_start_flag, 1);
+
+	/* Initialize various CPU structs related to this CPU. */
+	z_init_cpu(id);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
 }
 
+void k_smp_cpu_resume(int id, smp_init_fn fn, void *arg,
+		      bool reinit_timer, bool invoke_sched)
+{
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+	cpu_start_fn.invoke_sched = invoke_sched;
+
+#ifdef CONFIG_SYS_CLOCK_EXISTS
+	cpu_start_fn.reinit_timer = reinit_timer;
+#else
+	ARG_UNUSED(reinit_timer);
+#endif
+
+	/* We are only starting one CPU so we do not need to synchronize
+	 * across all CPUs using the start_flag. So just set it to 1.
+	 */
+	(void)atomic_set(&cpu_start_flag, 1);
+
+	/* Start the CPU! */
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
+}

Review comment: This comment is really just a longer version of the original ("async, don't care"). Suggest dropping the old one.

Reply: Removed.
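For the resume path, a power-management flow might look like the sketch below. The suspend side and the off-state detection are platform-specific and out of scope here; everything other than k_smp_cpu_resume() itself is a hypothetical illustration.

/* Hypothetical resume flow; assumes CPU 1 was started earlier with
 * k_smp_cpu_start() and has since been suspended by platform code.
 */
static void restore_cpu_context(void *arg)
{
	/* Platform-specific state restore before the CPU resumes. */
	ARG_UNUSED(arg);
}

void resume_second_cpu(void)
{
	/* Bring CPU 1 back without re-initializing the kernel's per-CPU
	 * bookkeeping; re-init its timer (reinit_timer = true) and hand
	 * control to the scheduler once it is up (invoke_sched = true).
	 */
	k_smp_cpu_resume(1, restore_cpu_context, NULL, true, true);
}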
 
 void z_smp_init(void)
 {
+	/* We are powering up all CPUs and we want to synchronize their
+	 * entry into the scheduler. So set the start flag to 0 here.
+	 */
 	(void)atomic_clear(&cpu_start_flag);
 
+	/* Just start CPUs one by one. */
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
-		start_cpu(i, &cpu_start_flag);
+		z_init_cpu(i);
+		start_cpu(i, NULL);
 	}
 
+	/* Let loose those CPUs so they can start scheduling
+	 * threads to run.
+	 */
 	(void)atomic_set(&cpu_start_flag, 1);
 }

Review comment: Passing the NULL here requires you to add tests in a bunch of places in smp_init_top() to handle the NULL pointer. Since the …

Reply: This is done so that if …
@@ -150,6 +150,7 @@ static ALWAYS_INLINE void _restore_core_context(void)
 }
 
 void dsp_restore_vector(void);
+void mp_resume_entry(void);
 
 void power_gate_entry(uint32_t core_id)
 {
@@ -180,6 +181,11 @@ void power_gate_exit(void)
 	cpu_early_init();
 	sys_cache_data_flush_and_invd_all();
 	_restore_core_context();
+
+	/* Secondary core is resumed by set_dx */
+	if (arch_proc_id()) {
+		mp_resume_entry();
+	}
 }
 
 __asm__(".align 4\n\t"

Review comment: This has been causing regressions and test failures (thesofproject/sof#8818). Tentative removal of this part in: …
Review comment: @dcpleung isn't this racy in general? On all CPUs `csc` points to `cpu_start_fn`, right? And the CPU gets here after signalling the initiator CPU, the one that started this one. So the initiator CPU can go ahead and start yet another CPU, potentially with different parameters. So far that shouldn't be a problem for SOF, because there `.fn` is the same for all and `.arg` is always `NULL`, but in general this seems wrong?

Reply: Yes, there is potential for conflicts here. So the caller must take precautions to avoid that situation. Or we can implement a per-CPU data struct (but this is going to take up some space).
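To make the discussion concrete, here is one shape the per-CPU data struct mentioned in the reply could take. This is only a sketch of the suggestion, not code from the PR; it reuses the PR's own cpu_start_cb and helpers and assumes CONFIG_MP_MAX_NUM_CPUS bounds the CPU count, as elsewhere in the kernel.

/* Sketch of the per-CPU alternative discussed above; not part of the PR.
 * Each CPU gets its own cpu_start_cb slot, so starting another CPU cannot
 * overwrite parameters that a still-initializing CPU may yet read.
 */
static struct cpu_start_cb cpu_start_fn[CONFIG_MP_MAX_NUM_CPUS];

void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn[id].fn = fn;
	cpu_start_fn[id].arg = arg;
	cpu_start_fn[id].invoke_sched = true;
#ifdef CONFIG_SYS_CLOCK_EXISTS
	cpu_start_fn[id].reinit_timer = true;
#endif

	(void)atomic_set(&cpu_start_flag, 1);
	z_init_cpu(id);

	/* smp_init_top() on the target would receive &cpu_start_fn[id],
	 * which no later k_smp_cpu_start() call for another CPU touches.
	 */
	start_cpu(id, &cpu_start_fn[id]);

	k_spin_unlock(&cpu_start_lock, key);
}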