kernel: introduce k_smp_cpu_start() and k_smp_cpu_resume() #64755
Changes from 5 commits
include/zephyr/kernel/smp.h (new file, +35 lines):

```c
/*
 * Copyright (c) 2023 Intel Corporation
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_KERNEL_SMP_H_
#define ZEPHYR_INCLUDE_KERNEL_SMP_H_

typedef void (*smp_init_fn)(void *arg);

/**
 * @brief Start a CPU.
 *
 * This routine is used to manually start the CPU specified
 * by @a id. It may be called to restart a CPU that had been
 * stopped or powered down, as well as in other scenarios.
 * After the CPU has finished initialization, the CPU will be
 * ready to participate in thread scheduling and execution.
 *
 * @note This function must not be used on a currently running
 *       CPU. The target CPU must be in an off state, or in
 *       certain architectural state(s) where the CPU is
 *       permitted to go through the power-up process.
 *       Detection of such state(s) must be provided by
 *       the platform layers.
 *
 * @param id ID of target CPU.
 * @param fn Function to be called before letting the scheduler
 *           run.
 * @param arg Argument to @a fn.
 */
void k_smp_cpu_start(int id, smp_init_fn fn, void *arg);

#endif /* ZEPHYR_INCLUDE_KERNEL_SMP_H_ */
```
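For illustration, here is a minimal sketch of how the new API might be used from platform code to bring a secondary core back online after it has been powered down. The CPU id, the `per_cpu_reinit()` callback, and the wrapper function are hypothetical placeholders, not part of this PR.

```c
#include <zephyr/kernel.h>
#include <zephyr/kernel/smp.h>

/* Hypothetical callback: runs on the target CPU after the kernel has
 * re-initialized it, but before the scheduler is allowed to run there.
 */
static void per_cpu_reinit(void *arg)
{
	ARG_UNUSED(arg);
	/* e.g. restore per-CPU platform or driver state here */
}

/* Hypothetical wrapper: CPU 1 must already be off (or in a state the
 * platform layer recognizes as safe to power up) before this call.
 */
void example_bring_cpu1_online(void)
{
	k_smp_cpu_start(1, per_cpu_reinit, NULL);
}
```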
kernel/smp.c:
```diff
@@ -4,6 +4,7 @@
 #include <zephyr/kernel.h>
 #include <zephyr/kernel_structs.h>
+#include <zephyr/kernel/smp.h>
 #include <zephyr/spinlock.h>
 #include <kswap.h>
 #include <kernel_internal.h>
@@ -12,6 +13,23 @@ static atomic_t global_lock;
 static atomic_t cpu_start_flag;
 static atomic_t ready_flag;
 
+/**
+ * Struct holding the function to be called before handing off
+ * to the scheduler and its argument.
+ */
+static struct cpu_start_cb {
+	/**
+	 * Function to be called before handing off to the scheduler.
+	 * Can be NULL.
+	 */
+	smp_init_fn fn;
+
+	/** Argument to @ref cpu_start_fn.fn. */
+	void *arg;
+} cpu_start_fn;
+
+static struct k_spinlock cpu_start_lock;
+
 unsigned int z_smp_global_lock(void)
 {
 	unsigned int key = arch_irq_lock();
```
```diff
@@ -80,35 +98,48 @@ void z_smp_thread_swap(void)
 static inline FUNC_NORETURN void smp_init_top(void *arg)
 {
 	struct k_thread dummy_thread;
+	struct cpu_start_cb *csc = arg;
 
 	(void)atomic_set(&ready_flag, 1);
 
-	wait_for_start_signal(arg);
+	wait_for_start_signal(&cpu_start_flag);
 	z_dummy_thread_init(&dummy_thread);
 #ifdef CONFIG_SYS_CLOCK_EXISTS
 	smp_timer_init();
 #endif
 
+	/* Do additional initialization steps if needed. */
+	if ((csc != NULL) && (csc->fn != NULL)) {
+		csc->fn(csc->arg);
```
Review comment (on `csc->fn(csc->arg);`): @dcpleung isn't this racy in general? On all CPUs

Reply: Yes, there is potential for conflicts here. So the caller must take precautions to avoid that situation. Or we can implement a per-CPU data struct (but this is going to take up some space).
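The per-CPU data struct mentioned in the reply could look roughly like the sketch below (an assumption about one possible follow-up, not part of this change set; `CONFIG_MP_MAX_NUM_CPUS` is Zephyr's existing symbol for the maximum CPU count):

```c
/* Sketch: one callback slot per CPU instead of the single shared
 * cpu_start_fn, so concurrent k_smp_cpu_start() calls targeting
 * different CPUs cannot overwrite each other's callback/argument pair.
 */
static struct cpu_start_cb cpu_start_fn[CONFIG_MP_MAX_NUM_CPUS];

void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
{
	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);

	cpu_start_fn[id].fn = fn;
	cpu_start_fn[id].arg = arg;

	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
	start_cpu(id, &cpu_start_fn[id]);

	k_spin_unlock(&cpu_start_lock, key);
}
```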
```diff
+	}
+
 	z_swap_unlocked();
 
 	CODE_UNREACHABLE; /* LCOV_EXCL_LINE */
 }
 
-static void start_cpu(int id, atomic_t *start_flag)
+static void start_cpu(int id, struct cpu_start_cb *csc)
 {
 	z_init_cpu(id);
 	(void)atomic_clear(&ready_flag);
 	arch_start_cpu(id, z_interrupt_stacks[id], CONFIG_ISR_STACK_SIZE,
-		       smp_init_top, start_flag);
+		       smp_init_top, csc);
 	while (!atomic_get(&ready_flag)) {
 		local_delay();
 	}
 }
 
-void z_smp_start_cpu(int id)
+void k_smp_cpu_start(int id, smp_init_fn fn, void *arg)
 {
+	k_spinlock_key_t key = k_spin_lock(&cpu_start_lock);
+
+	cpu_start_fn.fn = fn;
+	cpu_start_fn.arg = arg;
+
 	(void)atomic_set(&cpu_start_flag, 1); /* async, don't care */
-	start_cpu(id, &cpu_start_flag);
+	start_cpu(id, &cpu_start_fn);
+
+	k_spin_unlock(&cpu_start_lock, key);
 }
 
 void z_smp_init(void)
@@ -118,7 +149,7 @@ void z_smp_init(void)
 	unsigned int num_cpus = arch_num_cpus();
 
 	for (int i = 1; i < num_cpus; i++) {
-		start_cpu(i, &cpu_start_flag);
+		start_cpu(i, NULL);
```
Review comment (on `start_cpu(i, NULL);`): Passing the NULL here requires you to add tests in a bunch of places in smp_init_top() to handle the NULL pointer. Since the […]

Reply: This is done so that if […]

```diff
 	}
 	(void)atomic_set(&cpu_start_flag, 1);
 }
```
Review comment: what about those internal APIs? I see they are still being used in SOF.
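Assuming such callers keep the old behavior, migrating from the removed internal call could be as simple as passing no init callback to the new public API (a hypothetical sketch, not taken from this PR):

```c
#include <zephyr/kernel/smp.h>

/* Old internal call:      z_smp_start_cpu(id);
 * New public equivalent:  no extra init step, so fn and arg are NULL.
 */
static inline void start_secondary_cpu(int id)
{
	k_smp_cpu_start(id, NULL, NULL);
}
```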