axtask/api.rs

//! Task APIs for multi-task configuration.

use alloc::{
    string::String,
    sync::{Arc, Weak},
};

use kernel_guard::NoPreemptIrqSave;

pub(crate) use crate::run_queue::{current_run_queue, select_run_queue};

#[doc(cfg(feature = "multitask"))]
pub use crate::task::{CurrentTask, TaskId, TaskInner};
#[doc(cfg(feature = "multitask"))]
pub use crate::task_ext::{TaskExtMut, TaskExtRef};
#[doc(cfg(feature = "multitask"))]
pub use crate::wait_queue::WaitQueue;

/// The reference type of a task.
pub type AxTaskRef = Arc<AxTask>;

/// The weak reference type of a task.
pub type WeakAxTaskRef = Weak<AxTask>;

pub use crate::task::TaskState;

/// The wrapper type for [`cpumask::CpuMask`] with SMP configuration.
pub type AxCpuMask = cpumask::CpuMask<{ axconfig::SMP }>;

cfg_if::cfg_if! {
    if #[cfg(feature = "sched_rr")] {
        const MAX_TIME_SLICE: usize = 5;
        pub(crate) type AxTask = scheduler::RRTask<TaskInner, MAX_TIME_SLICE>;
        pub(crate) type Scheduler = scheduler::RRScheduler<TaskInner, MAX_TIME_SLICE>;
    } else if #[cfg(feature = "sched_cfs")] {
        pub(crate) type AxTask = scheduler::CFSTask<TaskInner>;
        pub(crate) type Scheduler = scheduler::CFScheduler<TaskInner>;
    } else {
        // If no scheduler features are set, use FIFO as the default.
        pub(crate) type AxTask = scheduler::FifoTask<TaskInner>;
        pub(crate) type Scheduler = scheduler::FifoScheduler<TaskInner>;
    }
}

#[cfg(feature = "preempt")]
struct KernelGuardIfImpl;

#[cfg(feature = "preempt")]
#[crate_interface::impl_interface]
impl kernel_guard::KernelGuardIf for KernelGuardIfImpl {
    fn disable_preempt() {
        if let Some(curr) = current_may_uninit() {
            curr.disable_preempt();
        }
    }

    fn enable_preempt() {
        if let Some(curr) = current_may_uninit() {
            curr.enable_preempt(true);
        }
    }
}

/// Gets the current task, or returns [`None`] if the current task is not
/// initialized.
pub fn current_may_uninit() -> Option<CurrentTask> {
    CurrentTask::try_get()
}

/// Gets the current task.
///
/// # Panics
///
/// Panics if the current task is not initialized.
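///
/// # Examples
///
/// A minimal usage sketch; `id()` is assumed to be a `TaskInner` accessor
/// reachable through [`CurrentTask`]'s deref:
///
/// ```rust,ignore
/// // In contexts where the task system may not be initialized yet, prefer the
/// // fallible [`current_may_uninit`]; once the scheduler has been initialized,
/// // `current()` never fails.
/// let curr = axtask::current();
/// let _tid = curr.id();
/// ```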
pub fn current() -> CurrentTask {
    CurrentTask::get()
}

/// Initializes the task scheduler (for the primary CPU).
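///
/// # Examples
///
/// A hypothetical primary-CPU bring-up sketch (the real call sites live in the
/// runtime layer above this crate):
///
/// ```rust,ignore
/// axtask::init_scheduler();               // set up run queues (and timers with `irq`)
/// axtask::spawn(|| { /* first task */ }); // make some work available
/// axtask::run_idle();                     // never returns; keeps yielding to ready tasks
/// ```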
pub fn init_scheduler() {
    info!("Initialize scheduling...");

    crate::run_queue::init();
    #[cfg(feature = "irq")]
    crate::timers::init();

    info!("  use {} scheduler.", Scheduler::scheduler_name());
}

/// Initializes the task scheduler for secondary CPUs.
pub fn init_scheduler_secondary() {
    crate::run_queue::init_secondary();
    #[cfg(feature = "irq")]
    crate::timers::init();
}

/// Handles periodic timer ticks for the task manager.
///
/// For example, it advances scheduler states, checks timed events, etc.
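///
/// # Examples
///
/// A sketch of the intended call site; the handler name below is hypothetical
/// and the actual wiring is platform-specific:
///
/// ```rust,ignore
/// // Called from the kernel's periodic timer interrupt handler, where IRQs
/// // (and thus preemption) are already disabled by the interrupt context.
/// fn handle_timer_irq() {
///     // ... acknowledge / reprogram the hardware timer (platform-specific) ...
///     axtask::on_timer_tick();
/// }
/// ```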
#[cfg(feature = "irq")]
#[doc(cfg(feature = "irq"))]
pub fn on_timer_tick() {
    use kernel_guard::NoOp;
    crate::timers::check_events();
    // Since IRQs and preemption are both disabled here, we can access the
    // current run queue with the no-op `kernel_guard::NoOp` guard.
    current_run_queue::<NoOp>().scheduler_timer_tick();
}

/// Adds the given task to the run queue and returns the task reference.
pub fn spawn_task(task: TaskInner) -> AxTaskRef {
    let task_ref = task.into_arc();
    select_run_queue::<NoPreemptIrqSave>(&task_ref).add_task(task_ref.clone());
    task_ref
}

/// Spawns a new task with the given parameters.
///
/// Returns the task reference.
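///
/// # Examples
///
/// A minimal sketch; the task name and stack size below are arbitrary:
///
/// ```rust,ignore
/// let task = axtask::spawn_raw(
///     || {
///         // task body runs here
///     },
///     "worker".into(),
///     0x4000, // 16 KiB stack
/// );
/// ```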
pub fn spawn_raw<F>(f: F, name: String, stack_size: usize) -> AxTaskRef
where
    F: FnOnce() + Send + 'static,
{
    spawn_task(TaskInner::new(f, name, stack_size))
}

/// Spawns a new task with the default parameters.
///
/// The default task name is an empty string. The default task stack size is
/// [`axconfig::TASK_STACK_SIZE`].
///
/// Returns the task reference.
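///
/// # Examples
///
/// A minimal sketch:
///
/// ```rust,ignore
/// let task = axtask::spawn(|| {
///     // runs concurrently with the spawning task
/// });
/// ```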
pub fn spawn<F>(f: F) -> AxTaskRef
where
    F: FnOnce() + Send + 'static,
{
    spawn_raw(f, "".into(), axconfig::TASK_STACK_SIZE)
}

/// Sets the priority for the current task.
///
/// The range of the priority is dependent on the underlying scheduler. For
/// example, in the [CFS] scheduler, the priority is the nice value, ranging from
/// -20 to 19.
///
/// Returns `true` if the priority is set successfully.
///
/// [CFS]: https://en.wikipedia.org/wiki/Completely_Fair_Scheduler
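///
/// # Examples
///
/// A sketch assuming the `sched_cfs` feature is enabled:
///
/// ```rust,ignore
/// // Lower the current task's priority slightly (CFS nice value 5).
/// assert!(axtask::set_priority(5));
/// ```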
pub fn set_priority(prio: isize) -> bool {
    current_run_queue::<NoPreemptIrqSave>().set_current_priority(prio)
}

/// Sets the CPU affinity for the current task.
///
/// [`AxCpuMask`] is used to specify the CPU affinity.
///
/// Returns `true` if the affinity is set successfully.
///
/// TODO: support setting the affinity for other tasks.
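///
/// # Examples
///
/// A sketch that pins the current task to CPU 0, assuming the `one_shot`
/// constructor provided by the underlying `cpumask` crate:
///
/// ```rust,ignore
/// let mask = axtask::AxCpuMask::one_shot(0); // only bit 0 set
/// assert!(axtask::set_current_affinity(mask));
/// ```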
pub fn set_current_affinity(cpumask: AxCpuMask) -> bool {
    if cpumask.is_empty() {
        false
    } else {
        let curr = current().clone();

        curr.set_cpumask(cpumask);
        // After setting the affinity, check whether the current CPU matches it.
        // If not, migrate the task to a CPU allowed by the new affinity.
        #[cfg(feature = "smp")]
        if !cpumask.get(axhal::cpu::this_cpu_id()) {
            const MIGRATION_TASK_STACK_SIZE: usize = 4096;
            // Spawn a new migration task to perform the migration.
            let migration_task = TaskInner::new(
                move || crate::run_queue::migrate_entry(curr),
                "migration-task".into(),
                MIGRATION_TASK_STACK_SIZE,
            )
            .into_arc();

            // Migrate the current task to the correct CPU using the migration task.
            current_run_queue::<NoPreemptIrqSave>().migrate_current(migration_task);

            assert!(cpumask.get(axhal::cpu::this_cpu_id()), "Migration failed");
        }
        true
    }
}

/// Current task gives up the CPU time voluntarily, and switches to another
/// ready task.
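///
/// # Examples
///
/// A sketch of cooperative yielding inside a long-running loop:
///
/// ```rust,ignore
/// loop {
///     // ... do a bounded chunk of work ...
///     axtask::yield_now(); // let other ready tasks run
/// }
/// ```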
pub fn yield_now() {
    current_run_queue::<NoPreemptIrqSave>().yield_current()
}

/// Current task is going to sleep for the given duration.
///
/// If the feature `irq` is not enabled, it uses busy-wait instead.
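///
/// # Examples
///
/// A minimal sketch:
///
/// ```rust,ignore
/// use core::time::Duration;
///
/// // Block the current task for (at least) 10 milliseconds.
/// axtask::sleep(Duration::from_millis(10));
/// ```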
pub fn sleep(dur: core::time::Duration) {
    sleep_until(axhal::time::wall_time() + dur);
}

/// Current task is going to sleep; it will be woken up at the given deadline.
///
/// If the feature `irq` is not enabled, it uses busy-wait instead.
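///
/// # Examples
///
/// A minimal sketch using the wall-clock time from `axhal`:
///
/// ```rust,ignore
/// use core::time::Duration;
///
/// // Wake up roughly one second from now.
/// let deadline = axhal::time::wall_time() + Duration::from_secs(1);
/// axtask::sleep_until(deadline);
/// ```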
pub fn sleep_until(deadline: axhal::time::TimeValue) {
    #[cfg(feature = "irq")]
    current_run_queue::<NoPreemptIrqSave>().sleep_until(deadline);
    #[cfg(not(feature = "irq"))]
    axhal::time::busy_wait_until(deadline);
}

/// Exits the current task.
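///
/// # Examples
///
/// A minimal sketch of a task terminating itself with an explicit exit code:
///
/// ```rust,ignore
/// axtask::spawn(|| {
///     // ... task body ...
///     axtask::exit(0); // terminate this task with exit code 0
/// });
/// ```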
pub fn exit(exit_code: i32) -> ! {
    current_run_queue::<NoPreemptIrqSave>().exit_current(exit_code)
}

/// The idle task routine.
///
/// It runs an infinite loop that keeps calling [`yield_now()`].
pub fn run_idle() -> ! {
    loop {
        yield_now();
        debug!("idle task: waiting for IRQs...");
        #[cfg(feature = "irq")]
        axhal::arch::wait_for_irqs();
    }
}