axtask/run_queue.rs

use alloc::collections::VecDeque;
use alloc::sync::Arc;
use core::mem::MaybeUninit;

#[cfg(feature = "smp")]
use alloc::sync::Weak;

use kernel_guard::BaseGuard;
use kspin::SpinRaw;
use lazyinit::LazyInit;
use scheduler::BaseScheduler;

use axhal::cpu::this_cpu_id;

use crate::task::{CurrentTask, TaskState};
use crate::wait_queue::WaitQueueGuard;
use crate::{AxCpuMask, AxTaskRef, Scheduler, TaskInner, WaitQueue};

macro_rules! percpu_static {
    ($(
        $(#[$comment:meta])*
        $name:ident: $ty:ty = $init:expr
    ),* $(,)?) => {
        $(
            $(#[$comment])*
            #[percpu::def_percpu]
            static $name: $ty = $init;
        )*
    };
}

percpu_static! {
    RUN_QUEUE: LazyInit<AxRunQueue> = LazyInit::new(),
    EXITED_TASKS: VecDeque<AxTaskRef> = VecDeque::new(),
    WAIT_FOR_EXIT: WaitQueue = WaitQueue::new(),
    IDLE_TASK: LazyInit<AxTaskRef> = LazyInit::new(),
    /// Stores a weak reference to the previous task that was running on this CPU.
    #[cfg(feature = "smp")]
    PREV_TASK: Weak<crate::AxTask> = Weak::new(),
}
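
// For reference, each entry of `percpu_static!` above expands to a per-CPU static
// defined via `percpu::def_percpu` (a sketch of the expansion, not additional code):
//
//     #[percpu::def_percpu]
//     static RUN_QUEUE: LazyInit<AxRunQueue> = LazyInit::new();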

/// An array of references to run queues, one for each CPU, indexed by cpu_id.
///
/// This static variable holds references to the run queues for each CPU in the system.
///
/// # Safety
///
/// Access to this variable is marked as `unsafe` because it contains `MaybeUninit` references,
/// which require careful handling to avoid undefined behavior. The array should be fully
/// initialized before being accessed to ensure safe usage.
static mut RUN_QUEUES: [MaybeUninit<&'static mut AxRunQueue>; axconfig::SMP] =
    [ARRAY_REPEAT_VALUE; axconfig::SMP];
#[allow(clippy::declare_interior_mutable_const)] // It's OK because it's only used to initialize `RUN_QUEUES`.
const ARRAY_REPEAT_VALUE: MaybeUninit<&'static mut AxRunQueue> = MaybeUninit::uninit();

/// Returns a reference to the current run queue as a [`CurrentRunQueueRef`].
///
/// ## Safety
///
/// This function returns a static reference to the current run queue, which
/// is inherently unsafe. It assumes that the `RUN_QUEUE` has been properly
/// initialized and is not accessed concurrently in a way that could cause
/// data races or undefined behavior.
///
/// ## Returns
///
/// * [`CurrentRunQueueRef`] - a static reference to the current [`AxRunQueue`].
#[inline(always)]
pub(crate) fn current_run_queue<G: BaseGuard>() -> CurrentRunQueueRef<'static, G> {
    let irq_state = G::acquire();
    CurrentRunQueueRef {
        inner: unsafe { RUN_QUEUE.current_ref_mut_raw() },
        current_task: crate::current(),
        state: irq_state,
        _phantom: core::marker::PhantomData,
    }
}
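
// Illustrative usage (a sketch, not code from this module's callers): scheduling
// operations acquire the kernel guard when the ref is created and release it when
// the returned `CurrentRunQueueRef` is dropped, e.g.
//
//     current_run_queue::<kernel_guard::NoPreemptIrqSave>().yield_current();
//
// This mirrors the call pattern referenced in the safety comments of `exit_current()` below.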

/// Selects the run queue index based on a CPU set bitmap and load balancing.
///
/// This function filters the available run queues based on the provided `cpumask` and
/// selects the run queue index for the next task. The selection is based on a round-robin algorithm.
///
/// ## Arguments
///
/// * `cpumask` - A bitmap representing the CPUs that are eligible for task execution.
///
/// ## Returns
///
/// The index (cpu_id) of the selected run queue.
///
/// ## Panics
///
/// This function will panic if `cpumask` is empty, indicating that there are no available CPUs for task execution.
#[cfg(feature = "smp")]
// The modulo operation is safe here because `axconfig::SMP` is always greater than 1 with "smp" enabled.
#[allow(clippy::modulo_one)]
#[inline]
fn select_run_queue_index(cpumask: AxCpuMask) -> usize {
    use core::sync::atomic::{AtomicUsize, Ordering};
    static RUN_QUEUE_INDEX: AtomicUsize = AtomicUsize::new(0);

    assert!(!cpumask.is_empty(), "No available CPU for task execution");

    // Round-robin selection of the run queue index.
    loop {
        let index = RUN_QUEUE_INDEX.fetch_add(1, Ordering::SeqCst) % axconfig::SMP;
        if cpumask.get(index) {
            return index;
        }
    }
}
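
// Worked example (hypothetical values): with `axconfig::SMP == 4`, a cpumask that
// allows only CPUs 1 and 3, and `RUN_QUEUE_INDEX` currently at 8, the loop probes
// 8 % 4 == 0 (rejected), then 9 % 4 == 1 (allowed) and returns index 1.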

/// Retrieves a `'static` reference to the run queue corresponding to the given index.
///
/// This function asserts that the provided index is within the range of available CPUs
/// and returns a reference to the corresponding run queue.
///
/// ## Arguments
///
/// * `index` - The index of the run queue to retrieve.
///
/// ## Returns
///
/// A reference to the `AxRunQueue` corresponding to the provided index.
///
/// ## Panics
///
/// This function will panic if the index is out of bounds.
#[cfg(feature = "smp")]
#[inline]
fn get_run_queue(index: usize) -> &'static mut AxRunQueue {
    unsafe { RUN_QUEUES[index].assume_init_mut() }
}

/// Selects the appropriate run queue for the provided task.
///
/// * In a single-core system, this function always returns a reference to the global run queue.
/// * In a multi-core system, this function selects the run queue based on the task's CPU affinity and load balancing.
///
/// ## Arguments
///
/// * `task` - A reference to the task for which a run queue is being selected.
///
/// ## Returns
///
/// * [`AxRunQueueRef`] - a static reference to the selected [`AxRunQueue`] (current or remote).
///
/// ## TODO
///
/// 1. Implement better load balancing across CPUs for more efficient task distribution.
/// 2. Use a more generic load balancing algorithm that can be customized or replaced.
#[inline]
pub(crate) fn select_run_queue<G: BaseGuard>(task: &AxTaskRef) -> AxRunQueueRef<'static, G> {
    let irq_state = G::acquire();
    #[cfg(not(feature = "smp"))]
    {
        let _ = task;
        // When SMP is disabled, all tasks are scheduled on the same global run queue.
        AxRunQueueRef {
            inner: unsafe { RUN_QUEUE.current_ref_mut_raw() },
            state: irq_state,
            _phantom: core::marker::PhantomData,
        }
    }
    #[cfg(feature = "smp")]
    {
        // When SMP is enabled, select the run queue based on the task's CPU affinity and load balancing.
        let index = select_run_queue_index(task.cpumask());
        AxRunQueueRef {
            inner: get_run_queue(index),
            state: irq_state,
            _phantom: core::marker::PhantomData,
        }
    }
}

/// [`AxRunQueue`] represents a run queue for the global system or a specific CPU.
pub(crate) struct AxRunQueue {
    /// The ID of the CPU this run queue is associated with.
    cpu_id: usize,
    /// The core scheduler of this run queue.
    /// Since IRQs and preemption are disabled by the kernel guard held by `AxRunQueueRef`,
    /// we just use a simple raw spin lock here.
    scheduler: SpinRaw<Scheduler>,
}

/// A reference to the run queue with a specific guard.
///
/// Note:
/// [`AxRunQueueRef`] is used to get a reference to the run queue on the current CPU
/// or a remote CPU, which is used to add tasks to the run queue or unblock tasks.
/// If you want to perform scheduling operations on the current run queue,
/// see [`CurrentRunQueueRef`].
pub(crate) struct AxRunQueueRef<'a, G: BaseGuard> {
    inner: &'a mut AxRunQueue,
    state: G::State,
    _phantom: core::marker::PhantomData<G>,
}

impl<G: BaseGuard> Drop for AxRunQueueRef<'_, G> {
    fn drop(&mut self) {
        G::release(self.state);
    }
}

/// A reference to the current run queue with a specific guard.
///
/// Note:
/// [`CurrentRunQueueRef`] is used to get a reference to the run queue on the current CPU,
/// on which scheduling operations can be performed.
pub(crate) struct CurrentRunQueueRef<'a, G: BaseGuard> {
    inner: &'a mut AxRunQueue,
    current_task: CurrentTask,
    state: G::State,
    _phantom: core::marker::PhantomData<G>,
}

impl<G: BaseGuard> Drop for CurrentRunQueueRef<'_, G> {
    fn drop(&mut self) {
        G::release(self.state);
    }
}

/// Management operations for the run queue, including adding tasks, unblocking tasks, etc.
impl<G: BaseGuard> AxRunQueueRef<'_, G> {
    /// Adds a task to the scheduler.
    ///
    /// This function is used to add a new task to the scheduler.
    pub fn add_task(&mut self, task: AxTaskRef) {
        debug!(
            "task add: {} on run_queue {}",
            task.id_name(),
            self.inner.cpu_id
        );
        assert!(task.is_ready());
        self.inner.scheduler.lock().add_task(task);
    }

    /// Unblock one task by inserting it into the run queue.
    ///
    /// This function does nothing if the task is not in [`TaskState::Blocked`],
    /// which means the task has already been unblocked by another core.
    pub fn unblock_task(&mut self, task: AxTaskRef, resched: bool) {
        let task_id_name = task.id_name();
        // Try to change the state of the task from `Blocked` to `Ready`.
        // If successful, the task will be put into this run queue;
        // otherwise, the task has already been unblocked by another core.
        // Note:
        // the target task cannot be inserted into the run queue until it finishes its scheduling process.
        if self
            .inner
            .put_task_with_state(task, TaskState::Blocked, resched)
        {
            // At this point, the task to be unblocked is in the `Ready` state.
            let cpu_id = self.inner.cpu_id;
            debug!("task unblock: {} on run_queue {}", task_id_name, cpu_id);
            // Note: when the task is unblocked on another CPU's run queue,
            // we just ignore the `resched` flag.
            if resched && cpu_id == this_cpu_id() {
                #[cfg(feature = "preempt")]
                crate::current().set_preempt_pending(true);
            }
        }
    }
}
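
// Illustrative caller sketch (an assumption about the waker side, not code from this
// file): a waker selects a run queue for the woken task and inserts it via
// `unblock_task`, e.g.
//
//     select_run_queue::<kernel_guard::NoPreemptIrqSave>(&task).unblock_task(task, true);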

/// Core functions of the run queue.
impl<G: BaseGuard> CurrentRunQueueRef<'_, G> {
    #[cfg(feature = "irq")]
    pub fn scheduler_timer_tick(&mut self) {
        let curr = &self.current_task;
        if !curr.is_idle() && self.inner.scheduler.lock().task_tick(curr.as_task_ref()) {
            #[cfg(feature = "preempt")]
            curr.set_preempt_pending(true);
        }
    }

    /// Yield the current task and reschedule.
    /// This function will put the current task back into this run queue in the `Ready` state,
    /// and reschedule to the next task on this run queue.
    pub fn yield_current(&mut self) {
        let curr = &self.current_task;
        trace!("task yield: {}", curr.id_name());
        assert!(curr.is_running());

        self.inner
            .put_task_with_state(curr.clone(), TaskState::Running, false);

        self.inner.resched();
    }

    /// Migrate the current task to a run queue matching its CPU affinity and reschedule.
    ///
    /// The provided `migration_task` performs the actual migration (see `migrate_entry`):
    /// it selects a proper run queue for the current task according to its CPU affinity
    /// and inserts it there. This function marks the current task as `Ready` and switches
    /// to the migration task immediately.
    ///
    /// Note: ownership of the migrating task (i.e. the current task) is handed over to the
    /// migration task until the migration task inserts it into the target run queue.
    #[cfg(feature = "smp")]
    pub fn migrate_current(&mut self, migration_task: AxTaskRef) {
        let curr = &self.current_task;
        trace!("task migrate: {}", curr.id_name());
        assert!(curr.is_running());

        // Mark the current task's state as `Ready`,
        // but do not put the current task into the scheduler of this run queue.
        curr.set_state(TaskState::Ready);

        // Call `switch_to` to reschedule directly to the migration task, which performs the migration.
        self.inner.switch_to(crate::current(), migration_task);
    }

    /// Preempts the current task and reschedules.
    /// This function is used to preempt the current task and reschedule
    /// to the next task on the current run queue.
    ///
    /// This function is called by `current_check_preempt_pending` with IRQs and preemption disabled.
    ///
    /// Note:
    /// preemption may happen in `enable_preempt`, which is called
    /// each time a [`kspin::NoPreemptGuard`] is dropped.
    #[cfg(feature = "preempt")]
    pub fn preempt_resched(&mut self) {
        // There is no need to disable IRQs and preemption here, because
        // they have both been disabled in `current_check_preempt_pending`.
        let curr = &self.current_task;
        assert!(curr.is_running());

        // When we call `preempt_resched()`, both IRQs and preemption must
        // have been disabled by `kernel_guard::NoPreemptIrqSave`. So we pass `1`
        // as the expected `current_disable_count` to `can_preempt()` to obtain
        // the preemption permission.
        let can_preempt = curr.can_preempt(1);

        debug!(
            "current task is to be preempted: {}, allow={}",
            curr.id_name(),
            can_preempt
        );
        if can_preempt {
            self.inner
                .put_task_with_state(curr.clone(), TaskState::Running, true);
            self.inner.resched();
        } else {
            curr.set_preempt_pending(true);
        }
    }

    /// Exit the current task with the specified exit code.
    /// This function will never return.
    pub fn exit_current(&mut self, exit_code: i32) -> ! {
        let curr = &self.current_task;
        debug!("task exit: {}, exit_code={}", curr.id_name(), exit_code);
        assert!(curr.is_running(), "task is not running: {:?}", curr.state());
        assert!(!curr.is_idle());
        if curr.is_init() {
            // Safety: it is called from `current_run_queue::<NoPreemptIrqSave>().exit_current(exit_code)`,
            // which disabled IRQs and preemption.
            unsafe {
                EXITED_TASKS.current_ref_mut_raw().clear();
            }
            axhal::misc::terminate();
        } else {
            curr.set_state(TaskState::Exited);

            // Notify the joiner task.
            curr.notify_exit(exit_code);

            // Safety: it is called from `current_run_queue::<NoPreemptIrqSave>().exit_current(exit_code)`,
            // which disabled IRQs and preemption.
            unsafe {
                // Push current task to the `EXITED_TASKS` list, which will be consumed by the GC task.
                EXITED_TASKS.current_ref_mut_raw().push_back(curr.clone());
                // Wake up the GC task to drop the exited tasks.
                WAIT_FOR_EXIT.current_ref_mut_raw().notify_one(false);
            }

            // Schedule to next task.
            self.inner.resched();
        }
        unreachable!("task exited!");
    }

    /// Block the current task, put it into the wait queue, and reschedule.
    /// Mark the state of the current task as `Blocked` and set its `in_wait_queue` flag to `true`.
    /// Note:
    ///     1. The caller must hold the lock of the wait queue.
    ///     2. The caller must ensure that the current task is in the running state.
    ///     3. The caller must ensure that the current task is not the idle task.
    ///     4. The lock of the wait queue will be released explicitly after the current task is pushed into it.
    pub fn blocked_resched(&mut self, mut wq_guard: WaitQueueGuard) {
        let curr = &self.current_task;
        assert!(curr.is_running());
        assert!(!curr.is_idle());
        // We must not block the current task with preemption disabled beyond the expected count.
        // The current expected preempt count is 2:
        // 1 for `NoPreemptIrqSave`, 1 for the wait queue's `SpinNoIrq`.
        #[cfg(feature = "preempt")]
        assert!(curr.can_preempt(2));

        // Mark the task as blocked; this has to be done before adding it to the wait queue,
        // while holding the lock of the wait queue.
        curr.set_state(TaskState::Blocked);
        curr.set_in_wait_queue(true);

        wq_guard.push_back(curr.clone());
        // Drop the lock of the wait queue explicitly.
        drop(wq_guard);

        // The current task's state has been changed to `Blocked` and it has been added to the wait queue.
        // Note that the state may have already been set back to `Ready` by `unblock_task()`,
        // see `unblock_task()` for details.

        debug!("task block: {}", curr.id_name());
        self.inner.resched();
    }
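
    // Illustrative caller sketch (an assumption about the wait-queue side, not code from
    // this file): the wait queue locks its own list with `SpinNoIrq`, then hands the guard
    // over so `blocked_resched` can enqueue the current task and release the lock, e.g.
    //
    //     let mut rq = current_run_queue::<kernel_guard::NoPreemptIrqSave>();
    //     let wq_guard = self.queue.lock();  // hypothetical wait-queue field
    //     rq.blocked_resched(wq_guard);      // lock dropped inside, then resched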

    #[cfg(feature = "irq")]
    pub fn sleep_until(&mut self, deadline: axhal::time::TimeValue) {
        let curr = &self.current_task;
        debug!("task sleep: {}, deadline={:?}", curr.id_name(), deadline);
        assert!(curr.is_running());
        assert!(!curr.is_idle());

        let now = axhal::time::wall_time();
        if now < deadline {
            crate::timers::set_alarm_wakeup(deadline, curr.clone());
            curr.set_state(TaskState::Blocked);
            self.inner.resched();
        }
    }

    pub fn set_current_priority(&mut self, prio: isize) -> bool {
        self.inner
            .scheduler
            .lock()
            .set_priority(self.current_task.as_task_ref(), prio)
    }
}

impl AxRunQueue {
    /// Create a new run queue for the specified CPU.
    /// The run queue is initialized with a per-CPU gc task in its scheduler.
    fn new(cpu_id: usize) -> Self {
        let gc_task = TaskInner::new(gc_entry, "gc".into(), axconfig::TASK_STACK_SIZE).into_arc();
        // The gc task should be pinned to the current CPU.
        gc_task.set_cpumask(AxCpuMask::one_shot(cpu_id));

        let mut scheduler = Scheduler::new();
        scheduler.add_task(gc_task);
        Self {
            cpu_id,
            scheduler: SpinRaw::new(scheduler),
        }
    }

    /// Puts the target task into the current run queue in the `Ready` state
    /// if its state matches `current_state` (except for the idle task).
    ///
    /// If `preempt` is true, keep the current task's time slice, otherwise reset it.
    ///
    /// Returns `true` if the target task is put into this run queue successfully,
    /// otherwise `false`.
    fn put_task_with_state(
        &mut self,
        task: AxTaskRef,
        current_state: TaskState,
        preempt: bool,
    ) -> bool {
        // If the task's state matches `current_state`, set its state to `Ready` and
        // put it back into the run queue (except for the idle task).
        if task.transition_state(current_state, TaskState::Ready) && !task.is_idle() {
            // If the task was blocked, wait for the task to finish its scheduling process.
            // See `unblock_task()` for details.
            if current_state == TaskState::Blocked {
                // Wait for the task's scheduling process to complete.
                // If the owning (remote) CPU is still in the middle of `switch_to()` with
                // this task as prev, wait until it is done referencing the task.
                //
                // Pairs with `clear_prev_task_on_cpu()`.
                //
                // Note:
                // 1. This should be placed after the `TaskState::Blocked` check,
                //    because the task may have been woken up by other cores.
                // 2. This wait could also be placed right before `switch_to()`.
                #[cfg(feature = "smp")]
                while task.on_cpu() {
                    // Wait for the task to finish its scheduling process.
                    core::hint::spin_loop();
                }
            }
            // TODO: priority
            self.scheduler.lock().put_prev_task(task, preempt);
            true
        } else {
            false
        }
    }

    /// Core reschedule subroutine.
    /// Pick the next task to run and switch to it.
    fn resched(&mut self) {
        let next = self
            .scheduler
            .lock()
            .pick_next_task()
            .unwrap_or_else(|| unsafe {
                // Safety: IRQs must be disabled at this time.
                IDLE_TASK.current_ref_raw().get_unchecked().clone()
            });
        assert!(
            next.is_ready(),
            "next {} is not ready: {:?}",
            next.id_name(),
            next.state()
        );
        self.switch_to(crate::current(), next);
    }

    fn switch_to(&mut self, prev_task: CurrentTask, next_task: AxTaskRef) {
        // Make sure that IRQs are disabled by kernel guard or other means.
        #[cfg(all(not(test), feature = "irq"))] // Note: IRQs are faked under unit tests.
        assert!(
            !axhal::arch::irqs_enabled(),
            "IRQs must be disabled during scheduling"
        );
        trace!(
            "context switch: {} -> {}",
            prev_task.id_name(),
            next_task.id_name()
        );
        #[cfg(feature = "preempt")]
        next_task.set_preempt_pending(false);
        next_task.set_state(TaskState::Running);
        if prev_task.ptr_eq(&next_task) {
            return;
        }

        // Claim the task as running; we do this before switching to it
        // so that any running task will have this flag set.
        #[cfg(feature = "smp")]
        next_task.set_on_cpu(true);

        unsafe {
            let prev_ctx_ptr = prev_task.ctx_mut_ptr();
            let next_ctx_ptr = next_task.ctx_mut_ptr();

            // Store a weak pointer to **prev_task** in the per-CPU variable `PREV_TASK`.
            #[cfg(feature = "smp")]
            {
                *PREV_TASK.current_ref_mut_raw() = Arc::downgrade(prev_task.as_task_ref());
            }

            // The strong reference count of `prev_task` will be decremented by 1,
            // but the task won't be dropped until `gc_entry()` is called.
            assert!(Arc::strong_count(prev_task.as_task_ref()) > 1);
            assert!(Arc::strong_count(&next_task) >= 1);

            CurrentTask::set_current(prev_task, next_task);

            (*prev_ctx_ptr).switch_to(&*next_ctx_ptr);

            // Now it is **next_task** running on this CPU: clear the previous task's `on_cpu`
            // field to indicate that it has finished its scheduling process and is no longer
            // running on this CPU.
            #[cfg(feature = "smp")]
            clear_prev_task_on_cpu();
        }
    }
}
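
// Hand-over timeline (a summary of `switch_to` above, for reference):
//   on prev's stack:  next.set_on_cpu(true) -> PREV_TASK = weak(prev) ->
//                     set_current(prev, next) -> context switch;
//   on next's stack:  clear_prev_task_on_cpu() -> prev.set_on_cpu(false);
//                     only now may `put_task_with_state` on another CPU stop spinning
//                     on `prev.on_cpu()` and enqueue prev again.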

fn gc_entry() {
    loop {
        // Drop all exited tasks and recycle their resources.
        let n = EXITED_TASKS.with_current(|exited_tasks| exited_tasks.len());
        for _ in 0..n {
            // Do not do the slow drops in the critical section.
            let task = EXITED_TASKS.with_current(|exited_tasks| exited_tasks.pop_front());
            if let Some(task) = task {
                if Arc::strong_count(&task) == 1 {
                    // If I'm the last holder of the task, drop it immediately.
                    drop(task);
                } else {
                    // Otherwise (e.g., `switch_to` is not completed, the task is held by the
                    // joiner, etc.), push it back and wait for the other holders to drop it first.
                    EXITED_TASKS.with_current(|exited_tasks| exited_tasks.push_back(task));
                }
            }
        }
        // Note: we cannot block the current task with preemption disabled;
        // use `current_ref_raw` to get a reference to `WAIT_FOR_EXIT` here to avoid using a `NoPreemptGuard`.
        // Since the gc task is pinned to the current CPU, it does no harm if the gc task is preempted during this process.
        unsafe { WAIT_FOR_EXIT.current_ref_raw() }.wait();
    }
}

/// The task routine for migrating the current task to the correct CPU.
///
/// It calls `select_run_queue` to get the correct run queue for the task, and
/// then puts the task into the scheduler of the target run queue.
#[cfg(feature = "smp")]
pub(crate) fn migrate_entry(migrated_task: AxTaskRef) {
    select_run_queue::<kernel_guard::NoPreemptIrqSave>(&migrated_task)
        .inner
        .scheduler
        .lock()
        .put_prev_task(migrated_task, false)
}

/// Clear the `on_cpu` field of the previous task that ran on this CPU.
#[cfg(feature = "smp")]
pub(crate) unsafe fn clear_prev_task_on_cpu() {
    unsafe {
        PREV_TASK
            .current_ref_raw()
            .upgrade()
            .expect("Invalid prev_task pointer or prev_task has been dropped")
            .set_on_cpu(false);
    }
}

pub(crate) fn init() {
    let cpu_id = this_cpu_id();

    // Create the `idle` task (not current task).
    const IDLE_TASK_STACK_SIZE: usize = 4096;
    let idle_task = TaskInner::new(|| crate::run_idle(), "idle".into(), IDLE_TASK_STACK_SIZE);
    // idle task should be pinned to the current CPU.
    idle_task.set_cpumask(AxCpuMask::one_shot(cpu_id));
    IDLE_TASK.with_current(|i| {
        i.init_once(idle_task.into_arc());
    });

    // Put the subsequent execution into the `main` task.
    let main_task = TaskInner::new_init("main".into()).into_arc();
    main_task.set_state(TaskState::Running);
    unsafe { CurrentTask::init_current(main_task) }

    RUN_QUEUE.with_current(|rq| {
        rq.init_once(AxRunQueue::new(cpu_id));
    });
    unsafe {
        RUN_QUEUES[cpu_id].write(RUN_QUEUE.current_ref_mut_raw());
    }
}

pub(crate) fn init_secondary() {
    let cpu_id = this_cpu_id();

    // Put the subsequent execution into the `idle` task.
    let idle_task = TaskInner::new_init("idle".into()).into_arc();
    idle_task.set_state(TaskState::Running);
    IDLE_TASK.with_current(|i| {
        i.init_once(idle_task.clone());
    });
    unsafe { CurrentTask::init_current(idle_task) }

    RUN_QUEUE.with_current(|rq| {
        rq.init_once(AxRunQueue::new(cpu_id));
    });
    unsafe {
        RUN_QUEUES[cpu_id].write(RUN_QUEUE.current_ref_mut_raw());
    }
}