starry_core/task.rs

//! User task management.

use core::{
    alloc::Layout,
    cell::RefCell,
    sync::atomic::{AtomicUsize, Ordering},
    time::Duration,
};

use alloc::{
    string::String,
    sync::{Arc, Weak},
    vec::Vec,
};
use axerrno::{LinuxError, LinuxResult};
use axhal::{
    arch::UspaceContext,
    time::{NANOS_PER_MICROS, NANOS_PER_SEC, monotonic_time_nanos},
};
use axmm::{AddrSpace, kernel_aspace};
use axns::{AxNamespace, AxNamespaceIf};
use axprocess::{Pid, Process, ProcessGroup, Session, Thread};
use axsignal::{
    Signo,
    api::{ProcessSignalManager, SignalActions, ThreadSignalManager},
};
use axsync::{Mutex, RawMutex};
use axtask::{TaskExtRef, TaskInner, WaitQueue, current};
use memory_addr::VirtAddrRange;
use spin::{Once, RwLock};
use weak_map::WeakMap;

use crate::{futex::FutexTable, time::TimeStat};

/// Create a new user task.
pub fn new_user_task(
    name: &str,
    uctx: UspaceContext,
    set_child_tid: Option<&'static mut Pid>,
) -> TaskInner {
    TaskInner::new(
        move || {
            let curr = axtask::current();
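            // Store the new task's TID at the caller-provided location, if any
            // (cf. the `CLONE_CHILD_SETTID` clone flag).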
            if let Some(tid) = set_child_tid {
                *tid = curr.id().as_u64() as Pid;
            }

            let kstack_top = curr.kernel_stack_top().unwrap();
            info!(
                "Enter user space: entry={:#x}, ustack={:#x}, kstack={:#x}",
                uctx.ip(),
                uctx.sp(),
                kstack_top,
            );
            unsafe { uctx.enter_uspace(kstack_top) }
        },
        name.into(),
        axconfig::plat::KERNEL_STACK_SIZE,
    )
}

/// Task extended data for the monolithic kernel.
pub struct TaskExt {
    /// The time statistics
    pub time: RefCell<TimeStat>,
    /// The thread
    pub thread: Arc<Thread>,
}

impl TaskExt {
    /// Create a new [`TaskExt`].
    pub fn new(thread: Arc<Thread>) -> Self {
        Self {
            time: RefCell::new(TimeStat::new()),
            thread,
        }
    }

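    /// Update the time statistics on a switch from kernel mode to user mode at `current_tick`.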
    pub(crate) fn time_stat_from_kernel_to_user(&self, current_tick: usize) {
        self.time.borrow_mut().switch_into_user_mode(current_tick);
    }

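    /// Update the time statistics on a switch from user mode to kernel mode at `current_tick`.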
    pub(crate) fn time_stat_from_user_to_kernel(&self, current_tick: usize) {
        self.time.borrow_mut().switch_into_kernel_mode(current_tick);
    }

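    /// Return the accumulated (user time, kernel time) of this task, in nanoseconds.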
    pub(crate) fn time_stat_output(&self) -> (usize, usize) {
        self.time.borrow().output()
    }

    /// Get the [`ThreadData`] associated with this task.
    pub fn thread_data(&self) -> &ThreadData {
        self.thread.data().unwrap()
    }

    /// Get the [`ProcessData`] associated with this task.
    pub fn process_data(&self) -> &ProcessData {
        self.thread.process().data().unwrap()
    }
}

axtask::def_task_ext!(TaskExt);

/// Update the time statistics to reflect a switch from kernel mode to user mode.
pub fn time_stat_from_kernel_to_user() {
    let curr_task = current();
    curr_task
        .task_ext()
        .time_stat_from_kernel_to_user(monotonic_time_nanos() as usize);
}

/// Update the time statistics to reflect a switch from user mode to kernel mode.
pub fn time_stat_from_user_to_kernel() {
    let curr_task = current();
    curr_task
        .task_ext()
        .time_stat_from_user_to_kernel(monotonic_time_nanos() as usize);
}

/// Get the time statistics for the current task.
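///
/// Returns `(utime_sec, utime_usec, stime_sec, stime_usec)`: user and system CPU
/// time, each expressed both in whole seconds and in total microseconds.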
pub fn time_stat_output() -> (usize, usize, usize, usize) {
    let curr_task = current();
    let (utime_ns, stime_ns) = curr_task.task_ext().time_stat_output();
    (
        utime_ns / NANOS_PER_SEC as usize,
        utime_ns / NANOS_PER_MICROS as usize,
        stime_ns / NANOS_PER_SEC as usize,
        stime_ns / NANOS_PER_MICROS as usize,
    )
}

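// Adapter exposing an `axtask::WaitQueue` through the `axsignal::api::WaitQueue` trait.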
#[doc(hidden)]
pub struct WaitQueueWrapper(WaitQueue);
impl Default for WaitQueueWrapper {
    fn default() -> Self {
        Self(WaitQueue::new())
    }
}
impl axsignal::api::WaitQueue for WaitQueueWrapper {
    fn wait_timeout(&self, timeout: Option<Duration>) -> bool {
        if let Some(timeout) = timeout {
            self.0.wait_timeout(timeout)
        } else {
            self.0.wait();
            true
        }
    }

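    // Wake a single waiter; the `false` flag requests no immediate reschedule.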
    fn notify_one(&self) -> bool {
        self.0.notify_one(false)
    }
}

/// Extended data for [`Thread`].
pub struct ThreadData {
    /// The clear thread tid field
    ///
    /// See <https://manpages.debian.org/unstable/manpages-dev/set_tid_address.2.en.html#clear_child_tid>
    ///
    /// When the thread exits, the kernel clears the word at this address if it is not NULL.
    pub clear_child_tid: AtomicUsize,

    /// The thread-level signal manager
    pub signal: ThreadSignalManager<RawMutex, WaitQueueWrapper>,
}

impl ThreadData {
    /// Create a new [`ThreadData`].
    #[allow(clippy::new_without_default)]
    pub fn new(proc: &ProcessData) -> Self {
        Self {
            clear_child_tid: AtomicUsize::new(0),

            signal: ThreadSignalManager::new(proc.signal.clone()),
        }
    }

    /// Get the clear child tid field.
    pub fn clear_child_tid(&self) -> usize {
        self.clear_child_tid.load(Ordering::Relaxed)
    }

    /// Set the clear child tid field.
    pub fn set_clear_child_tid(&self, clear_child_tid: usize) {
        self.clear_child_tid
            .store(clear_child_tid, Ordering::Relaxed);
    }
}

/// Extended data for [`Process`].
pub struct ProcessData {
    /// The executable path
    pub exe_path: RwLock<String>,
    /// The virtual memory address space.
    pub aspace: Arc<Mutex<AddrSpace>>,
    /// The resource namespace
    pub ns: AxNamespace,
    /// The user heap bottom
    heap_bottom: AtomicUsize,
    /// The user heap top
    heap_top: AtomicUsize,

    /// The child exit wait queue
    pub child_exit_wq: WaitQueue,
    /// The exit signal delivered to the parent when this process terminates
    pub exit_signal: Option<Signo>,

    /// The process signal manager
    pub signal: Arc<ProcessSignalManager<RawMutex, WaitQueueWrapper>>,

    /// The futex table.
    pub futex_table: FutexTable,
}

impl ProcessData {
    /// Create a new [`ProcessData`].
    pub fn new(
        exe_path: String,
        aspace: Arc<Mutex<AddrSpace>>,
        signal_actions: Arc<Mutex<SignalActions>>,
        exit_signal: Option<Signo>,
    ) -> Self {
        Self {
            exe_path: RwLock::new(exe_path),
            aspace,
            ns: AxNamespace::new_thread_local(),
            heap_bottom: AtomicUsize::new(axconfig::plat::USER_HEAP_BASE),
            heap_top: AtomicUsize::new(axconfig::plat::USER_HEAP_BASE),

            child_exit_wq: WaitQueue::new(),
            exit_signal,

            signal: Arc::new(ProcessSignalManager::new(
                signal_actions,
                axconfig::plat::SIGNAL_TRAMPOLINE,
            )),

            futex_table: FutexTable::new(),
        }
    }

    /// Get the bottom address of the user heap.
    pub fn get_heap_bottom(&self) -> usize {
        self.heap_bottom.load(Ordering::Acquire)
    }

    /// Set the bottom address of the user heap.
    pub fn set_heap_bottom(&self, bottom: usize) {
        self.heap_bottom.store(bottom, Ordering::Release)
    }

    /// Get the top address of the user heap.
    pub fn get_heap_top(&self) -> usize {
        self.heap_top.load(Ordering::Acquire)
    }

    /// Set the top address of the user heap.
    pub fn set_heap_top(&self, top: usize) {
        self.heap_top.store(top, Ordering::Release)
    }

    /// Linux manual: A "clone" child is one which delivers no signal, or a
    /// signal other than SIGCHLD to its parent upon termination.
    pub fn is_clone_child(&self) -> bool {
        self.exit_signal != Some(Signo::SIGCHLD)
    }
}

impl Drop for ProcessData {
    fn drop(&mut self) {
        if !cfg!(target_arch = "aarch64") && !cfg!(target_arch = "loongarch64") {
            // See [`crate::new_user_aspace`]
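            // On these architectures the kernel mappings were copied into the user
            // address space, so clear them before the address space is dropped.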
            let kernel = kernel_aspace().lock();
            self.aspace
                .lock()
                .clear_mappings(VirtAddrRange::from_start_size(kernel.base(), kernel.size()));
        }
    }
}

struct AxNamespaceImpl;
#[crate_interface::impl_interface]
impl AxNamespaceIf for AxNamespaceImpl {
    fn current_namespace_base() -> *mut u8 {
        // Namespace for kernel task
        static KERNEL_NS_BASE: Once<usize> = Once::new();
        let current = axtask::current();
        // Safety: We only check whether the task extended data is null and do not access it.
        if unsafe { current.task_ext_ptr() }.is_null() {
            return *(KERNEL_NS_BASE.call_once(|| {
                let global_ns = AxNamespace::global();
                let layout = Layout::from_size_align(global_ns.size(), 64).unwrap();
                // Safety: The global namespace is a static readonly variable and will not be dropped.
                let dst = unsafe { alloc::alloc::alloc(layout) };
                let src = global_ns.base();
                unsafe { core::ptr::copy_nonoverlapping(src, dst, global_ns.size()) };
                dst as usize
            })) as *mut u8;
        }
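        // User tasks resolve resources through their per-process namespace.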
        current.task_ext().process_data().ns.base()
    }
}

static THREAD_TABLE: RwLock<WeakMap<Pid, Weak<Thread>>> = RwLock::new(WeakMap::new());
static PROCESS_TABLE: RwLock<WeakMap<Pid, Weak<Process>>> = RwLock::new(WeakMap::new());
static PROCESS_GROUP_TABLE: RwLock<WeakMap<Pid, Weak<ProcessGroup>>> = RwLock::new(WeakMap::new());
static SESSION_TABLE: RwLock<WeakMap<Pid, Weak<Session>>> = RwLock::new(WeakMap::new());

/// Add the thread and possibly its process, process group and session to the
/// corresponding tables.
pub fn add_thread_to_table(thread: &Arc<Thread>) {
    let mut thread_table = THREAD_TABLE.write();
    thread_table.insert(thread.tid(), thread);

    let mut process_table = PROCESS_TABLE.write();
    let process = thread.process();
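    // If the process is already registered, its group and session are too.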
    if process_table.contains_key(&process.pid()) {
        return;
    }
    process_table.insert(process.pid(), process);

    let mut process_group_table = PROCESS_GROUP_TABLE.write();
    let process_group = process.group();
    if process_group_table.contains_key(&process_group.pgid()) {
        return;
    }
    process_group_table.insert(process_group.pgid(), &process_group);

    let mut session_table = SESSION_TABLE.write();
    let session = process_group.session();
    if session_table.contains_key(&session.sid()) {
        return;
    }
    session_table.insert(session.sid(), &session);
}

/// Lists all processes.
pub fn processes() -> Vec<Arc<Process>> {
    PROCESS_TABLE.read().values().collect()
}

/// Finds the thread with the given TID.
pub fn get_thread(tid: Pid) -> LinuxResult<Arc<Thread>> {
    THREAD_TABLE.read().get(&tid).ok_or(LinuxError::ESRCH)
}

/// Finds the process with the given PID.
pub fn get_process(pid: Pid) -> LinuxResult<Arc<Process>> {
    PROCESS_TABLE.read().get(&pid).ok_or(LinuxError::ESRCH)
}

/// Finds the process group with the given PGID.
pub fn get_process_group(pgid: Pid) -> LinuxResult<Arc<ProcessGroup>> {
    PROCESS_GROUP_TABLE
        .read()
        .get(&pgid)
        .ok_or(LinuxError::ESRCH)
}

/// Finds the session with the given SID.
pub fn get_session(sid: Pid) -> LinuxResult<Arc<Session>> {
    SESSION_TABLE.read().get(&sid).ok_or(LinuxError::ESRCH)
}