use core::{
alloc::Layout,
cell::RefCell,
sync::atomic::{AtomicUsize, Ordering},
};
use alloc::{
string::String,
sync::{Arc, Weak},
};
use axhal::{
arch::UspaceContext,
time::{NANOS_PER_MICROS, NANOS_PER_SEC, monotonic_time_nanos},
};
use axmm::{AddrSpace, kernel_aspace};
use axns::{AxNamespace, AxNamespaceIf};
use axprocess::{Pid, Process, ProcessGroup, Session, Thread};
use axsync::Mutex;
use axtask::{TaskExtRef, TaskInner, current};
use memory_addr::VirtAddrRange;
use spin::{Once, RwLock};
use weak_map::WeakMap;
use crate::time::TimeStat;
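
/// Create a new [`TaskInner`] that enters user space via `uctx` once it is
/// first scheduled.
///
/// If `set_child_tid` is given, the new task's ID is written through it
/// before entering user space (the `CLONE_CHILD_SETTID` behavior of `clone`).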
pub fn new_user_task(
name: &str,
uctx: UspaceContext,
set_child_tid: Option<&'static mut Pid>,
) -> TaskInner {
TaskInner::new(
move || {
let curr = axtask::current();
if let Some(tid) = set_child_tid {
*tid = curr.id().as_u64() as Pid;
}
let kstack_top = curr.kernel_stack_top().unwrap();
info!(
"Enter user space: entry={:#x}, ustack={:#x}, kstack={:#x}",
uctx.ip(),
uctx.sp(),
kstack_top,
);
unsafe { uctx.enter_uspace(kstack_top) }
},
name.into(),
axconfig::plat::KERNEL_STACK_SIZE,
)
}
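
/// Task extended data for the monolithic kernel.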
pub struct TaskExt {
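    /// The accumulated user/kernel CPU time statistics of the task.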
pub time: RefCell<TimeStat>,
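    /// The thread associated with this task.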
pub thread: Arc<Thread>,
}
impl TaskExt {
pub fn new(thread: Arc<Thread>) -> Self {
Self {
time: RefCell::new(TimeStat::new()),
thread,
}
}
pub(crate) fn time_stat_from_kernel_to_user(&self, current_tick: usize) {
self.time.borrow_mut().switch_into_user_mode(current_tick);
}
pub(crate) fn time_stat_from_user_to_kernel(&self, current_tick: usize) {
self.time.borrow_mut().switch_into_kernel_mode(current_tick);
}
pub(crate) fn time_stat_output(&self) -> (usize, usize) {
self.time.borrow().output()
}
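
    /// Returns the [`ThreadData`] associated with this task's thread.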
pub fn thread_data(&self) -> &ThreadData {
self.thread.data().unwrap()
}
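
    /// Returns the [`ProcessData`] associated with this task's process.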
pub fn process_data(&self) -> &ProcessData {
self.thread.process().data().unwrap()
}
}
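
// Register `TaskExt` as the task extension type used by `axtask`.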
axtask::def_task_ext!(TaskExt);
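
/// Marks the current task as switching from kernel mode to user mode,
/// updating its time statistics with the current monotonic time.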
pub fn time_stat_from_kernel_to_user() {
let curr_task = current();
curr_task
.task_ext()
.time_stat_from_kernel_to_user(monotonic_time_nanos() as usize);
}
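
/// Marks the current task as switching from user mode to kernel mode,
/// updating its time statistics with the current monotonic time.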
pub fn time_stat_from_user_to_kernel() {
let curr_task = current();
curr_task
.task_ext()
.time_stat_from_user_to_kernel(monotonic_time_nanos() as usize);
}
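
/// Returns the current task's accumulated CPU time as
/// `(utime_sec, utime_usec, stime_sec, stime_usec)`.
///
/// Note that the microsecond values are totals, not sub-second remainders.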
pub fn time_stat_output() -> (usize, usize, usize, usize) {
let curr_task = current();
let (utime_ns, stime_ns) = curr_task.task_ext().time_stat_output();
(
utime_ns / NANOS_PER_SEC as usize,
utime_ns / NANOS_PER_MICROS as usize,
stime_ns / NANOS_PER_SEC as usize,
stime_ns / NANOS_PER_MICROS as usize,
)
}
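
/// Extended data associated with a [`Thread`].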
pub struct ThreadData {
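    /// The `clear_child_tid` address, as set by `set_tid_address(2)` or
    /// `CLONE_CHILD_CLEARTID`; zero means unset.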
pub clear_child_tid: AtomicUsize,
}
impl ThreadData {
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
Self {
clear_child_tid: AtomicUsize::new(0),
}
}
pub fn clear_child_tid(&self) -> usize {
self.clear_child_tid.load(Ordering::Relaxed)
}
pub fn set_clear_child_tid(&self, clear_child_tid: usize) {
self.clear_child_tid
.store(clear_child_tid, Ordering::Relaxed);
}
}
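
/// Extended data associated with a [`Process`].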
pub struct ProcessData {
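    /// The path of the executable the process was created from.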
pub exe_path: RwLock<String>,
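    /// The virtual memory address space of the process.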
pub aspace: Arc<Mutex<AddrSpace>>,
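    /// The resource namespace of the process.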
pub ns: AxNamespace,
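    /// The bottom (start) of the user heap.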
heap_bottom: AtomicUsize,
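    /// The current top of the user heap.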
heap_top: AtomicUsize,
}
impl ProcessData {
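    /// Creates a new [`ProcessData`]; the user heap is initially empty, with
    /// both its bottom and top at `USER_HEAP_BASE`.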
pub fn new(exe_path: String, aspace: Arc<Mutex<AddrSpace>>) -> Self {
Self {
exe_path: RwLock::new(exe_path),
aspace,
ns: AxNamespace::new_thread_local(),
heap_bottom: AtomicUsize::new(axconfig::plat::USER_HEAP_BASE),
heap_top: AtomicUsize::new(axconfig::plat::USER_HEAP_BASE),
}
}
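
    /// Returns the bottom of the user heap.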
pub fn get_heap_bottom(&self) -> usize {
self.heap_bottom.load(Ordering::Acquire)
}
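
    /// Sets the bottom of the user heap.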
pub fn set_heap_bottom(&self, bottom: usize) {
self.heap_bottom.store(bottom, Ordering::Release)
}
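
    /// Returns the top of the user heap.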
pub fn get_heap_top(&self) -> usize {
self.heap_top.load(Ordering::Acquire)
}
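
    /// Sets the top of the user heap.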
pub fn set_heap_top(&self, top: usize) {
self.heap_top.store(top, Ordering::Release)
}
}
impl Drop for ProcessData {
fn drop(&mut self) {
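        // On x86_64 and riscv64 the kernel address space is shared into every
        // user address space, so those mappings must be removed before the
        // address space is dropped; aarch64 and loongarch64 keep user and
        // kernel in separate page tables and have nothing to clear here.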
if !cfg!(target_arch = "aarch64") && !cfg!(target_arch = "loongarch64") {
let kernel = kernel_aspace().lock();
self.aspace
.lock()
.clear_mappings(VirtAddrRange::from_start_size(kernel.base(), kernel.size()));
}
}
}
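
/// Provides [`AxNamespaceIf`] so that `axns` can locate the resource
/// namespace of the current task.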
struct AxNamespaceImpl;
#[crate_interface::impl_interface]
impl AxNamespaceIf for AxNamespaceImpl {
fn current_namespace_base() -> *mut u8 {
static KERNEL_NS_BASE: Once<usize> = Once::new();
let current = axtask::current();
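        // Kernel tasks have no `TaskExt`; they share a single, lazily
        // allocated copy of the global namespace instead of a per-process one.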
if unsafe { current.task_ext_ptr() }.is_null() {
return *(KERNEL_NS_BASE.call_once(|| {
let global_ns = AxNamespace::global();
let layout = Layout::from_size_align(global_ns.size(), 64).unwrap();
let dst = unsafe { alloc::alloc::alloc(layout) };
let src = global_ns.base();
unsafe { core::ptr::copy_nonoverlapping(src, dst, global_ns.size()) };
dst as usize
})) as *mut u8;
}
current.task_ext().process_data().ns.base()
}
}
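
// Global lookup tables from IDs to threads, processes, process groups and
// sessions. Entries are held weakly so the tables do not keep the objects
// alive.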
static THREAD_TABLE: RwLock<WeakMap<Pid, Weak<Thread>>> = RwLock::new(WeakMap::new());
static PROCESS_TABLE: RwLock<WeakMap<Pid, Weak<Process>>> = RwLock::new(WeakMap::new());
static PROCESS_GROUP_TABLE: RwLock<WeakMap<Pid, Weak<ProcessGroup>>> = RwLock::new(WeakMap::new());
static SESSION_TABLE: RwLock<WeakMap<Pid, Weak<Session>>> = RwLock::new(WeakMap::new());
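
/// Registers a thread in the global tables, along with its process,
/// process group and session if they are not already present.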
pub fn add_thread_to_table(thread: &Arc<Thread>) {
let mut thread_table = THREAD_TABLE.write();
thread_table.insert(thread.tid(), thread);
let mut process_table = PROCESS_TABLE.write();
let process = thread.process();
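    // If the process is already registered, its group and session were
    // registered along with it.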
if process_table.contains_key(&process.pid()) {
return;
}
process_table.insert(process.pid(), process);
let mut process_group_table = PROCESS_GROUP_TABLE.write();
let process_group = process.group();
if process_group_table.contains_key(&process_group.pgid()) {
return;
}
process_group_table.insert(process_group.pgid(), &process_group);
let mut session_table = SESSION_TABLE.write();
let session = process_group.session();
if session_table.contains_key(&session.sid()) {
return;
}
session_table.insert(session.sid(), &session);
}