axmm/backend/alloc.rs

use crate::page_iter_wrapper::{PAGE_SIZE_4K, PageIterWrapper};
use axalloc::global_allocator;
use axhal::mem::{phys_to_virt, virt_to_phys};
use axhal::paging::{MappingFlags, PageSize, PageTable};
use memory_addr::{PhysAddr, VirtAddr};

#[cfg(feature = "cow")]
use crate::frameinfo::frame_table;

use super::Backend;

/// Allocates a physical frame, with an option to zero it out.
///
/// This function allocates physical memory with the specified alignment and
/// returns the corresponding physical address. If allocation fails, it returns `None`.
///
/// # Parameters
/// - `zeroed`: If `true`, the allocated memory will be zero-initialized.
/// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB.
///
/// # Returns
/// - `Some(PhysAddr)`: The physical address if the allocation is successful.
/// - `None`: Returned if the memory allocation fails.
///
/// # Notes
/// - This function uses the global memory allocator to allocate memory, with the size
///   determined by the `align` parameter (in page units).
/// - If `zeroed` is `true`, the function uses `unsafe` operations to zero out the memory.
/// - The allocator returns a kernel virtual address; the physical address returned by
///   this function is obtained from it via `virt_to_phys`.
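///
/// # Example
///
/// A minimal usage sketch (illustrative only, assuming the 4 KiB `PageSize` variant; the
/// function is crate-private, so this snippet is not compiled as a doctest):
///
/// ```ignore
/// // Allocate one zeroed 4 KiB frame and release it again.
/// let frame = alloc_frame(true, PageSize::Size4K).expect("out of physical memory");
/// dealloc_frame(frame, PageSize::Size4K);
/// ```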
pub(crate) fn alloc_frame(zeroed: bool, align: PageSize) -> Option<PhysAddr> {
    let page_size: usize = align.into();
    let num_pages = page_size / PAGE_SIZE_4K;
    let vaddr = VirtAddr::from(global_allocator().alloc_pages(num_pages, page_size).ok()?);
    if zeroed {
        unsafe { core::ptr::write_bytes(vaddr.as_mut_ptr(), 0, page_size) };
    }
    let paddr = virt_to_phys(vaddr);

    #[cfg(feature = "cow")]
    frame_table().inc_ref(paddr);

    Some(paddr)
}

/// Frees a physical frame of memory with the specified alignment.
///
/// This function converts the given physical address to a virtual address,
/// and then frees the corresponding memory pages using the global memory allocator.
/// The size of the memory to be freed is determined by the `align` parameter,
/// which must be a multiple of 4KiB.
///
/// If the `cow` feature is enabled, this function decreases the reference count associated
/// with the frame, and the frame memory is actually freed only when the last reference is
/// released.
///
/// # Parameters
/// - `frame`: The physical address of the memory to be freed.
/// - `align`: The alignment requirement for the memory, must be a multiple of 4KiB.
///
/// # Notes
/// - This function assumes that the provided `frame` was allocated using `alloc_frame`;
///   otherwise, undefined behavior may occur.
/// - If the deallocation fails, the function panics. Details about the failure can be
///   obtained from the global memory allocator's error messages.
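///
/// # Example
///
/// A minimal sketch (illustrative only, assuming the 2 MiB `PageSize` variant; a frame must
/// be freed with the same alignment it was allocated with):
///
/// ```ignore
/// if let Some(frame) = alloc_frame(true, PageSize::Size2M) {
///     // ... use the frame ...
///     dealloc_frame(frame, PageSize::Size2M);
/// }
/// ```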
pub(crate) fn dealloc_frame(frame: PhysAddr, align: PageSize) {
    #[cfg(feature = "cow")]
    if frame_table().dec_ref(frame) > 1 {
        return;
    }

    let vaddr = phys_to_virt(frame);
    let page_size: usize = align.into();
    let num_pages = page_size / PAGE_SIZE_4K;
    global_allocator().dealloc_pages(vaddr.as_usize(), num_pages);
}

impl Backend {
    /// Creates a new allocation mapping backend.
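    ///
    /// `populate` selects whether physical frames are allocated eagerly in `map_alloc`
    /// or lazily on page faults, and `align` selects the page size used for allocation
    /// and mapping.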
    pub const fn new_alloc(populate: bool, align: PageSize) -> Self {
        Self::Alloc { populate, align }
    }

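    /// Maps the region `[start, start + size)` with `flags` in the given page table.
    ///
    /// If `populate` is `true`, physical frames are allocated and mapped eagerly;
    /// otherwise the mapping entries are created on demand in `handle_page_fault_alloc`.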
    pub(crate) fn map_alloc(
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
        align: PageSize,
    ) -> bool {
        debug!(
            "map_alloc: [{:#x}, {:#x}) {:?} (populate={})",
            start,
            start + size,
            flags,
            populate
        );
        if populate {
            // allocate all possible physical frames for populated mapping.
            if let Some(iter) = PageIterWrapper::new(start, start + size, align) {
                for addr in iter {
                    if let Some(frame) = alloc_frame(true, align) {
                        if let Ok(tlb) = pt.map(addr, frame, align, flags) {
                            tlb.ignore(); // TLB flush on map is unnecessary, as there are no outdated mappings.
                        } else {
                            return false;
                        }
                    }
                }
            }
        } else {
            // create mapping entries on demand later in `handle_page_fault_alloc`.
        }
        true
    }

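    /// Unmaps the region `[start, start + size)` from the given page table,
    /// deallocating every physical frame that was actually mapped there.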
    pub(crate) fn unmap_alloc(
        start: VirtAddr,
        size: usize,
        pt: &mut PageTable,
        _populate: bool,
        align: PageSize,
    ) -> bool {
        debug!("unmap_alloc: [{:#x}, {:#x})", start, start + size);
        if let Some(iter) = PageIterWrapper::new(start, start + size, align) {
            for addr in iter {
                if let Ok((frame, _page_size, tlb)) = pt.unmap(addr) {
                    // Deallocate the physical frame if there is a mapping in the
                    // page table.
                    tlb.flush();
                    dealloc_frame(frame, align);
                } else {
                    // No deallocation is needed if the page is not mapped.
                }
            }
        }
        true
    }

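    /// Handles a page fault at `vaddr` for a lazily populated mapping by allocating a
    /// zeroed frame and mapping it with `orig_flags`. Returns `false` for populated
    /// mappings, which should never fault, or if the frame allocation fails.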
    pub(crate) fn handle_page_fault_alloc(
        vaddr: VirtAddr,
        orig_flags: MappingFlags,
        pt: &mut PageTable,
        populate: bool,
        align: PageSize,
    ) -> bool {
        if populate {
            false // Populated mappings should not trigger page faults.
        } else if let Some(frame) = alloc_frame(true, align) {
            // Allocate a physical frame lazily and map it to the fault address.
            // `vaddr` does not need to be aligned. It will be automatically
            // aligned during `pt.map` regardless of the page size.
            pt.map(vaddr, frame, align, orig_flags)
                .map(|tlb| tlb.flush())
                .is_ok()
        } else {
            false
        }
    }
}