axmm/backend/mod.rs

//! Memory mapping backends.

use axhal::paging::{MappingFlags, PageSize, PageTable};
use memory_addr::VirtAddr;
use memory_set::MappingBackend;

mod alloc;
mod linear;

#[allow(unused_imports)]
pub(crate) use alloc::{alloc_frame, dealloc_frame};

/// A unified enum type for different memory mapping backends.
///
/// Currently, two backends are implemented:
///
/// - **Linear**: used for linear mappings. The target physical frames are
///   contiguous, and their addresses must be known when the mapping is
///   created.
/// - **Allocation**: used for general-purpose mappings, including lazy
///   (demand-paged) ones. The target physical frames are obtained from the
///   global allocator.
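///
/// # Examples
///
/// A minimal sketch of constructing each variant; the offset and alignment
/// values below are illustrative, not prescribed by this crate, and the
/// `axmm::Backend` re-export path is an assumption:
///
/// ```ignore
/// use axhal::paging::PageSize;
/// use axmm::Backend; // assuming the crate re-exports `Backend`
///
/// // Fixed-offset mapping: `vaddr` maps to `vaddr - pa_va_offset`.
/// let linear = Backend::Linear {
///     pa_va_offset: 0xffff_8000_0000_0000,
///     align: PageSize::Size4K,
/// };
///
/// // Demand-paged mapping: frames come from the global allocator on fault.
/// let lazy = Backend::Alloc {
///     populate: false,
///     align: PageSize::Size4K,
/// };
/// ```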
#[derive(Clone)]
pub enum Backend {
    /// Linear mapping backend.
    ///
    /// The offset between a virtual address and its physical address is
    /// constant, as specified by `pa_va_offset`: the virtual address `vaddr`
    /// is mapped to the physical address `vaddr - pa_va_offset`.
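    ///
    /// For instance, with an illustrative `pa_va_offset` of
    /// `0xffff_8000_0000_0000`, the virtual address `0xffff_8000_0020_0000`
    /// maps to the physical address `0x20_0000`.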
    Linear {
        /// `vaddr - paddr`.
        pa_va_offset: usize,
        /// Alignment parameters for the starting address and memory range.
        align: PageSize,
    },
    /// Allocation mapping backend.
    ///
    /// If `populate` is `true`, all physical frames are allocated when the
    /// mapping is created, and no page faults are triggered during memory
    /// access. Otherwise, physical frames are allocated on demand (by
    /// handling page faults).
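    ///
    /// For example, with `populate = false`, the first access to each page
    /// triggers a page fault that is resolved by `Backend::handle_page_fault`,
    /// which allocates and maps the missing frame.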
    Alloc {
        /// Whether to populate the physical frames when creating the mapping.
        populate: bool,
        /// Alignment parameters for the starting address and memory range.
        align: PageSize,
    },
}

impl MappingBackend for Backend {
    type Addr = VirtAddr;
    type Flags = MappingFlags;
    type PageTable = PageTable;
    fn map(&self, start: VirtAddr, size: usize, flags: MappingFlags, pt: &mut PageTable) -> bool {
        match *self {
            Self::Linear {
                pa_va_offset,
                align: _,
            } => Self::map_linear(start, size, flags, pt, pa_va_offset),
            Self::Alloc { populate, align } => {
                Self::map_alloc(start, size, flags, pt, populate, align)
            }
        }
    }

    fn unmap(&self, start: VirtAddr, size: usize, pt: &mut PageTable) -> bool {
        match *self {
            Self::Linear {
                pa_va_offset,
                align: _,
            } => Self::unmap_linear(start, size, pt, pa_va_offset),
            Self::Alloc { populate, align } => Self::unmap_alloc(start, size, pt, populate, align),
        }
    }

    fn protect(
        &self,
        start: Self::Addr,
        size: usize,
        new_flags: Self::Flags,
        page_table: &mut Self::PageTable,
    ) -> bool {
        page_table
            .protect_region(start, size, new_flags, true)
            // Discard the returned TLB-flush token; no flush is performed here.
            .map(|tlb| tlb.ignore())
            .is_ok()
    }
}
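
// A hedged usage sketch of driving the trait methods directly; the page-table
// construction and addresses are illustrative (`PageTable::try_new` is
// provided by the paging types re-exported through `axhal::paging`):
//
//     let mut pt = PageTable::try_new().unwrap();
//     let backend = Backend::Alloc { populate: true, align: PageSize::Size4K };
//     let flags = MappingFlags::READ | MappingFlags::WRITE;
//     assert!(backend.map(VirtAddr::from(0x8000_0000), 0x4000, flags, &mut pt));
//     assert!(backend.protect(VirtAddr::from(0x8000_0000), 0x4000, MappingFlags::READ, &mut pt));
//     assert!(backend.unmap(VirtAddr::from(0x8000_0000), 0x4000, &mut pt));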

impl Backend {
    /// Handles a page fault that occurred at `vaddr` in an area mapped by
    /// this backend, returning whether the fault was successfully resolved.
    pub(crate) fn handle_page_fault(
        &self,
        vaddr: VirtAddr,
        orig_flags: MappingFlags,
        page_table: &mut PageTable,
    ) -> bool {
        match *self {
            Self::Linear { .. } => false, // Linear mappings should not trigger page faults.
            Self::Alloc { populate, align } => {
                Self::handle_page_fault_alloc(vaddr, orig_flags, page_table, populate, align)
            }
        }
    }
}
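
// A hedged sketch of the caller side: on a page fault, the address-space
// layer looks up the area containing `vaddr` and dispatches to its backend.
// The `aspace.find_area` lookup and the accessor names below are hypothetical
// stand-ins, not APIs defined in this module:
//
//     if let Some(area) = aspace.find_area(vaddr) {
//         let handled = area.backend().handle_page_fault(vaddr, area.flags(), &mut pt);
//         // If `handled` is false, escalate the fault (e.g., report a fatal error).
//     }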