axmm/
aspace.rs

use core::fmt;

use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageSize, PageTable, PagingError};
use memory_addr::{MemoryAddr, PhysAddr, VirtAddr, VirtAddrRange, is_aligned};
use memory_set::{MemoryArea, MemorySet};

use crate::backend::Backend;
use crate::mapping_err_to_ax_err;
use crate::page_iter_wrapper::{PAGE_SIZE_4K, PageIterWrapper};

#[cfg(feature = "cow")]
use crate::backend::{alloc_frame, dealloc_frame};
#[cfg(feature = "cow")]
use crate::frameinfo::frame_table;

/// The virtual memory address space.
pub struct AddrSpace {
    va_range: VirtAddrRange,
    areas: MemorySet<Backend>,
    pt: PageTable,
}

impl AddrSpace {
    /// Returns the address space base.
    pub const fn base(&self) -> VirtAddr {
        self.va_range.start
    }

    /// Returns the address space end.
    pub const fn end(&self) -> VirtAddr {
        self.va_range.end
    }

    /// Returns the address space size.
    pub fn size(&self) -> usize {
        self.va_range.size()
    }

    /// Returns the reference to the inner page table.
    pub const fn page_table(&self) -> &PageTable {
        &self.pt
    }

    /// Returns the root physical address of the inner page table.
    pub const fn page_table_root(&self) -> PhysAddr {
        self.pt.root_paddr()
    }

    /// Checks if the address space contains the given address range.
    pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
        self.va_range
            .contains_range(VirtAddrRange::from_start_size(start, size))
    }

    /// Creates a new empty address space.
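    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the base address and
    /// size below are hypothetical, not taken from the original source):
    ///
    /// ```rust,ignore
    /// // A 1 GiB address space starting at 0x1000.
    /// let aspace = AddrSpace::new_empty(VirtAddr::from(0x1000), 0x4000_0000)?;
    /// assert_eq!(aspace.size(), 0x4000_0000);
    /// ```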
    pub fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
        Ok(Self {
            va_range: VirtAddrRange::from_start_size(base, size),
            areas: MemorySet::new(),
            pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
        })
    }

    /// Copies page table mappings from another address space.
    ///
    /// It copies the page table entries only rather than the memory regions,
    /// usually used to copy a portion of the kernel space mapping to the
    /// user space.
    ///
    /// Note that on dropping, the copied PTEs will also be cleared, which could
    /// taint the original page table. As a workaround, you can use
    /// [`AddrSpace::clear_mappings`].
    ///
    /// Returns an error if the two address spaces overlap.
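    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; `kernel_aspace` and
    /// the user range are hypothetical):
    ///
    /// ```rust,ignore
    /// let mut uspace = AddrSpace::new_empty(VirtAddr::from(0x1000), 0x7fff_f000)?;
    /// // Share the kernel's mappings with the user page table.
    /// uspace.copy_mappings_from(&kernel_aspace)?;
    /// // ... later, before dropping `uspace`, detach the shared PTEs:
    /// uspace.clear_mappings(VirtAddrRange::from_start_size(
    ///     kernel_aspace.base(),
    ///     kernel_aspace.size(),
    /// ));
    /// ```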
    pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
        if self.va_range.overlaps(other.va_range) {
            return ax_err!(InvalidInput, "address space overlap");
        }
        self.pt.copy_from(&other.pt, other.base(), other.size());
        Ok(())
    }

    /// Clears the page table mappings in the given address range.
    ///
    /// This should be used together with [`AddrSpace::copy_mappings_from`].
    pub fn clear_mappings(&mut self, range: VirtAddrRange) {
        self.pt.clear_copy_range(range.start, range.size());
    }

    /// Validates that a memory region is usable for mapping.
    ///
    /// The page table hardware can only map page-aligned address ranges, so
    /// this check ensures that the region lies within the address space and
    /// that both its start address and size are aligned to `align`.
    fn validate_region(&self, start: VirtAddr, size: usize, align: PageSize) -> AxResult {
        if !self.contains_range(start, size) {
            return ax_err!(InvalidInput, "address out of range");
        }
        if !start.is_aligned(align) || !is_aligned(size, align.into()) {
            return ax_err!(InvalidInput, "address not aligned");
        }
        Ok(())
    }

    /// Searches for a contiguous free region in the virtual address space.
    ///
    /// This function searches for available virtual address space within a specified address range,
    /// based on the current memory region layout, that satisfies the size and alignment requirements.
    ///
    /// # Parameters
    /// - `hint`: Suggested starting address for the search (may be adjusted due to alignment or overlapping regions)
    /// - `size`: Size of the contiguous address space to allocate (in bytes)
    /// - `limit`: Boundary of the allowed address range (inclusive of start and end addresses)
    /// - `align`: Address alignment requirement (e.g., page alignment like 4KB/2MB)
    ///
    /// # Returns
    /// - `Some(VirtAddr)`: A starting virtual address that meets all requirements was found
    /// - `None`: No sufficient space was found within the specified range
    ///
    /// # Implementation Logic
    /// 1. Initialize `last_end` to the maximum aligned value between the hint and the start of the limit range
    /// 2. First pass: handle regions before the hint to determine the initial search position
    /// 3. Second pass: check gaps between regions:
    ///    - Skip overlapping and already occupied regions
    ///    - Check whether the gap between regions satisfies the `size + alignment` requirement
    /// 4. Finally, verify that the found address is within the specified `limit` range
    ///
    /// # Notes
    /// - Alignment is strictly enforced on candidate addresses (ensured via `align_up`)
    /// - The region must be fully contained within the `limit` range (`end <= limit.end`)
    /// - The search may ignore the `hint` if no suitable gap exists near it
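    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the hint and size
    /// values are hypothetical):
    ///
    /// ```rust,ignore
    /// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
    /// if let Some(va) = aspace.find_free_area(
    ///     VirtAddr::from(0x1000_0000), // hint
    ///     0x8000,                      // 32 KiB
    ///     limit,
    ///     PageSize::Size4K,
    /// ) {
    ///     aspace.map_alloc(va, 0x8000, MappingFlags::READ | MappingFlags::WRITE, false, PageSize::Size4K)?;
    /// }
    /// ```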
    pub fn find_free_area(
        &self,
        hint: VirtAddr,
        size: usize,
        limit: VirtAddrRange,
        align: PageSize,
    ) -> Option<VirtAddr> {
        let mut last_end = hint.max(limit.start).align_up(align);
        for area in self.areas.iter() {
            if area.end() <= last_end {
                last_end = last_end.max(area.end().align_up(align));
            } else {
                break;
            }
        }
        for area in self.areas.iter() {
            let area_start = area.start();
            if area_start < last_end {
                continue;
            }
            if last_end
                .checked_add(size)
                .is_some_and(|end| end <= area_start)
            {
                return Some(last_end);
            }
            last_end = area.end().align_up(align);
        }

        if last_end
            .checked_add(size)
            .is_some_and(|end| end <= limit.end)
        {
            Some(last_end)
        } else {
            None
        }
    }

    /// Adds a new linear mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
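    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the device addresses,
    /// size, and flags are hypothetical):
    ///
    /// ```rust,ignore
    /// // Map a 16 KiB MMIO region at a fixed offset between VA and PA.
    /// aspace.map_linear(
    ///     VirtAddr::from(0x4000_1000_0000),
    ///     PhysAddr::from(0x1000_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::DEVICE,
    ///     PageSize::Size4K,
    /// )?;
    /// ```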
    pub fn map_linear(
        &mut self,
        start_vaddr: VirtAddr,
        start_paddr: PhysAddr,
        size: usize,
        flags: MappingFlags,
        align: PageSize,
    ) -> AxResult {
        self.validate_region(start_vaddr, size, align)?;

        if !start_paddr.is_aligned(align) {
            return ax_err!(InvalidInput, "address not aligned");
        }

        let offset = start_vaddr.as_usize() - start_paddr.as_usize();
        let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset, align));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Adds a new allocation mapping.
    ///
    /// See [`Backend`] for more details about the mapping backends.
    ///
    /// The `flags` parameter indicates the mapping permissions and attributes.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
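    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the address and size
    /// are hypothetical):
    ///
    /// ```rust,ignore
    /// // Reserve 16 KiB of lazily-populated, user-accessible memory.
    /// aspace.map_alloc(
    ///     VirtAddr::from(0x10_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
    ///     false, // populate on first page fault
    ///     PageSize::Size4K,
    /// )?;
    /// ```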
    pub fn map_alloc(
        &mut self,
        start: VirtAddr,
        size: usize,
        flags: MappingFlags,
        populate: bool,
        align: PageSize,
    ) -> AxResult {
        self.validate_region(start, size, align)?;

        let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate, align));
        self.areas
            .map(area, &mut self.pt, false)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Ensures that the specified virtual memory region is fully mapped.
    ///
    /// This function walks through the given virtual address range and attempts to ensure
    /// that every page is mapped. If a page is not mapped and the corresponding area allows
    /// on-demand population (`populate == false`), the backend's page-fault handler is
    /// invoked to map it. If `access_flags` contains `WRITE`, copy-on-write (COW) logic is
    /// applied to already mapped pages that must be copied before they can be written.
    ///
    /// # Parameters
    ///
    /// - `start`: The starting virtual address of the region to map, which must be page-aligned.
    /// - `size`: The size (in bytes) of the region, which must also be page-aligned.
    /// - `access_flags`: The intended access type for the region.
    ///
    /// # Returns
    ///
    /// Returns `Ok(())` if the entire region is successfully mapped, or an appropriate
    /// `AxError` variant (`NoMemory`, `BadAddress`) on failure.
    ///
    /// # Errors
    ///
    /// - `AxError::NoMemory`: Failed to allocate memory.
    /// - `AxError::BadAddress`: An invalid mapping state was detected.
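    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; assumes the region was
    /// previously created with `map_alloc(.., populate = false, ..)` at a
    /// hypothetical address):
    ///
    /// ```rust,ignore
    /// // Ensure the whole 16 KiB region is backed by physical frames before
    /// // handing it to code that cannot tolerate page faults.
    /// aspace.populate_area(VirtAddr::from(0x10_0000), 0x4000, MappingFlags::WRITE)?;
    /// ```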
    pub fn populate_area(
        &mut self,
        mut start: VirtAddr,
        size: usize,
        _access_flags: MappingFlags,
    ) -> AxResult {
        self.validate_region(start, size, PageSize::Size4K)?;
        let end = start + size;

        for area in self.areas.iter() {
            if start >= area.end() {
                continue;
            }

            if start < area.start() {
                // If the area is not fully mapped, we return ENOMEM.
                return ax_err!(NoMemory);
            }

            let backend = area.backend();
            if let Backend::Alloc { populate, align } = *backend {
                for addr in PageIterWrapper::new(
                    start.align_down(align),
                    end.align_up(align).min(area.end()),
                    align,
                )
                .unwrap()
                {
                    match self.pt.query(addr) {
                        #[allow(unused_variables)]
                        Ok((paddr, flags, page_size)) => {
                            #[cfg(feature = "cow")]
                            {
                                // The page is already mapped. If it is writable there is
                                // nothing to do; otherwise a write intention requires
                                // breaking COW now.
                                if flags.contains(MappingFlags::WRITE) {
                                    continue;
                                } else if _access_flags.contains(MappingFlags::WRITE)
                                    && !Self::handle_cow_fault(
                                        addr,
                                        paddr,
                                        // Pass the area's flags (which contain WRITE) so the
                                        // handler can restore write access on the new mapping.
                                        area.flags(),
                                        page_size,
                                        &mut self.pt,
                                    )
                                {
                                    return Err(AxError::NoMemory);
                                }
                            }
                        }
                        // If the page is not mapped, try to map it.
                        Err(PagingError::NotMapped) => {
                            if !populate {
                                if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) {
                                    return Err(AxError::NoMemory);
                                }
                            } else {
                                return Err(AxError::BadAddress);
                            }
                        }
                        Err(_) => return Err(AxError::BadAddress),
                    };
                }
            }
            start = area.end();
            assert!(start.is_aligned(PageSize::Size4K));
            if start >= end {
                return Ok(());
            }
        }

        // start < end
        // If the area is not fully mapped, we return ENOMEM.
        ax_err!(NoMemory)
    }

    /// Removes mappings within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
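    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the address is
    /// hypothetical):
    ///
    /// ```rust,ignore
    /// // Unmap the first two pages of a previously mapped region.
    /// aspace.unmap(VirtAddr::from(0x10_0000), 0x2000)?;
    /// ```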
    pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
        self.validate_region(start, size, PageSize::Size4K)?;

        let end = start + size;
        for area in self
            .areas
            .iter()
            .skip_while(move |a| a.end() <= start)
            .take_while(move |a| a.start() < end)
        {
            let area_align = match *area.backend() {
                Backend::Alloc { populate: _, align } => align,
                Backend::Linear {
                    pa_va_offset: _,
                    align,
                } => align,
            };

            let unmap_start = start.max(area.start());
            let unmap_size = end.min(area.end()) - unmap_start;
            if !unmap_start.is_aligned(area_align) || !is_aligned(unmap_size, area_align.into()) {
                return ax_err!(InvalidInput, "address not aligned");
            }
        }

        self.areas
            .unmap(start, size, &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;
        Ok(())
    }

    /// Removes all user area mappings from the address space.
    pub fn unmap_user_areas(&mut self) -> AxResult {
        self.areas.clear(&mut self.pt).unwrap();
        Ok(())
    }

    /// Processes data in this area with the given function.
    ///
    /// It currently supports reading and writing data within the given interval.
    ///
    /// # Arguments
    /// - `start`: The start virtual address to process.
    /// - `size`: The size of the data to process.
    /// - `f`: The function to process the data, whose arguments are the start virtual address,
    ///   the offset and the size of the data.
    ///
    /// # Notes
    ///   The caller must ensure that the operation is permitted.
    fn process_area_data<F>(&self, start: VirtAddr, size: usize, align: PageSize, f: F) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        Self::process_area_data_with_page_table(&self.pt, &self.va_range, start, size, align, f)
    }

    fn process_area_data_with_page_table<F>(
        pt: &PageTable,
        va_range: &VirtAddrRange,
        start: VirtAddr,
        size: usize,
        align: PageSize,
        mut f: F,
    ) -> AxResult
    where
        F: FnMut(VirtAddr, usize, usize),
    {
        if !va_range.contains_range(VirtAddrRange::from_start_size(start, size)) {
            return ax_err!(InvalidInput, "address out of range");
        }
        let mut cnt = 0;
        // If `start` is already aligned, aligning it down and up yields the same address.
        let end_align_up = (start + size).align_up(align);
        let start_addr = start.align_down(align);
        for vaddr in PageIterWrapper::new(start_addr, end_align_up, align)
            .expect("Failed to create page iterator")
        {
            let (mut paddr, _, _) = pt.query(vaddr).map_err(|_| AxError::BadAddress)?;

            let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);

            if copy_size == 0 {
                break;
            }
            if vaddr == start.align_down(align) && start.align_offset(align) != 0 {
                let align_offset = start.align_offset(align);
                copy_size = copy_size.min(align as usize - align_offset);
                paddr += align_offset;
            }
            f(phys_to_virt(paddr), cnt, copy_size);
            cnt += copy_size;
        }
        Ok(())
    }

    /// Reads data from the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to read.
    /// * `align` - The page size (alignment) of the underlying mapping.
    /// * `buf` - The buffer to store the data.
    pub fn read(&self, start: VirtAddr, align: PageSize, buf: &mut [u8]) -> AxResult {
        self.process_area_data(start, buf.len(), align, |src, offset, read_size| unsafe {
            core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
        })
    }

    /// Writes data to the address space.
    ///
    /// # Arguments
    ///
    /// * `start` - The start virtual address to write.
    /// * `align` - The page size (alignment) of the underlying mapping.
    /// * `buf` - The buffer to write to the address space.
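    ///
    /// # Example
    ///
    /// A minimal round-trip sketch (not compiled as a doctest; assumes the
    /// region at the hypothetical address is mapped and accessible):
    ///
    /// ```rust,ignore
    /// let va = VirtAddr::from(0x10_0000);
    /// aspace.write(va, PageSize::Size4K, b"hello")?;
    /// let mut buf = [0u8; 5];
    /// aspace.read(va, PageSize::Size4K, &mut buf)?;
    /// assert_eq!(&buf, b"hello");
    /// ```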
    pub fn write(&self, start: VirtAddr, align: PageSize, buf: &[u8]) -> AxResult {
        self.process_area_data(start, buf.len(), align, |dst, offset, write_size| unsafe {
            core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
        })
    }

    /// Updates the mapping flags within the specified virtual address range.
    ///
    /// Returns an error if the address range is out of the address space or not
    /// aligned.
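    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the address, size,
    /// and flags are hypothetical):
    ///
    /// ```rust,ignore
    /// // Drop write permission from a 16 KiB region, e.g. after loading code into it.
    /// aspace.protect(
    ///     VirtAddr::from(0x10_0000),
    ///     0x4000,
    ///     MappingFlags::READ | MappingFlags::EXECUTE | MappingFlags::USER,
    /// )?;
    /// ```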
    pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
        // Populate the area first, which also checks the address range for us.
        self.populate_area(start, size, flags)?;

        self.areas
            .protect(start, size, |_| Some(flags), &mut self.pt)
            .map_err(mapping_err_to_ax_err)?;

        Ok(())
    }

    /// Removes all mappings in the address space.
    pub fn clear(&mut self) {
        self.areas.clear(&mut self.pt).unwrap();
    }

    /// Checks whether an access to the specified memory region is valid.
    ///
    /// Returns `true` if the memory region given by `range` is all mapped and
    /// has proper permission flags (i.e. containing `access_flags`).
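    ///
    /// # Example
    ///
    /// A minimal usage sketch (not compiled as a doctest; the user-buffer range
    /// and the surrounding syscall handler are hypothetical):
    ///
    /// ```rust,ignore
    /// // Validate a user buffer before copying from it in a syscall handler.
    /// let range = VirtAddrRange::from_start_size(VirtAddr::from(0x10_0000), 64);
    /// if !aspace.check_region_access(range, MappingFlags::READ | MappingFlags::USER) {
    ///     return Err(AxError::BadAddress);
    /// }
    /// ```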
    pub fn check_region_access(
        &self,
        mut range: VirtAddrRange,
        access_flags: MappingFlags,
    ) -> bool {
        for area in self.areas.iter() {
            if area.end() <= range.start {
                continue;
            }
            if area.start() > range.start {
                return false;
            }

            // This area overlaps with the memory region.
            if !area.flags().contains(access_flags) {
                return false;
            }

            range.start = area.end();
            if range.is_empty() {
                return true;
            }
        }

        false
    }

    /// Handles a page fault at the given address.
    ///
    /// `access_flags` indicates the access type that caused the page fault.
    ///
    /// Returns `true` if the page fault is handled successfully (not a real
    /// fault).
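    ///
    /// # Example
    ///
    /// A minimal sketch of how a trap handler might call this (not compiled as
    /// a doctest; `fault_vaddr` and the surrounding handler are hypothetical):
    ///
    /// ```rust,ignore
    /// // On a write fault reported by the MMU:
    /// if !aspace.handle_page_fault(fault_vaddr, MappingFlags::WRITE) {
    ///     // Not a lazy-mapping or COW fault: treat it as a real fault
    ///     // (e.g., terminate the faulting task).
    ///     terminate_current_task();
    /// }
    /// ```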
    pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
        if !self.va_range.contains(vaddr) {
            return false;
        }
        if let Some(area) = self.areas.find(vaddr) {
            let orig_flags = area.flags();
            if orig_flags.contains(access_flags) {
                // Two cases enter this branch:
                // - shared pages (there is a shared page in the VMA)
                // - copy-on-write (COW) pages
                #[cfg(feature = "cow")]
                if access_flags.contains(MappingFlags::WRITE)
                    && let Ok((paddr, _, page_size)) = self.pt.query(vaddr)
                {
                    // The fault was caused by a write, the PTE exists, and the
                    // page is not shared memory, so resolve it as a COW fault.
                    return Self::handle_cow_fault(
                        vaddr,
                        paddr,
                        orig_flags,
                        page_size,
                        &mut self.pt,
                    );
                }

                return area
                    .backend()
                    .handle_page_fault(vaddr, orig_flags, &mut self.pt);
            }
        }
        false
    }

    /// Attempts to clone the current address space into a new one.
    ///
    /// This method creates a new empty address space with the same base and size,
    /// then iterates over all memory areas in the original address space to copy or
    /// share their mappings into the new one.
    ///
    /// ### Behavior with `cow` Feature Enabled
    /// - For memory areas backed by [`Backend::Alloc`], the `populate` flag is forced
    ///   to `false` to avoid preemptive physical allocation in the new space.
    /// - All writable mappings have their `WRITE` flag removed, enforcing
    ///   Copy-On-Write (COW) semantics.
    /// - Shared pages increase their reference count via `frame_table().inc_ref()`,
    ///   and both the original and the cloned page tables are updated:
    ///   - The original page's protection flags are modified to remove write access.
    ///   - The new address space maps the same physical page with the new flags.
    ///
    /// ### Behavior without `cow` Feature
    /// - Each mapped page in the original address space is copied into the
    ///   corresponding address in the new address space.
    /// - If the target address in the new space is not mapped, a page fault will be
    ///   handled, and memory is allocated before copying.
    /// - The actual copying is done using [`core::ptr::copy_nonoverlapping`] at the
    ///   physical address level.
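    ///
    /// # Example
    ///
    /// A minimal fork-style sketch (not compiled as a doctest; the surrounding
    /// task-creation code is hypothetical):
    ///
    /// ```rust,ignore
    /// // Duplicate the parent's address space for a child task. With the `cow`
    /// // feature, pages are shared read-only and copied on the first write.
    /// let child_aspace = parent_aspace.try_clone()?;
    /// spawn_user_task(child_aspace);
    /// ```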
    pub fn try_clone(&mut self) -> AxResult<Self> {
        let mut new_aspace = Self::new_empty(self.base(), self.size())?;

        for area in self.areas.iter() {
            let backend = match area.backend() {
                #[cfg(feature = "cow")]
                Backend::Alloc { populate: _, align } => {
                    // Force `populate = false` so that the subsequent `new_aspace.areas.map`
                    // does not eagerly map page table entries for these virtual addresses.
                    Backend::new_alloc(false, *align)
                }
                other => other.clone(),
            };

            // Remap the memory area in the new address space.
            let new_area = MemoryArea::new(area.start(), area.size(), area.flags(), backend);
            new_aspace
                .areas
                .map(new_area, &mut new_aspace.pt, false)
                .map_err(mapping_err_to_ax_err)?;

            let align = match area.backend() {
                Backend::Alloc { align, .. } => *align,
                // Linear-backed regions are usually allocated by the kernel and are shared
                Backend::Linear { .. } => continue,
            };

            #[cfg(feature = "cow")]
            let cow_flags = area.flags() - MappingFlags::WRITE;

            for vaddr in PageIterWrapper::new(area.start(), area.end(), align)
                .expect("Failed to create page iterator")
            {
                // Copy data from old memory area to new memory area.
                match self.pt.query(vaddr) {
                    Ok((paddr, _, page_size)) => {
                        #[cfg(not(feature = "cow"))]
                        {
                            let new_addr = match new_aspace.pt.query(vaddr) {
                                Ok((paddr, _, _)) => paddr,
                                // If the page is not mapped, try to map it.
                                Err(PagingError::NotMapped) => {
                                    if !area.backend().handle_page_fault(
                                        vaddr,
                                        area.flags(),
                                        &mut new_aspace.pt,
                                    ) {
                                        return Err(AxError::NoMemory);
                                    }
                                    match new_aspace.pt.query(vaddr) {
                                        Ok((paddr, _, _)) => paddr,
                                        Err(_) => return Err(AxError::BadAddress),
                                    }
                                }
                                Err(_) => return Err(AxError::BadAddress),
                            };
                            unsafe {
                                core::ptr::copy_nonoverlapping(
                                    phys_to_virt(paddr).as_ptr(),
                                    phys_to_virt(new_addr).as_mut_ptr(),
                                    page_size.into(),
                                )
                            };
                        }

                        // If the page is mapped in the old page table:
                        // - Update its permissions in the old page table using `cow_flags`.
                        // - Map the same physical page into the new page table at the same
                        //   virtual address, with the same page size and `cow_flags`.
                        #[cfg(feature = "cow")]
                        {
                            frame_table().inc_ref(paddr);

                            self.pt
                                .protect(vaddr, cow_flags)
                                .map(|(_, tlb)| tlb.flush())
                                .expect("protect failed");
                            new_aspace
                                .pt
                                .map(vaddr, paddr, page_size, cow_flags)
                                .map(|tlb| tlb.flush())
                                .expect("map failed");

                            continue;
                        }
                    }
                    // If the page is not mapped, skip it.
                    Err(PagingError::NotMapped) => continue,
                    Err(_) => return Err(AxError::BadAddress),
                };
            }
        }
        Ok(new_aspace)
    }

    /// Handles a Copy-On-Write (COW) page fault.
    ///
    /// # Arguments
    /// - `vaddr`: The virtual address that triggered the fault.
    /// - `paddr`: The physical address that triggered the fault.
    /// - `flags`: The VMA flags of the faulting area.
    /// - `align`: Alignment requirement for the allocated memory, must be a multiple of 4KiB.
    /// - `pt`: A mutable reference to the page table that should be updated.
    ///
    /// # Returns
    /// - `true` if the page fault was handled successfully.
    /// - `false` if the fault handling failed (e.g., allocation failed or invalid ref count).
    #[cfg(feature = "cow")]
    fn handle_cow_fault(
        vaddr: VirtAddr,
        paddr: PhysAddr,
        flags: MappingFlags,
        align: PageSize,
        pt: &mut PageTable,
    ) -> bool {
        assert!(flags.contains(MappingFlags::WRITE));

        let paddr = paddr.align_down(align);

        match frame_table().ref_count(paddr) {
            0 => unreachable!(),
            // There is only one AddrSpace reference to the page,
            // so there is no need to copy it.
            1 => pt.protect(vaddr, flags).map(|(_, tlb)| tlb.flush()).is_ok(),
            // Allocate a new page, copy the contents of the original page into it,
            // and remap the virtual address to the physical address of the new page.
            2.. => match alloc_frame(false, align) {
                Some(new_frame) => {
                    unsafe {
                        core::ptr::copy_nonoverlapping(
                            phys_to_virt(paddr).as_ptr(),
                            phys_to_virt(new_frame).as_mut_ptr(),
                            align.into(),
                        )
                    };

                    dealloc_frame(paddr, align);

                    pt.remap(vaddr, new_frame, flags)
                        .map(|(_, tlb)| {
                            tlb.flush();
                        })
                        .is_ok()
                }
                None => false,
            },
        }
    }
}

impl fmt::Debug for AddrSpace {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.debug_struct("AddrSpace")
            .field("va_range", &self.va_range)
            .field("page_table_root", &self.pt.root_paddr())
            .field("areas", &self.areas)
            .finish()
    }
}

impl Drop for AddrSpace {
    fn drop(&mut self) {
        self.clear();
    }
}