axmm/aspace.rs
use core::fmt;
use axerrno::{AxError, AxResult, ax_err};
use axhal::mem::phys_to_virt;
use axhal::paging::{MappingFlags, PageTable, PagingError};
use memory_addr::{
MemoryAddr, PAGE_SIZE_4K, PageIter4K, PhysAddr, VirtAddr, VirtAddrRange, is_aligned_4k,
};
use memory_set::{MemoryArea, MemorySet};
use crate::backend::Backend;
use crate::mapping_err_to_ax_err;
/// The virtual memory address space.
pub struct AddrSpace {
va_range: VirtAddrRange,
areas: MemorySet<Backend>,
pt: PageTable,
}
impl AddrSpace {
/// Returns the address space base.
pub const fn base(&self) -> VirtAddr {
self.va_range.start
}
/// Returns the address space end.
pub const fn end(&self) -> VirtAddr {
self.va_range.end
}
/// Returns the address space size.
pub fn size(&self) -> usize {
self.va_range.size()
}
/// Returns the reference to the inner page table.
pub const fn page_table(&self) -> &PageTable {
&self.pt
}
/// Returns the root physical address of the inner page table.
pub const fn page_table_root(&self) -> PhysAddr {
self.pt.root_paddr()
}
/// Checks if the address space contains the given address range.
pub fn contains_range(&self, start: VirtAddr, size: usize) -> bool {
self.va_range
.contains_range(VirtAddrRange::from_start_size(start, size))
}
/// Creates a new empty address space.
pub(crate) fn new_empty(base: VirtAddr, size: usize) -> AxResult<Self> {
Ok(Self {
va_range: VirtAddrRange::from_start_size(base, size),
areas: MemorySet::new(),
pt: PageTable::try_new().map_err(|_| AxError::NoMemory)?,
})
}
/// Copies page table mappings from another address space.
///
/// Only the page table entries are copied, not the memory regions themselves;
/// this is usually used to copy a portion of the kernel space mappings into a
/// user address space.
///
/// Returns an error if the two address spaces overlap.
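///
/// # Example
///
/// A minimal sketch; how the kernel and user address spaces are obtained is an
/// assumption of this example, not part of this method:
///
/// ```rust,ignore
/// // Make the kernel mappings visible in a newly created user address space.
/// user_aspace.copy_mappings_from(&kernel_aspace)?;
/// ```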
pub fn copy_mappings_from(&mut self, other: &AddrSpace) -> AxResult {
if self.va_range.overlaps(other.va_range) {
return ax_err!(InvalidInput, "address space overlap");
}
self.pt.copy_from(&other.pt, other.base(), other.size());
Ok(())
}
fn validate_region(&self, start: VirtAddr, size: usize) -> AxResult {
if !self.contains_range(start, size) {
return ax_err!(InvalidInput, "address out of range");
}
if !start.is_aligned_4k() || !is_aligned_4k(size) {
return ax_err!(InvalidInput, "address not aligned");
}
Ok(())
}
/// Finds a free area that can accommodate the given size.
///
/// The search starts from the given hint address, and the area should be within the given limit range.
///
/// Returns the start address of the free area, or `None` if no such area is found.
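///
/// # Example
///
/// An illustrative sketch; `aspace`, the hint address, and the limit range are
/// assumptions chosen for the example:
///
/// ```rust,ignore
/// use memory_addr::{VirtAddr, VirtAddrRange};
///
/// // Look for a 16 KiB hole, preferably near the hint, anywhere in the space.
/// let hint = VirtAddr::from(0x1000_0000);
/// let limit = VirtAddrRange::from_start_size(aspace.base(), aspace.size());
/// if let Some(start) = aspace.find_free_area(hint, 0x4000, limit) {
///     // `start` can now be passed to `map_alloc` or `map_linear`.
/// }
/// ```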
pub fn find_free_area(
&self,
hint: VirtAddr,
size: usize,
limit: VirtAddrRange,
) -> Option<VirtAddr> {
self.areas.find_free_area(hint, size, limit)
}
/// Adds a new linear mapping.
///
/// See [`Backend`] for more details about the mapping backends.
///
/// The `flags` parameter indicates the mapping permissions and attributes.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
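///
/// # Example
///
/// A minimal sketch; `aspace` and the physical/virtual addresses are
/// illustrative assumptions:
///
/// ```rust,ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::{PhysAddr, VirtAddr};
///
/// // Linear (fixed-offset) mapping of a 16 KiB physical region (addresses are made up).
/// aspace.map_linear(
///     VirtAddr::from(0xffff_0000_1000_0000),
///     PhysAddr::from(0x1000_0000),
///     0x4000,
///     MappingFlags::READ | MappingFlags::WRITE,
/// )?;
/// ```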
pub fn map_linear(
&mut self,
start_vaddr: VirtAddr,
start_paddr: PhysAddr,
size: usize,
flags: MappingFlags,
) -> AxResult {
self.validate_region(start_vaddr, size)?;
if !start_paddr.is_aligned_4k() {
return ax_err!(InvalidInput, "address not aligned");
}
let offset = start_vaddr.as_usize() - start_paddr.as_usize();
let area = MemoryArea::new(start_vaddr, size, flags, Backend::new_linear(offset));
self.areas
.map(area, &mut self.pt, false)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Adds a new allocation mapping.
///
/// See [`Backend`] for more details about the mapping backends.
///
/// The `flags` parameter indicates the mapping permissions and attributes.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
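///
/// # Example
///
/// A minimal sketch; `aspace`, the address, and the size are illustrative
/// assumptions:
///
/// ```rust,ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::VirtAddr;
///
/// // Lazily-populated user area: frames are allocated on first access via the
/// // page-fault handler because `populate` is `false`.
/// aspace.map_alloc(
///     VirtAddr::from(0x4000_0000),
///     0x10_0000,
///     MappingFlags::READ | MappingFlags::WRITE | MappingFlags::USER,
///     false,
/// )?;
/// ```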
pub fn map_alloc(
&mut self,
start: VirtAddr,
size: usize,
flags: MappingFlags,
populate: bool,
) -> AxResult {
self.validate_region(start, size)?;
let area = MemoryArea::new(start, size, flags, Backend::new_alloc(populate));
self.areas
.map(area, &mut self.pt, false)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Populates the area with physical frames, returning an error if the range
/// contains an unmapped gap or the frames cannot be allocated.
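///
/// # Example
///
/// A minimal sketch following the `map_alloc` example above; the address and
/// size are illustrative assumptions:
///
/// ```rust,ignore
/// use memory_addr::VirtAddr;
///
/// // Eagerly back a lazily mapped area with physical frames, e.g. before
/// // handing the memory to something that cannot tolerate page faults.
/// aspace.populate_area(VirtAddr::from(0x4000_0000), 0x10_0000)?;
/// ```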
pub fn populate_area(&mut self, mut start: VirtAddr, size: usize) -> AxResult {
self.validate_region(start, size)?;
let end = start + size;
while let Some(area) = self.areas.find(start) {
let backend = area.backend();
if let Backend::Alloc { populate } = backend {
if !*populate {
for addr in PageIter4K::new(start, area.end().min(end)).unwrap() {
match self.pt.query(addr) {
Ok(_) => {}
// If the page is not mapped, try to map it.
Err(PagingError::NotMapped) => {
if !backend.handle_page_fault(addr, area.flags(), &mut self.pt) {
return Err(AxError::NoMemory);
}
}
Err(_) => return Err(AxError::BadAddress),
};
}
}
}
start = area.end();
assert!(start.is_aligned_4k());
if start >= end {
break;
}
}
if start < end {
// If the range is not fully covered by memory areas, return `NoMemory`.
return ax_err!(NoMemory);
}
Ok(())
}
/// Removes mappings within the specified virtual address range.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
pub fn unmap(&mut self, start: VirtAddr, size: usize) -> AxResult {
self.validate_region(start, size)?;
self.areas
.unmap(start, size, &mut self.pt)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Removes all user area mappings from the address space.
pub fn unmap_user_areas(&mut self) -> AxResult {
for area in self.areas.iter() {
assert!(area.start().is_aligned_4k());
assert!(area.size() % PAGE_SIZE_4K == 0);
assert!(area.flags().contains(MappingFlags::USER));
assert!(
self.va_range
.contains_range(VirtAddrRange::from_start_size(area.start(), area.size())),
"MemorySet contains out-of-va-range area"
);
}
self.areas.clear(&mut self.pt).unwrap();
Ok(())
}
/// Processes data in the given virtual address range with the provided function.
///
/// It is currently used to implement reading from and writing to the address space.
///
/// # Arguments
/// - `start`: The start virtual address to process.
/// - `size`: The size of the data to process.
/// - `f`: The function used to process the data. It is called once per page with the
/// kernel-accessible virtual address of the chunk, the chunk's offset within the whole
/// range, and the chunk size.
///
/// # Notes
/// The caller must ensure that the permission of the operation is allowed.
fn process_area_data<F>(&self, start: VirtAddr, size: usize, f: F) -> AxResult
where
F: FnMut(VirtAddr, usize, usize),
{
Self::process_area_data_with_page_table(&self.pt, &self.va_range, start, size, f)
}
fn process_area_data_with_page_table<F>(
pt: &PageTable,
va_range: &VirtAddrRange,
start: VirtAddr,
size: usize,
mut f: F,
) -> AxResult
where
F: FnMut(VirtAddr, usize, usize),
{
if !va_range.contains_range(VirtAddrRange::from_start_size(start, size)) {
return ax_err!(InvalidInput, "address out of range");
}
let mut cnt = 0;
// Note: if `start` is 4K-aligned, aligning it down yields the same address as aligning it up.
let end_align_up = (start + size).align_up_4k();
for vaddr in PageIter4K::new(start.align_down_4k(), end_align_up)
.expect("Failed to create page iterator")
{
let (mut paddr, _, _) = pt.query(vaddr).map_err(|_| AxError::BadAddress)?;
let mut copy_size = (size - cnt).min(PAGE_SIZE_4K);
if copy_size == 0 {
break;
}
if vaddr == start.align_down_4k() && start.align_offset_4k() != 0 {
let align_offset = start.align_offset_4k();
copy_size = copy_size.min(PAGE_SIZE_4K - align_offset);
paddr += align_offset;
}
f(phys_to_virt(paddr), cnt, copy_size);
cnt += copy_size;
}
Ok(())
}
/// Reads data from the address space.
///
/// # Arguments
///
/// * `start` - The start virtual address to read from.
/// * `buf` - The buffer to store the data.
pub fn read(&self, start: VirtAddr, buf: &mut [u8]) -> AxResult {
self.process_area_data(start, buf.len(), |src, offset, read_size| unsafe {
core::ptr::copy_nonoverlapping(src.as_ptr(), buf.as_mut_ptr().add(offset), read_size);
})
}
/// Writes data to the address space.
///
/// # Arguments
///
/// * `start` - The start virtual address to write to.
/// * `buf` - The buffer containing the data to write.
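///
/// # Example
///
/// A minimal sketch showing a write followed by a read-back; `aspace` and the
/// address are illustrative assumptions:
///
/// ```rust,ignore
/// use memory_addr::VirtAddr;
///
/// let vaddr = VirtAddr::from(0x4000_0000);
/// aspace.write(vaddr, b"hello")?;
///
/// let mut buf = [0u8; 5];
/// aspace.read(vaddr, &mut buf)?;
/// assert_eq!(&buf, b"hello");
/// ```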
pub fn write(&self, start: VirtAddr, buf: &[u8]) -> AxResult {
self.process_area_data(start, buf.len(), |dst, offset, write_size| unsafe {
core::ptr::copy_nonoverlapping(buf.as_ptr().add(offset), dst.as_mut_ptr(), write_size);
})
}
/// Updates the mapping flags within the specified virtual address range.
///
/// Returns an error if the address range is out of the address space or not
/// aligned.
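///
/// # Example
///
/// A minimal sketch; the range is an illustrative assumption (e.g. making a
/// region of loaded code executable but no longer writable):
///
/// ```rust,ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::VirtAddr;
///
/// aspace.protect(
///     VirtAddr::from(0x2000_0000),
///     0x8000,
///     MappingFlags::READ | MappingFlags::EXECUTE | MappingFlags::USER,
/// )?;
/// ```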
pub fn protect(&mut self, start: VirtAddr, size: usize, flags: MappingFlags) -> AxResult {
// Populate the area first, which also checks the address range for us.
self.populate_area(start, size)?;
self.areas
.protect(start, size, |_| Some(flags), &mut self.pt)
.map_err(mapping_err_to_ax_err)?;
Ok(())
}
/// Removes all mappings in the address space.
pub fn clear(&mut self) {
self.areas.clear(&mut self.pt).unwrap();
}
/// Checks whether an access to the specified memory region is valid.
///
/// Returns `true` if the memory region given by `range` is fully mapped and
/// the flags of every area it touches contain `access_flags`.
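///
/// # Example
///
/// A minimal sketch, e.g. validating a user buffer before copying data into
/// it; the range is an illustrative assumption:
///
/// ```rust,ignore
/// use axhal::paging::MappingFlags;
/// use memory_addr::{VirtAddr, VirtAddrRange};
///
/// let range = VirtAddrRange::from_start_size(VirtAddr::from(0x4000_0000), 0x1000);
/// if !aspace.check_region_access(range, MappingFlags::USER | MappingFlags::WRITE) {
///     // Reject the request, e.g. with a bad-address error.
/// }
/// ```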
pub fn check_region_access(
&self,
mut range: VirtAddrRange,
access_flags: MappingFlags,
) -> bool {
for area in self.areas.iter() {
if area.end() <= range.start {
continue;
}
if area.start() > range.start {
return false;
}
// This area overlaps with the memory region
if !area.flags().contains(access_flags) {
return false;
}
range.start = area.end();
if range.is_empty() {
return true;
}
}
false
}
/// Handles a page fault at the given address.
///
/// `access_flags` indicates the access type that caused the page fault.
///
/// Returns `true` if the page fault is handled successfully (not a real
/// fault).
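///
/// # Example
///
/// A minimal sketch of how a trap handler might use this method; the handler
/// shape and the fault address are assumptions of the example:
///
/// ```rust,ignore
/// use axhal::paging::MappingFlags;
///
/// // In the page-fault trap handler (pseudocode):
/// if !aspace.handle_page_fault(fault_vaddr, MappingFlags::USER | MappingFlags::WRITE) {
///     // A real fault: deliver a signal or terminate the task.
/// }
/// ```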
pub fn handle_page_fault(&mut self, vaddr: VirtAddr, access_flags: MappingFlags) -> bool {
if !self.va_range.contains(vaddr) {
return false;
}
if let Some(area) = self.areas.find(vaddr) {
let orig_flags = area.flags();
if orig_flags.contains(access_flags) {
return area
.backend()
.handle_page_fault(vaddr, orig_flags, &mut self.pt);
}
}
false
}
/// Clones an [`AddrSpace`] by re-mapping all [`MemoryArea`]s into a new page table and copying the user-space data.
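///
/// # Example
///
/// A minimal sketch of a fork-like flow; the surrounding task logic is an
/// assumption of the example:
///
/// ```rust,ignore
/// // Duplicate the parent's address space for a child task.
/// let child_aspace = parent_aspace.clone_or_err()?;
/// ```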
pub fn clone_or_err(&mut self) -> AxResult<Self> {
let mut new_aspace = crate::new_user_aspace(self.base(), self.size())?;
for area in self.areas.iter() {
let backend = area.backend();
// Remap the memory area in the new address space.
let new_area =
MemoryArea::new(area.start(), area.size(), area.flags(), backend.clone());
new_aspace
.areas
.map(new_area, &mut new_aspace.pt, false)
.map_err(mapping_err_to_ax_err)?;
// Copy data from old memory area to new memory area.
for vaddr in
PageIter4K::new(area.start(), area.end()).expect("Failed to create page iterator")
{
let addr = match self.pt.query(vaddr) {
Ok((paddr, _, _)) => paddr,
// If the page is not mapped, skip it.
Err(PagingError::NotMapped) => continue,
Err(_) => return Err(AxError::BadAddress),
};
let new_addr = match new_aspace.pt.query(vaddr) {
Ok((paddr, _, _)) => paddr,
// If the page is not mapped, try to map it.
Err(PagingError::NotMapped) => {
if !backend.handle_page_fault(vaddr, area.flags(), &mut new_aspace.pt) {
return Err(AxError::NoMemory);
}
match new_aspace.pt.query(vaddr) {
Ok((paddr, _, _)) => paddr,
Err(_) => return Err(AxError::BadAddress),
}
}
Err(_) => return Err(AxError::BadAddress),
};
unsafe {
core::ptr::copy_nonoverlapping(
phys_to_virt(addr).as_ptr(),
phys_to_virt(new_addr).as_mut_ptr(),
PAGE_SIZE_4K,
)
};
}
}
Ok(new_aspace)
}
}
impl fmt::Debug for AddrSpace {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("AddrSpace")
.field("va_range", &self.va_range)
.field("page_table_root", &self.pt.root_paddr())
.field("areas", &self.areas)
.finish()
}
}
impl Drop for AddrSpace {
fn drop(&mut self) {
self.clear();
}
}