axdma/dma.rs

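//! Coherent (uncached) DMA memory allocation.
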
use core::{alloc::Layout, ptr::NonNull};

use allocator::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
use axalloc::{DefaultByteAllocator, global_allocator};
use axhal::{mem::virt_to_phys, paging::MappingFlags};
use kspin::SpinNoIrq;
use log::{debug, error};
use memory_addr::{PAGE_SIZE_4K, VirtAddr, va};

use crate::{BusAddr, DMAInfo, phys_to_bus};

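/// The global coherent DMA allocator, protected by an IRQ-disabling spinlock.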
pub(crate) static ALLOCATOR: SpinNoIrq<DmaAllocator> = SpinNoIrq::new(DmaAllocator::new());

/// A byte-granularity allocator for coherent (uncached) DMA memory.
///
/// Requests of at least one page are served directly by the global page
/// allocator; smaller requests are served from an internal byte allocator
/// backed by uncached pages.
pub(crate) struct DmaAllocator {
    alloc: DefaultByteAllocator,
}

impl DmaAllocator {
    pub const fn new() -> Self {
        Self {
            alloc: DefaultByteAllocator::new(),
        }
    }

    /// Allocates a block of coherent (uncached) DMA memory described by
    /// `layout`. Returns a [`DMAInfo`] holding both the CPU virtual address
    /// and the bus address of the region.
    ///
    /// Requests of at least one page go straight to the global page
    /// allocator. Smaller requests first try the internal byte allocator;
    /// if it is out of memory, more uncached pages are requested from the
    /// global page allocator and added to the byte allocator.
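    ///
    /// # Safety
    ///
    /// The returned memory is uninitialized, and the caller must release it
    /// with [`DmaAllocator::dealloc_coherent`] using the same `layout`.
    ///
    /// # Example
    ///
    /// A minimal usage sketch (marked `ignore`, so it is not compiled as a
    /// doctest); `device.set_ring_base` is a hypothetical driver call that
    /// hands the bus address to the hardware:
    ///
    /// ```ignore
    /// let layout = Layout::from_size_align(256, 8).unwrap();
    /// let dma = unsafe { ALLOCATOR.lock().alloc_coherent(layout)? };
    /// device.set_ring_base(dma.bus_addr); // the device sees the bus address
    /// unsafe { ALLOCATOR.lock().dealloc_coherent(dma, layout) };
    /// ```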
    pub unsafe fn alloc_coherent(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        if layout.size() >= PAGE_SIZE_4K {
            self.alloc_coherent_pages(layout)
        } else {
            self.alloc_coherent_bytes(layout)
        }
    }

    fn alloc_coherent_bytes(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        let mut is_expanded = false;
        loop {
            if let Ok(data) = self.alloc.alloc(layout) {
                let cpu_addr = va!(data.as_ptr() as usize);
                return Ok(DMAInfo {
                    cpu_addr: data,
                    bus_addr: virt_to_bus(cpu_addr),
                });
            }
            // Expand the byte allocator at most once per call; a second
            // failure means we are genuinely out of memory.
            if is_expanded {
                return Err(AllocError::NoMemory);
            }
            is_expanded = true;
            // Grow by up to 4 pages, bounded by what is currently available.
            let available_pages = global_allocator().available_pages();
            let num_pages = 4.min(available_pages);
            let expand_size = num_pages * PAGE_SIZE_4K;
            let vaddr_raw = global_allocator().alloc_pages(num_pages, PAGE_SIZE_4K)?;
            let vaddr = va!(vaddr_raw);
            // Remap the new pages as uncached so that they are coherent
            // with device DMA.
            self.update_flags(
                vaddr,
                num_pages,
                MappingFlags::READ | MappingFlags::WRITE | MappingFlags::UNCACHED,
            )?;
            self.alloc
                .add_memory(vaddr_raw, expand_size)
                .inspect_err(|e| error!("add memory failed: {e:?}"))?;
            debug!("expanded memory @{vaddr:#X}, size: {expand_size:#X} bytes");
        }
    }

    fn alloc_coherent_pages(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        let num_pages = layout_pages(&layout);
        let vaddr_raw =
            global_allocator().alloc_pages(num_pages, PAGE_SIZE_4K.max(layout.align()))?;
        let vaddr = va!(vaddr_raw);
        // Remap the pages as uncached so that they are coherent with
        // device DMA.
        self.update_flags(
            vaddr,
            num_pages,
            MappingFlags::READ | MappingFlags::WRITE | MappingFlags::UNCACHED,
        )?;
        Ok(DMAInfo {
            // SAFETY: a successful `alloc_pages` returns a non-null address.
            cpu_addr: unsafe { NonNull::new_unchecked(vaddr_raw as *mut u8) },
            bus_addr: virt_to_bus(vaddr),
        })
    }

    /// Changes the mapping flags of `num_pages` pages starting at `vaddr`
    /// in the kernel address space.
    fn update_flags(
        &mut self,
        vaddr: VirtAddr,
        num_pages: usize,
        flags: MappingFlags,
    ) -> AllocResult<()> {
        let size = num_pages * PAGE_SIZE_4K;
        axmm::kernel_aspace()
            .lock()
            .protect(vaddr, size, flags)
            .map_err(|e| {
                error!("changing page table flags failed: {e:?}");
                AllocError::NoMemory
            })
    }

    /// Frees a region previously returned by [`DmaAllocator::alloc_coherent`].
    ///
    /// Page-sized (or larger) regions are remapped as cached and handed back
    /// to the global page allocator; smaller regions are returned to the
    /// internal byte allocator.
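    ///
    /// # Safety
    ///
    /// `dma` must have been returned by [`DmaAllocator::alloc_coherent`] on
    /// this allocator with the same `layout`, and the region must not be
    /// accessed after this call.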
    pub unsafe fn dealloc_coherent(&mut self, dma: DMAInfo, layout: Layout) {
        if layout.size() >= PAGE_SIZE_4K {
            let num_pages = layout_pages(&layout);
            let virt_raw = dma.cpu_addr.as_ptr() as usize;
            // Restore the default (cached) mapping flags before handing the
            // pages back, so they cannot be reallocated while still mapped
            // as uncached.
            let _ = self.update_flags(
                va!(virt_raw),
                num_pages,
                MappingFlags::READ | MappingFlags::WRITE,
            );
            global_allocator().dealloc_pages(virt_raw, num_pages);
        } else {
            self.alloc.dealloc(dma.cpu_addr, layout)
        }
    }
}

/// Converts a kernel virtual address to the bus address seen by devices.
const fn virt_to_bus(addr: VirtAddr) -> BusAddr {
    let paddr = virt_to_phys(addr);
    phys_to_bus(paddr)
}

/// Returns the number of 4 KiB pages needed to hold `layout`.
const fn layout_pages(layout: &Layout) -> usize {
    memory_addr::align_up_4k(layout.size()) / PAGE_SIZE_4K
}