// axdma/dma.rs

use core::{alloc::Layout, ptr::NonNull};

use allocator::{AllocError, AllocResult, BaseAllocator, ByteAllocator};
use axalloc::{DefaultByteAllocator, global_allocator};
use axhal::{mem::virt_to_phys, paging::MappingFlags};
use kspin::SpinNoIrq;
use log::{debug, error};
use memory_addr::{PAGE_SIZE_4K, VirtAddr, va};

use crate::{BusAddr, DMAInfo, phys_to_bus};

/// The global allocator for coherent DMA memory.
pub(crate) static ALLOCATOR: SpinNoIrq<DmaAllocator> = SpinNoIrq::new(DmaAllocator::new());

/// A coherent DMA memory allocator: sub-page requests are served from an
/// uncached byte-allocator pool, page-sized and larger requests directly
/// from the global page allocator.
pub(crate) struct DmaAllocator {
    alloc: DefaultByteAllocator,
}

impl DmaAllocator {
    /// Creates an empty DMA allocator with no memory pool.
    pub const fn new() -> Self {
        Self {
            alloc: DefaultByteAllocator::new(),
        }
    }

    /// Allocates a region of coherent DMA memory described by `layout` and
    /// returns both its CPU virtual address and its device bus address.
    ///
    /// Requests smaller than a page are served from the coherent byte
    /// allocator; when it runs out of memory, more pages are taken from the
    /// global page allocator, remapped as uncached, and added to its pool.
    /// Requests of a page or more go directly to the global page allocator.
    ///
    /// # Safety
    ///
    /// The returned region must be released with [`Self::dealloc_coherent`]
    /// using the same `layout`.
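    ///
    /// # Example
    ///
    /// A minimal sketch of the intended call pattern (illustrative only;
    /// `ALLOCATOR` is crate-private, so this is not a runnable doctest):
    ///
    /// ```ignore
    /// let layout = Layout::from_size_align(256, 8).unwrap();
    /// let dma = unsafe { ALLOCATOR.lock().alloc_coherent(layout)? };
    /// // Program the device with `dma.bus_addr`; the CPU accesses the
    /// // buffer through `dma.cpu_addr`.
    /// unsafe { ALLOCATOR.lock().dealloc_coherent(dma, layout) };
    /// ```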
    pub unsafe fn alloc_coherent(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        if layout.size() >= PAGE_SIZE_4K {
            self.alloc_coherent_pages(layout)
        } else {
            self.alloc_coherent_bytes(layout)
        }
    }

    /// Serves a sub-page allocation from the byte allocator, expanding its
    /// pool from the global page allocator on demand.
    fn alloc_coherent_bytes(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        let mut is_expanded = false;
        loop {
            if let Ok(data) = self.alloc.alloc(layout) {
                let cpu_addr = va!(data.as_ptr() as usize);
                return Ok(DMAInfo {
                    cpu_addr: data,
                    bus_addr: virt_to_bus(cpu_addr),
                });
            } else {
                if is_expanded {
                    return Err(AllocError::NoMemory);
                }
                is_expanded = true;
                let available_pages = global_allocator().available_pages();
                // Expand by four pages at a time, capped by what the
                // global allocator has left.
                let num_pages = 4.min(available_pages);
                let expand_size = num_pages * PAGE_SIZE_4K;
                let vaddr_raw = global_allocator().alloc_pages(num_pages, PAGE_SIZE_4K)?;
                let vaddr = va!(vaddr_raw);
                self.update_flags(
                    vaddr,
                    num_pages,
                    MappingFlags::READ | MappingFlags::WRITE | MappingFlags::UNCACHED,
                )?;
                self.alloc
                    .add_memory(vaddr_raw, expand_size)
                    .inspect_err(|e| error!("add memory fail: {e:?}"))?;
                debug!("expand memory @{vaddr:#X}, size: {expand_size:#X} bytes");
            }
        }
    }

    /// Allocates whole pages directly from the global page allocator and
    /// remaps them as uncached.
    fn alloc_coherent_pages(&mut self, layout: Layout) -> AllocResult<DMAInfo> {
        let num_pages = layout_pages(&layout);
        let vaddr_raw =
            global_allocator().alloc_pages(num_pages, PAGE_SIZE_4K.max(layout.align()))?;
        let vaddr = va!(vaddr_raw);
        self.update_flags(
            vaddr,
            num_pages,
            MappingFlags::READ | MappingFlags::WRITE | MappingFlags::UNCACHED,
        )?;
        Ok(DMAInfo {
            // SAFETY: `alloc_pages` returns a non-null virtual address on
            // success.
            cpu_addr: unsafe { NonNull::new_unchecked(vaddr_raw as *mut u8) },
            bus_addr: virt_to_bus(vaddr),
        })
    }

    /// Updates the mapping flags of `num_pages` pages starting at `vaddr`
    /// in the kernel address space.
    fn update_flags(
        &mut self,
        vaddr: VirtAddr,
        num_pages: usize,
        flags: MappingFlags,
    ) -> AllocResult<()> {
        let expand_size = num_pages * PAGE_SIZE_4K;
        axmm::kernel_aspace()
            .lock()
            .protect(vaddr, expand_size, flags)
            .map_err(|e| {
                error!("change table flag fail: {e:?}");
                AllocError::NoMemory
            })
    }

    /// Gives the allocated region back to the global page allocator (for
    /// page-granularity allocations) or to the coherent byte allocator.
    ///
    /// # Safety
    ///
    /// `dma` must have been returned by a previous call to
    /// [`Self::alloc_coherent`] with the same `layout`, and must not be
    /// accessed after this call.
    pub unsafe fn dealloc_coherent(&mut self, dma: DMAInfo, layout: Layout) {
        if layout.size() >= PAGE_SIZE_4K {
            let num_pages = layout_pages(&layout);
            let virt_raw = dma.cpu_addr.as_ptr() as usize;
            // Restore the default cached mapping *before* handing the pages
            // back, so a later allocation never observes them as uncached.
            let _ = self.update_flags(
                va!(virt_raw),
                num_pages,
                MappingFlags::READ | MappingFlags::WRITE,
            );
            global_allocator().dealloc_pages(virt_raw, num_pages);
        } else {
            self.alloc.dealloc(dma.cpu_addr, layout)
        }
    }
}

/// Converts a kernel virtual address to a device bus address via its
/// physical address.
const fn virt_to_bus(addr: VirtAddr) -> BusAddr {
    let paddr = virt_to_phys(addr);
    phys_to_bus(paddr)
}

/// Returns the number of 4K pages required to cover `layout`, rounded up.
const fn layout_pages(layout: &Layout) -> usize {
    memory_addr::align_up_4k(layout.size()) / PAGE_SIZE_4K
}
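
#[cfg(test)]
mod tests {
    use super::*;

    // A minimal sanity check for `layout_pages`, added as an illustrative
    // sketch; it assumes the crate's unit tests are built with the standard
    // test harness.
    #[test]
    fn layout_pages_rounds_up_to_whole_pages() {
        let sub_page = Layout::from_size_align(1, 1).unwrap();
        assert_eq!(layout_pages(&sub_page), 1);

        let exact_page = Layout::from_size_align(PAGE_SIZE_4K, PAGE_SIZE_4K).unwrap();
        assert_eq!(layout_pages(&exact_page), 1);

        let just_over = Layout::from_size_align(PAGE_SIZE_4K + 1, PAGE_SIZE_4K).unwrap();
        assert_eq!(layout_pages(&just_over), 2);
    }
}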