use core::alloc::AllocError;
use core::fmt;
use core::sync::atomic::{AtomicUsize, Ordering};

use align_address::Align;
use free_list::{FreeList, PageLayout, PageRange, PageRangeError};
use hermit_sync::InterruptTicketMutex;
use memory_addresses::{PhysAddr, VirtAddr};

#[cfg(target_arch = "x86_64")]
use crate::arch::mm::paging::PageTableEntryFlagsExt;
use crate::arch::mm::paging::{self, HugePageSize, PageSize, PageTableEntryFlags};
use crate::env;
use crate::mm::device_alloc::DeviceAlloc;
use crate::mm::{PageRangeAllocator, PageRangeBox};

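/// The global free list of physical page frames.
///
/// Protected by an interrupt-safe ticket mutex so that it can also be used
/// while interrupts are disabled.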
static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
    InterruptTicketMutex::new(FreeList::new());
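/// The total amount of physical memory claimed so far, in bytes.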
pub static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);

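/// The physical frame allocator, backed by [`PHYSICAL_FREE_LIST`].
///
/// A minimal usage sketch (the addresses are hypothetical and the range must
/// already be in the free list):
///
/// ```ignore
/// use free_list::PageRange;
///
/// // Claim a specific physical range, e.g. for DMA.
/// let range = PageRange::new(0x10_0000, 0x10_4000).unwrap();
/// FrameAlloc::allocate_at(range).unwrap();
///
/// // Safety: the range was previously allocated from this allocator.
/// unsafe { FrameAlloc::deallocate(range) };
/// ```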
pub struct FrameAlloc;

impl PageRangeAllocator for FrameAlloc {
    unsafe fn init() {
        unsafe {
            init();
        }
    }

    fn allocate(layout: PageLayout) -> Result<PageRange, AllocError> {
        PHYSICAL_FREE_LIST
            .lock()
            .allocate(layout)
            .map_err(|_| AllocError)
    }

    fn allocate_at(range: PageRange) -> Result<(), AllocError> {
        PHYSICAL_FREE_LIST
            .lock()
            .allocate_at(range)
            .map_err(|_| AllocError)
    }

    unsafe fn deallocate(range: PageRange) {
        unsafe {
            PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
        }
    }
}

impl fmt::Display for FrameAlloc {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let free_list = PHYSICAL_FREE_LIST.lock();
        write!(f, "FrameAlloc free list:\n{free_list}")
    }
}

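/// An owned range of physical frames, allocated from [`FrameAlloc`].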
pub type FrameBox = PageRangeBox<FrameAlloc>;

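/// Returns the total amount of physical memory that has been claimed so far,
/// in bytes.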
pub fn total_memory_size() -> usize {
    TOTAL_MEMORY.load(Ordering::Relaxed)
}

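/// Identity-maps the given frame range, making the frames accessible at
/// their physical addresses.
///
/// The mapping granularity is architecture-dependent: base pages on aarch64,
/// huge pages on riscv64, and large pages on x86_64. If the device allocator
/// uses a non-zero physical memory offset, the frames are additionally
/// mapped at that offset as writable, non-executable normal memory.
///
/// # Safety
///
/// The caller must ensure that `frame_range` refers to real physical memory
/// and that mapping it does not conflict with existing mappings.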
pub unsafe fn map_frame_range(frame_range: PageRange) {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "aarch64")] {
            type IdentityPageSize = crate::arch::mm::paging::BasePageSize;
        } else if #[cfg(target_arch = "riscv64")] {
            type IdentityPageSize = crate::arch::mm::paging::HugePageSize;
        } else if #[cfg(target_arch = "x86_64")] {
            type IdentityPageSize = crate::arch::mm::paging::LargePageSize;
        }
    }

    let start = frame_range
        .start()
        .align_down(IdentityPageSize::SIZE.try_into().unwrap());
    let end = frame_range
        .end()
        .align_up(IdentityPageSize::SIZE.try_into().unwrap());

    (start..end)
        .step_by(IdentityPageSize::SIZE.try_into().unwrap())
        .map(|addr| PhysAddr::new(addr.try_into().unwrap()))
        .for_each(paging::identity_map::<IdentityPageSize>);

    if DeviceAlloc.phys_offset() != VirtAddr::zero() {
        let flags = {
            let mut flags = PageTableEntryFlags::empty();
            flags.normal().writable().execute_disable();
            flags
        };
        (start..end)
            .step_by(IdentityPageSize::SIZE.try_into().unwrap())
            .for_each(|addr| {
                let phys_addr = PhysAddr::new(addr.try_into().unwrap());
                let virt_addr = VirtAddr::from_ptr(DeviceAlloc.ptr_from::<()>(phys_addr));
                paging::map::<IdentityPageSize>(virt_addr, phys_addr, 1, flags);
            });
    }
}

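/// Detects physical memory from the flattened device tree (FDT) and hands
/// all usable regions to the frame allocator.
///
/// Returns `Err(())` if no FDT is available or if it contains no `/memory`
/// nodes. Afterwards, all FDT memory reservations, the kernel image, and the
/// FDT blob itself are carved back out of the free list.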
unsafe fn detect_from_fdt() -> Result<(), ()> {
    let fdt = env::fdt().ok_or(())?;

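    // `Iterator::count` consumes the iterator, so the `/memory` regions are
    // queried once to check that at least one exists and once more to
    // iterate over them.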
    let all_regions = fdt
        .find_all_nodes("/memory")
        .map(|m| m.reg().unwrap().next().unwrap());
    if all_regions.count() == 0 {
        return Err(());
    }
    let all_regions = fdt
        .find_all_nodes("/memory")
        .map(|m| m.reg().unwrap().next().unwrap());

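    // Claim every region. Unless booted via UEFI, regions lying entirely
    // below the kernel image are skipped, and regions starting at or below
    // it are clamped to begin at the kernel's end.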
    for m in all_regions {
        let start_address = m.starting_address as u64;
        let size = m.size.unwrap() as u64;
        let end_address = start_address + size;

        if end_address <= super::kernel_end_address().as_u64() && !env::is_uefi() {
            continue;
        }

        let start_address =
            if start_address <= super::kernel_start_address().as_u64() && !env::is_uefi() {
                super::kernel_end_address()
            } else {
                VirtAddr::new(start_address)
            };

        let range = PageRange::new(start_address.as_usize(), end_address as usize).unwrap();
        unsafe {
            FrameAlloc::deallocate(range);
            map_frame_range(range);
        }
        TOTAL_MEMORY.fetch_add(range.len().get(), Ordering::Relaxed);
        debug!("Claimed physical memory: {range:#x?}");
    }

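    // Carves a reservation back out of the free list by repeatedly
    // allocating the intersection of the reservation with a free range
    // until no overlap remains.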
    let reserve = |reservation: PageRange| {
        debug!("Memory reservation: {reservation:#x?}");
        while let Ok(reserved) = PHYSICAL_FREE_LIST
            .lock()
            .allocate_with(|range| reservation.and(range))
        {
            debug!("Reserved {reserved:#x?}");
        }
    };

    for reservation in fdt.memory_reservations() {
        let start = reservation.address().addr();
        let end = start + reservation.size();
        let reservation = PageRange::new(start, end).unwrap();
        reserve(reservation);
    }

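    // Reserve the kernel image. Unless booted via UEFI, everything below the
    // kernel's end is reserved as well.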
    let kernel_start = if env::is_uefi() {
        super::kernel_start_address().as_usize()
    } else {
        0
    };
    let kernel_end = super::kernel_end_address().as_usize();
    let kernel_region = PageRange::new(kernel_start, kernel_end).unwrap();
    reserve(kernel_region);

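    // Reserve the FDT blob itself, expanded to page boundaries.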
    let fdt_start = env::boot_info().hardware_info.device_tree.unwrap().get();
    let fdt_start = usize::try_from(fdt_start).unwrap();
    let fdt_end = fdt_start + fdt.total_size();
    let fdt_region = PageRange::containing(fdt_start, fdt_end).unwrap();
    reserve(fdt_region);

    Ok(())
}

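/// Convenience extensions for [`PageRange`].
///
/// [`PageRangeExt::containing`] expands unaligned bounds outward to page
/// boundaries; [`PageRangeExt::and`] computes the intersection of two
/// ranges. For example, intersecting `0x0000..0x3000` with `0x2000..0x5000`
/// yields `Some(0x2000..0x3000)`, while disjoint ranges yield `None`.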
trait PageRangeExt: Sized {
    fn containing(start: usize, end: usize) -> Result<Self, PageRangeError>;

    fn and(self, rhs: Self) -> Option<Self>;
}

impl PageRangeExt for PageRange {
    fn containing(start: usize, end: usize) -> Result<Self, PageRangeError> {
        let start = start.align_down(free_list::PAGE_SIZE);
        let end = end.align_up(free_list::PAGE_SIZE);
        Self::new(start, end)
    }

    fn and(self, rhs: Self) -> Option<Self> {
        let start = self.start().max(rhs.start());
        let end = self.end().min(rhs.end());
        Self::new(start, end).ok()
    }
}

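/// Fallback memory detection for platforms without a usable FDT: claims
/// everything between the kernel's end and the platform memory limit.
///
/// Returns `Err(())` if the platform reports no memory limit.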
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
unsafe fn detect_from_limits() -> Result<(), ()> {
    let limit = crate::arch::kernel::get_limit();
    if limit == 0 {
        return Err(());
    }

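    // The RAM base is queried from the platform on riscv64; on aarch64, RAM
    // is assumed to start at address zero.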
    #[cfg(target_arch = "riscv64")]
    let ram_address = crate::arch::kernel::get_ram_address().as_usize();
    #[cfg(target_arch = "aarch64")]
    let ram_address = 0;

    let range =
        PageRange::new(super::kernel_end_address().as_usize(), ram_address + limit).unwrap();
    unsafe {
        PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
        map_frame_range(range);
    }
    TOTAL_MEMORY.fetch_add(range.len().get(), Ordering::Relaxed);

    Ok(())
}

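/// Initializes the physical frame allocator by detecting available memory,
/// preferring the FDT and falling back to platform limits where supported.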
unsafe fn init() {
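    // On UEFI systems, drop the pre-existing huge-page mappings at the
    // physical memory offset first; `map_frame_range` re-establishes
    // mappings there for the detected frames with the proper flags.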
    if env::is_uefi() && DeviceAlloc.phys_offset() != VirtAddr::zero() {
        let start = DeviceAlloc.phys_offset();
        let count = DeviceAlloc.phys_offset().as_u64() / HugePageSize::SIZE;
        let count = usize::try_from(count).unwrap();
        paging::unmap::<HugePageSize>(start, count);
    }

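    // Prefer detection via FDT. Without one, aarch64 and riscv64 can fall
    // back to platform limits; other architectures cannot continue.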
    if let Err(_err) = unsafe { detect_from_fdt() } {
        cfg_if::cfg_if! {
            if #[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))] {
                error!("Could not detect physical memory from FDT");
                unsafe { detect_from_limits().unwrap(); }
            } else {
                panic!("Could not detect physical memory from FDT");
            }
        }
    }
}