use core::alloc::AllocError;
use core::fmt;
use core::sync::atomic::{AtomicUsize, Ordering};

use align_address::Align;
use free_list::{FreeList, PageLayout, PageRange, PageRangeError};
use hermit_sync::InterruptTicketMutex;
use memory_addresses::{PhysAddr, VirtAddr};

#[cfg(target_arch = "x86_64")]
use crate::arch::mm::paging::PageTableEntryFlagsExt;
use crate::arch::mm::paging::{self, HugePageSize, PageSize, PageTableEntryFlags};
use crate::env;
use crate::mm::device_alloc::DeviceAlloc;
use crate::mm::{PageRangeAllocator, PageRangeBox};

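/// Free list of physical page frames, shared behind an interrupt-safe ticket mutex.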
static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
    InterruptTicketMutex::new(FreeList::new());
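/// Total amount of physical memory claimed for the frame allocator in bytes.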
pub static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);

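/// Allocator for physical page frames, backed by [`PHYSICAL_FREE_LIST`].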
pub struct FrameAlloc;

impl PageRangeAllocator for FrameAlloc {
    unsafe fn init() {
        unsafe {
            init();
        }
    }

    fn allocate(layout: PageLayout) -> Result<PageRange, AllocError> {
        PHYSICAL_FREE_LIST
            .lock()
            .allocate(layout)
            .map_err(|_| AllocError)
    }

    fn allocate_at(range: PageRange) -> Result<(), AllocError> {
        PHYSICAL_FREE_LIST
            .lock()
            .allocate_at(range)
            .map_err(|_| AllocError)
    }

    unsafe fn deallocate(range: PageRange) {
        unsafe {
            PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
        }
    }
}

impl fmt::Display for FrameAlloc {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let free_list = PHYSICAL_FREE_LIST.lock();
        write!(f, "FrameAlloc free list:\n{free_list}")
    }
}

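/// An owned range of physical page frames allocated from [`FrameAlloc`].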
pub type FrameBox = PageRangeBox<FrameAlloc>;

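/// Returns the total size of physical memory managed by the frame allocator.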
pub fn total_memory_size() -> usize {
    TOTAL_MEMORY.load(Ordering::Relaxed)
}

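/// Identity-maps `frame_range` using an architecture-specific page size and,
/// if physical memory is additionally mapped at a nonzero offset, mirrors the
/// mapping at that offset.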
pub unsafe fn map_frame_range(frame_range: PageRange) {
    cfg_select! {
        target_arch = "aarch64" => {
            type IdentityPageSize = crate::arch::mm::paging::BasePageSize;
        }
        target_arch = "riscv64" => {
            type IdentityPageSize = crate::arch::mm::paging::HugePageSize;
        }
        target_arch = "x86_64" => {
            type IdentityPageSize = crate::arch::mm::paging::LargePageSize;
        }
    }

    let start = frame_range
        .start()
        .align_down(IdentityPageSize::SIZE.try_into().unwrap());
    let end = frame_range
        .end()
        .align_up(IdentityPageSize::SIZE.try_into().unwrap());

    (start..end)
        .step_by(IdentityPageSize::SIZE.try_into().unwrap())
        .map(|addr| PhysAddr::new(addr.try_into().unwrap()))
        .for_each(paging::identity_map::<IdentityPageSize>);

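    // If physical memory is also mapped at DeviceAlloc's physical offset,
    // mirror the mapping there with writable, non-executable pages.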
    if DeviceAlloc.phys_offset() != VirtAddr::zero() {
        let flags = {
            let mut flags = PageTableEntryFlags::empty();
            flags.normal().writable().execute_disable();
            flags
        };
        (start..end)
            .step_by(IdentityPageSize::SIZE.try_into().unwrap())
            .for_each(|addr| {
                let phys_addr = PhysAddr::new(addr.try_into().unwrap());
                let virt_addr = VirtAddr::from_ptr(DeviceAlloc.ptr_from::<()>(phys_addr));
                paging::map::<IdentityPageSize>(virt_addr, phys_addr, 1, flags);
            });
    }
}

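/// Detects physical memory from the flattened device tree (FDT).
///
/// Every `/memory` region is claimed for the frame allocator and mapped via
/// [`map_frame_range`]. Afterwards, the FDT's memory reservations, the kernel
/// image, and the FDT itself are carved back out of the free list.
///
/// Returns `Err(())` if no FDT or no `/memory` region is available.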
unsafe fn detect_from_fdt() -> Result<(), ()> {
    let fdt = env::fdt().ok_or(())?;

    let all_regions = fdt
        .find_all_nodes("/memory")
        .map(|m| m.reg().unwrap().next().unwrap());
    if all_regions.count() == 0 {
        return Err(());
    }
    let all_regions = fdt
        .find_all_nodes("/memory")
        .map(|m| m.reg().unwrap().next().unwrap());

    for m in all_regions {
        let start_address = m.starting_address.expose_provenance() as u64;
        let size = m.size.unwrap() as u64;
        let end_address = start_address + size;

        if end_address <= super::kernel_end_address().as_u64() && !env::is_uefi() {
            continue;
        }

        let start_address =
            if start_address <= super::kernel_start_address().as_u64() && !env::is_uefi() {
                super::kernel_end_address()
            } else {
                VirtAddr::new(start_address)
            };

        let range = PageRange::new(start_address.as_usize(), end_address as usize).unwrap();
        unsafe {
            FrameAlloc::deallocate(range);
            map_frame_range(range);
        }
        TOTAL_MEMORY.fetch_add(range.len().get(), Ordering::Relaxed);
        debug!("Claimed physical memory: {range:#x?}");
    }

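    // Remove every page range overlapping a reservation from the free list so
    // that it can never be handed out as a free frame.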
    let reserve = |reservation: PageRange| {
        debug!("Memory reservation: {reservation:#x?}");
        while let Ok(reserved) = PHYSICAL_FREE_LIST
            .lock()
            .allocate_with(|range| reservation.and(range))
        {
            debug!("Reserved {reserved:#x?}");
        }
    };

    for reservation in fdt.memory_reservations() {
        let start = reservation.address().addr();
        let end = start + reservation.size();
        let reservation = PageRange::new(start, end).unwrap();
        reserve(reservation);
    }

    let kernel_start = if env::is_uefi() {
        super::kernel_start_address().as_usize()
    } else {
        0
    };
    let kernel_end = super::kernel_end_address().as_usize();
    let kernel_region = PageRange::new(kernel_start, kernel_end).unwrap();
    reserve(kernel_region);

    let fdt_start = env::boot_info().hardware_info.device_tree.unwrap().get();
    let fdt_start = usize::try_from(fdt_start).unwrap();
    let fdt_end = fdt_start + fdt.total_size();
    let fdt_region = PageRange::containing(fdt_start, fdt_end).unwrap();
    reserve(fdt_region);

    Ok(())
}

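/// Convenience operations on [`PageRange`] used during memory detection.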
trait PageRangeExt: Sized {
    fn containing(start: usize, end: usize) -> Result<Self, PageRangeError>;

    fn and(self, rhs: Self) -> Option<Self>;
}

impl PageRangeExt for PageRange {
    fn containing(start: usize, end: usize) -> Result<Self, PageRangeError> {
        let start = start.align_down(free_list::PAGE_SIZE);
        let end = end.align_up(free_list::PAGE_SIZE);
        Self::new(start, end)
    }

    fn and(self, rhs: Self) -> Option<Self> {
        let start = self.start().max(rhs.start());
        let end = self.end().min(rhs.end());
        Self::new(start, end).ok()
    }
}

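/// Fallback detection that derives the usable memory range from the
/// architecture-reported RAM address and limit.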
#[cfg(any(target_arch = "aarch64", target_arch = "riscv64"))]
unsafe fn detect_from_limits() -> Result<(), ()> {
    let limit = crate::arch::kernel::get_limit();
    if limit == 0 {
        return Err(());
    }

    #[cfg(target_arch = "riscv64")]
    let ram_address = crate::arch::kernel::get_ram_address().as_usize();
    #[cfg(target_arch = "aarch64")]
    let ram_address = 0;

    let range =
        PageRange::new(super::kernel_end_address().as_usize(), ram_address + limit).unwrap();
    unsafe {
        PHYSICAL_FREE_LIST.lock().deallocate(range).unwrap();
        map_frame_range(range);
    }
    TOTAL_MEMORY.fetch_add(range.len().get(), Ordering::Relaxed);

    Ok(())
}

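/// Initializes physical memory management.
///
/// On UEFI systems with a nonzero physical-memory offset, the existing
/// huge-page mappings at that offset are removed first. Memory is then
/// detected from the FDT; on AArch64 and RISC-V, the architecture-reported
/// limits serve as a fallback.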
unsafe fn init() {
    if env::is_uefi() && DeviceAlloc.phys_offset() != VirtAddr::zero() {
        let start = DeviceAlloc.phys_offset();
        let count = DeviceAlloc.phys_offset().as_u64() / HugePageSize::SIZE;
        let count = usize::try_from(count).unwrap();
        paging::unmap::<HugePageSize>(start, count);
    }

    if unsafe { detect_from_fdt().is_ok() } {
        return;
    }

    cfg_select! {
        any(target_arch = "aarch64", target_arch = "riscv64") => {
            error!("Could not detect physical memory from FDT");
            unsafe { detect_from_limits().unwrap(); }
        }
        _ => {
            panic!("Could not detect physical memory from FDT");
        }
    }
}