pub(crate) mod device_alloc;
mod page_range_alloc;
mod physicalmem;
mod virtualmem;

use core::alloc::Layout;
use core::mem::MaybeUninit;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
#[cfg(target_os = "none")]
use talc::TalcLock;
#[cfg(target_os = "none")]
use talc::source::Manual;

pub use self::page_range_alloc::{PageRangeAllocator, PageRangeBox};
pub use self::physicalmem::{FrameAlloc, FrameBox};
pub use self::virtualmem::{PageAlloc, PageBox};
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

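/// The global kernel allocator. It starts without any backing memory;
/// usable regions are donated explicitly via `claim` (see
/// [`claim_initial_heap`] and [`init`]).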
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: TalcLock<RawInterruptTicketMutex, Manual> = TalcLock::new(Manual);

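/// The virtual address range occupied by the kernel image, widened to
/// large-page boundaries. Outside of bare-metal builds, the range is empty.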
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        let Range { start, end } = env::executable_ptr_range();
        let start = VirtAddr::from_ptr(start);
        let end = VirtAddr::from_ptr(end);
        start.align_down(LargePageSize::SIZE)..end.align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

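/// Donates one statically allocated page to the global allocator so that
/// early boot code can allocate before the real heap is mapped in [`init`].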
#[cfg(target_os = "none")]
pub(crate) fn claim_initial_heap() {
    #[repr(C, align(0x1000))]
    struct InitialHeap([MaybeUninit<u8>; 0x1000]);

    debug_assert_eq!(
        Layout::new::<InitialHeap>(),
        Layout::from_size_align(0x1000, 0x1000).unwrap()
    );

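    // A single page of static storage that backs the initial heap.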
    static mut INITIAL_HEAP: InitialHeap = InitialHeap([MaybeUninit::uninit(); 0x1000]);

    let base = (&raw mut INITIAL_HEAP).cast::<u8>();
    let size = size_of::<InitialHeap>();
    unsafe {
        ALLOCATOR.lock().claim(base, size).unwrap();
    }
}

#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

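    // Architecture-specific memory initialization.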
    unsafe {
        arch::mm::init();
    }

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

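    // Worst-case estimate of the space needed for the page tables that map
    // all of memory with base pages (each table holds
    // `BasePageSize::SIZE / align_of::<usize>()` entries), plus two large
    // pages of headroom.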
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_div = BasePageSize::SIZE as usize / align_of::<usize>();
    let npage_3tables = npages / npage_div + 1;
    let npage_2tables = npage_3tables / npage_div + 1;
    let npage_1tables = npage_2tables / npage_div + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

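    // On UEFI, only the page-table reservation counts as overhead; otherwise,
    // everything from the RAM base up to the end of the kernel image is
    // unavailable as well.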
    let min_mem = if env::is_uefi() {
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .expect("Not enough memory available!")
        .align_down(LargePageSize::SIZE as usize);

    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

    #[cfg(feature = "common-os")]
    {
        info!("Using Hermit as common OS!");

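        // Use at most 75% of the available memory as kernel heap, capped at
        // 64 MiB; the rest is left for user space.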
        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

    #[cfg(not(feature = "common-os"))]
    {
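        // Without `mman`, set aside 10% of the available memory for stacks.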
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

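        // Heap size: without `mman`, everything except the stack reserve;
        // with `mman`, only 75%, so that memory remains for later mappings.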
        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

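    // Map the heap with the largest supported page size first (`HugePageSize`,
    // then `LargePageSize`, then `BasePageSize`). On failure, `map_heap`
    // reports the number of pages that were mapped successfully, and the
    // remainder is retried with the next smaller page size.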
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

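    // Everything in `heap_start_addr..map_addr` is mapped by now; donate this
    // region to the allocator. The trailing `map_size` bytes stay unmapped.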
    let heap_end_addr = map_addr;

    let size = heap_end_addr.as_usize() - heap_start_addr.as_usize();
    unsafe {
        ALLOCATOR
            .lock()
            .claim(heap_start_addr.as_mut_ptr(), size)
            .unwrap();
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

pub(crate) fn print_information() {
    info!("{FrameAlloc}");
    info!("{PageAlloc}");
}

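/// Maps `size` bytes of the physical memory region at `physical_address` into
/// the kernel's virtual address space and returns the chosen virtual address.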
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = PageAlloc::allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

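/// Unmaps the pages covering `size` bytes at `virtual_address` and returns
/// the virtual address range to the page allocator.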
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
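    // Operate on whole base pages.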
    let size = size.align_up(BasePageSize::SIZE as usize);

    if virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            PageAlloc::deallocate(range);
        }
    } else {
        panic!("No page table entry for virtual address {virtual_address:p}");
    }
}