pub(crate) mod device_alloc;
mod page_range_alloc;
mod physicalmem;
mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
use talc::{ErrOnOom, Span, Talc, Talck};

pub use self::page_range_alloc::{PageRangeAllocator, PageRangeBox};
pub use self::physicalmem::{FrameAlloc, FrameBox};
pub use self::virtualmem::{PageAlloc, PageBox};
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

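/// The kernel's global allocator: a Talc instance behind an interrupt-safe ticket lock.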
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();

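/// Virtual address range of the kernel image, aligned to large-page boundaries.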
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        env::get_base_address().align_down(LargePageSize::SIZE)
            ..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

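/// Initializes the memory subsystem: sets up the physical and virtual memory
/// managers, maps the kernel heap, and hands the mapped region to the global
/// allocator.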
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    unsafe {
        arch::mm::init();
    }

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

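    // Reserve space for the page tables needed to map all of physical memory;
    // the worst case assumes every mapping uses base-size pages.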
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_2tables =
        npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_1tables =
        npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

    let min_mem = if env::is_uefi() {
        // Under UEFI only the page-table reservation is needed; otherwise the
        // memory occupied up to the end of the kernel image is counted as well.
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .unwrap_or_else(|| panic!("Not enough memory available!"))
        .align_down(LargePageSize::SIZE as usize);

    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

    #[cfg(feature = "common-os")]
    {
        info!("Using HermitOS as common OS!");

        // Use up to 75% of the available memory for the kernel heap,
        // but no more than 64 MiB.
        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            // Map large pages up to the next huge-page boundary.
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

    #[cfg(not(feature = "common-os"))]
    {
        // Reserve 10% of the available memory for stack allocations.
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

        // Without `mman`, everything except the stack reserve becomes kernel heap;
        // with `mman`, only 75% of the available memory is used for the heap.
        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            // Map large pages up to the next huge-page boundary.
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

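    // Map the heap into the page tables, trying the largest supported page size
    // first (huge pages where available, then large pages, then base pages) and
    // falling back to the next smaller size for whatever could not be mapped.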
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    let heap_end_addr = map_addr;

    let arena = Span::new(heap_start_addr.as_mut_ptr(), heap_end_addr.as_mut_ptr());
    unsafe {
        ALLOCATOR.lock().claim(arena).unwrap();
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} Bytes unmapped)");
}

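/// Logs the current state of the frame and page allocators.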
pub(crate) fn print_information() {
    info!("{FrameAlloc}");
    info!("{PageAlloc}");
}

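/// Maps the given physical memory region into the virtual address space with the
/// requested access flags and returns the virtual address of the mapping.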
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = PageAlloc::allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

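/// Unmaps the given virtual memory region and returns its pages to the page allocator.
///
/// Panics if no page table entry exists for `virtual_address`.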
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            PageAlloc::deallocate(range);
        }
    } else {
        panic!(
            "No page table entry for virtual address {:p}",
            virtual_address
        );
    }
}