pub(crate) mod device_alloc;
mod page_range_alloc;
mod physicalmem;
mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
use talc::{ErrOnOom, Span, Talc, Talck};

pub use self::page_range_alloc::{PageRangeAllocator, PageRangeBox};
pub use self::physicalmem::{FrameAlloc, FrameBox};
pub use self::virtualmem::{PageAlloc, PageBox};
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

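/// The kernel's global allocator: a Talc heap behind an interrupt-safe ticket
/// lock, configured to return an error instead of growing when it runs out of
/// memory.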
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();

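/// The virtual address range occupied by the kernel image, rounded to
/// large-page boundaries. Empty on builds that do not run on bare metal.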
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        // Align the kernel image's start and end addresses to large-page boundaries.
        env::get_base_address().align_down(LargePageSize::SIZE)
            ..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

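/// Initializes the memory subsystem: brings up the physical and virtual memory
/// managers and maps the kernel heap.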
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    unsafe {
        arch::mm::init();
    }

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

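    // Reserve space for the page tables needed to map all of memory. A page
    // table of BasePageSize::SIZE bytes holds `npage_div` entries, so estimate
    // the number of tables per level and add two large pages of slack.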
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_div = BasePageSize::SIZE as usize / mem::align_of::<usize>();
    let npage_3tables = npages / npage_div + 1;
    let npage_2tables = npage_3tables / npage_div + 1;
    let npage_1tables = npage_2tables / npage_div + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

    let min_mem = if env::is_uefi() {
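        // With UEFI, the reported memory is usable memory and the kernel
        // image lies outside of it, so only the page-table reservation counts.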
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .unwrap_or_else(|| panic!("Not enough memory available!"))
        .align_down(LargePageSize::SIZE as usize);

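    // `heap_start_addr` is fixed once the virtual range has been allocated;
    // `map_addr` and `map_size` track the part of the heap still to be mapped.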
    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

    #[cfg(feature = "common-os")]
    {
        info!("Using HermitOS as common OS!");

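        // Start from 75 % of the available memory for the kernel heap.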
        let reserve: usize = (avail_mem * 75) / 100;
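        // Cap the kernel heap at 64 MiB (0x0400_0000 bytes); the remainder
        // stays available to the user space.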
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

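        // With 1 GiB page support, map large pages only up to the next huge-page
        // boundary; the aligned remainder is mapped by the huge-page pass below.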
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

    #[cfg(not(feature = "common-os"))]
    {
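        // Without mman, reserve 10 % of the available memory for stacks and map
        // the rest as heap up front; with mman, map only 75 % so that the
        // remainder stays available for memory mappings.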
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

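        // As in the common-os case, map large pages only up to the next
        // huge-page boundary.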
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

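    // Map whatever is still unmapped with the largest page size that fits:
    // huge pages first, then large pages, then base pages.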
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    let heap_end_addr = map_addr;

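    // Hand the mapped heap region over to the global allocator.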
    let arena = Span::new(heap_start_addr.as_mut_ptr(), heap_end_addr.as_mut_ptr());
    unsafe {
        ALLOCATOR.lock().claim(arena).unwrap();
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

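/// Prints the current state of the frame and page allocators.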
pub(crate) fn print_information() {
    info!("{FrameAlloc}");
    info!("{PageAlloc}");
}

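/// Maps `size` bytes of physical memory starting at `physical_address` into a
/// newly allocated virtual address range and returns its base address. The
/// boolean flags control writability, executability, and caching.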
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = PageAlloc::allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

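/// Unmaps `size` bytes starting at `virtual_address` and returns the pages to
/// the page allocator. Panics if the address has no page table entry.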
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            PageAlloc::deallocate(range);
        }
    } else {
        panic!("No page table entry for virtual address {virtual_address:p}");
    }
}