pub(crate) mod allocator;
pub(crate) mod device_alloc;
pub(crate) mod physicalmem;
pub(crate) mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::Lazy;
pub use memory_addresses::{PhysAddr, VirtAddr};

use self::allocator::LockedAllocator;
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
use crate::mm::virtualmem::KERNEL_FREE_LIST;
use crate::{arch, env};

#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: LockedAllocator = LockedAllocator::new();

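/// Virtual address range that the kernel image occupies, aligned to
/// large-page boundaries. The range is empty when not running bare-metal.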
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        env::get_base_address().align_down(LargePageSize::SIZE)
            ..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

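/// Initializes the kernel heap.
///
/// Sets up the architecture-specific paging, estimates how much memory must
/// stay reserved (page tables plus the kernel image), reserves a virtual
/// region for the heap from `KERNEL_FREE_LIST`, maps it with the largest
/// available page sizes, and hands the mapped range to the global `ALLOCATOR`.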
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    arch::mm::init();
    arch::mm::init_page_tables();

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

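    // Worst-case number of page tables needed to map all of memory with base
    // pages: each table holds `BasePageSize::SIZE / align_of::<usize>()`
    // entries. Two additional large pages are added as slack.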
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_2tables =
        npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_1tables =
        npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

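    // Minimum amount of memory that has to remain reserved: with UEFI just
    // the page-table estimate, otherwise everything from the start of RAM up
    // to the end of the kernel image plus that estimate.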
    let min_mem = if env::is_uefi() {
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .unwrap_or_else(|| panic!("Not enough memory available!"))
        .align_down(LargePageSize::SIZE as usize);

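    // `heap_start_addr` is fixed once the virtual region has been reserved;
    // `map_addr`/`map_size` track the portion of it that still has to be mapped.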
    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

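    // With the `common-os` feature, the kernel keeps only a small heap for
    // itself: 75 % of the available memory, capped at 64 MiB (`0x0400_0000`).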
    #[cfg(feature = "common-os")]
    {
        info!("Using HermitOS as common OS!");

        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

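        // With 1 GiB pages available, map the stretch up to the next 1 GiB
        // boundary with large pages so that the rest can later be mapped with
        // huge pages. If `map_heap` returns `Err(n)`, everything past the
        // first `n` large pages is left to the fallback paths further below.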
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

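    // Without `common-os`, the heap takes most of the available memory:
    // everything except a 10 % stack reserve, or 75 % of it when the `mman`
    // feature is enabled.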
    #[cfg(not(feature = "common-os"))]
    {
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

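    // Map the remaining region with progressively smaller page sizes: huge
    // pages where supported and aligned, then large pages, then base pages.
    // Each step removes whatever it managed to map from `map_addr`/`map_size`.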
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    let heap_end_addr = map_addr;

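    // Hand the mapped range over to the global allocator.
    // SAFETY: `heap_start_addr..heap_end_addr` was reserved from the kernel
    // free list and mapped above.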
    unsafe {
        ALLOCATOR.init(
            heap_start_addr.as_mut_ptr(),
            (heap_end_addr - heap_start_addr) as usize,
        );
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

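/// Logs the current state of the physical and virtual memory free lists.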
pub(crate) fn print_information() {
    info!("Physical memory free list:\n{}", PHYSICAL_FREE_LIST.lock());
    info!("Virtual memory free list:\n{}", KERNEL_FREE_LIST.lock());
}

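/// Maps `size` bytes of physical memory starting at `physical_address` into
/// the kernel's virtual address space and returns the chosen virtual address.
///
/// The size is rounded up to whole base pages; `no_cache` maps the region as
/// device (uncached) memory.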
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

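/// Unmaps a previously mapped region of `size` bytes (rounded up to whole
/// base pages) and returns the virtual range to the kernel free list.
///
/// Panics if there is no page-table entry for `virtual_address`.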
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
        }
    } else {
        panic!(
            "No page table entry for virtual address {:p}",
            virtual_address
        );
    }
}