pub(crate) mod device_alloc;
mod page_range_alloc;
mod physicalmem;
mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
#[cfg(target_os = "none")]
use talc::TalcLock;
#[cfg(target_os = "none")]
use talc::source::Manual;

pub use self::page_range_alloc::{PageRangeAllocator, PageRangeBox};
pub use self::physicalmem::{FrameAlloc, FrameBox};
pub use self::virtualmem::{PageAlloc, PageBox};
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

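/// The kernel's global heap allocator: a Talc allocator behind an
/// interrupt-safe ticket lock. It starts out empty (`Manual` memory source)
/// and only becomes usable once [`init`] has mapped the heap and handed the
/// region over via `claim`; from then on, all `alloc` types (`Box`, `Vec`,
/// ...) draw from it.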
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: TalcLock<RawInterruptTicketMutex, Manual> = TalcLock::new(Manual);

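/// The virtual address range occupied by the kernel image, expanded outward
/// to large-page boundaries. On targets other than `target_os = "none"` this
/// is the empty range.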
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        let Range { start, end } = env::executable_ptr_range();
        let start = VirtAddr::from_ptr(start);
        let end = VirtAddr::from_ptr(end);
        start.align_down(LargePageSize::SIZE)..end.align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

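/// Returns the start of the (large-page-aligned) kernel address range.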
pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

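/// Returns the end of the (large-page-aligned) kernel address range.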
pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

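/// Initializes the memory subsystem on bare metal.
///
/// Forces [`KERNEL_ADDR_RANGE`], runs the architecture-specific memory
/// initialization, computes how much memory can be given to the kernel heap,
/// maps that heap (preferring 1 GiB and 2 MiB pages where the CPU supports
/// them), and finally hands the mapped region to the global [`ALLOCATOR`].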
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    unsafe {
        arch::mm::init();
    }

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

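    // Estimate the worst-case space needed for page tables if all of memory
    // were mapped with base pages: each table holds
    // `BasePageSize::SIZE / align_of::<usize>()` entries (typically
    // 4096 / 8 = 512), so one leaf table covers 512 pages, one directory
    // covers 512 leaf tables, and so on; two large pages of slack are added
    // on top. As a worked example (assuming 4 KiB base pages and 8-byte
    // entries), 1 GiB of RAM gives 262144 pages, needing 513 + 2 + 1 = 516
    // tables (~2 MiB), plus 2 * 2 MiB of slack: roughly 6 MiB reserved.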
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_div = BasePageSize::SIZE as usize / mem::align_of::<usize>();
    let npage_3tables = npages / npage_div + 1;
    let npage_2tables = npage_3tables / npage_div + 1;
    let npage_1tables = npage_2tables / npage_div + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

    // Memory that cannot go to the heap: the page-table reservation plus, on
    // non-UEFI boots, everything from the start of RAM up to the end of the
    // kernel image.
    let min_mem = if env::is_uefi() {
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .expect("Not enough memory available!")
        .align_down(LargePageSize::SIZE as usize);

    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

    #[cfg(feature = "common-os")]
    {
        info!("Using Hermit as common OS!");

        // Reserve up to 75 % of the available memory for the kernel heap,
        // capped at 64 MiB (0x0400_0000).
        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            // Map with 2 MiB pages up to the first 1 GiB boundary; huge pages
            // take over from there.
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

    #[cfg(not(feature = "common-os"))]
    {
        // Without `mman`, hold back 10 % of the available memory (see
        // `stack_reserve`) and give the rest to the heap; with `mman`, limit
        // the heap to 75 % so other mappings have room.
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = PageAlloc::allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            // Map with 2 MiB pages up to the first 1 GiB boundary; huge pages
            // take over from there.
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

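    // Map the heap using the largest page size available, falling back to
    // smaller sizes for whatever remains: 1 GiB huge pages first (where
    // supported and aligned), then 2 MiB large pages, then base pages. On
    // failure, `map_heap` reports via `Err` how many pages it mapped before
    // giving up, so each tier advances `map_addr` past its mapped prefix and
    // leaves the rest to the next smaller size.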
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    let heap_end_addr = map_addr;

    // Hand the mapped region over to the global allocator.
    let size = heap_end_addr.as_usize() - heap_start_addr.as_usize();
    unsafe {
        ALLOCATOR
            .lock()
            .claim(heap_start_addr.as_mut_ptr(), size)
            .unwrap();
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

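/// Logs the current state of the physical frame and virtual page allocators.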
pub(crate) fn print_information() {
    info!("{FrameAlloc}");
    info!("{PageAlloc}");
}

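/// Maps `size` bytes of physical memory (e.g. a device's MMIO region) at a
/// freshly allocated virtual address and returns that address. `size` is
/// rounded up to a multiple of the base page size.
///
/// A sketch of typical driver usage; the address, size, and flag choices are
/// made up for illustration:
///
/// ```ignore
/// // Map a hypothetical 4 KiB MMIO region: writable, non-executable, uncached.
/// let bar0 = PhysAddr::new(0xfebf_0000);
/// let regs = mm::map(bar0, 0x1000, true, true, true);
/// // ... poke device registers through `regs` ...
/// mm::unmap(regs, 0x1000);
/// ```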
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = PageAlloc::allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

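/// Unmaps a region previously mapped with [`map`] and returns its pages to
/// the page allocator. `size` is rounded up to a multiple of the base page
/// size.
///
/// Panics if `virtual_address` has no page table entry.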
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            PageAlloc::deallocate(range);
        }
    } else {
        panic!("No page table entry for virtual address {virtual_address:p}");
    }
}