pub(crate) mod device_alloc;
pub(crate) mod physicalmem;
pub(crate) mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
use talc::{ErrOnOom, Span, Talc, Talck};

#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
use crate::mm::virtualmem::KERNEL_FREE_LIST;
use crate::{arch, env};

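/// The global kernel allocator: a Talc heap guarded by an interrupt-safe
/// ticket mutex. `ErrOnOom` makes allocation failures surface as errors
/// instead of trying to grow the heap.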
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();

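/// Virtual address range occupied by the kernel image, rounded outward to
/// large-page boundaries. Empty when not running on bare metal.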
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        env::get_base_address().align_down(LargePageSize::SIZE)
            ..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

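/// Returns the start of the kernel image in virtual memory.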
pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

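/// Returns the end of the kernel image in virtual memory.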
pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

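/// Initializes the memory subsystem: sets up the architecture-specific
/// physical/virtual memory managers and page tables, maps the kernel heap
/// with the largest supported page size, and hands it to the global allocator.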
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    arch::mm::init();
    arch::mm::init_page_tables();

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

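    // Estimate the physical memory needed for page tables in the worst case
    // (everything mapped with base pages), plus two large pages of headroom.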
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_2tables =
        npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_1tables =
        npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

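    // Minimum memory that must stay out of the heap: on UEFI only the page
    // table reservation, otherwise everything from the RAM start up to the
    // end of the kernel image plus that reservation.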
    let min_mem = if env::is_uefi() {
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .unwrap_or_else(|| panic!("Not enough memory available!"))
        .align_down(LargePageSize::SIZE as usize);

    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

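    // Pick the heap size and reserve a virtual address window for it; the
    // strategy depends on the enabled features (see the cfg blocks below).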
    #[cfg(feature = "common-os")]
    {
        info!("Using HermitOS as common OS!");

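        // Reserve 75% of the available memory for the heap, capped at 64 MiB.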
        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

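        // With 1 GiB pages available, map only up to the next 1 GiB boundary
        // with 2 MiB pages here; the remainder is mapped further below.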
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

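    // Without common-os, keep 10% of the available memory for stacks, unless
    // the mman feature is enabled, in which case the heap takes 75% of it.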
    #[cfg(not(feature = "common-os"))]
    {
        #[cfg(not(feature = "mman"))]
        let stack_reserve: usize = (avail_mem * 10) / 100;

        #[cfg(not(feature = "mman"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mman")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
        let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
        let virt_addr = VirtAddr::from(page_range.start());
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

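    // Map the heap, preferring the largest supported page size: 1 GiB pages
    // first, then 2 MiB pages, then 4 KiB pages for whatever remains.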
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

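    // The heap ends at the first address that could not be mapped; hand the
    // mapped region over to the global allocator.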
    let heap_end_addr = map_addr;

    let arena = Span::new(heap_start_addr.as_mut_ptr(), heap_end_addr.as_mut_ptr());
    unsafe {
        ALLOCATOR.lock().claim(arena).unwrap();
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

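/// Logs the current state of the physical and virtual memory free lists.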
pub(crate) fn print_information() {
    info!("Physical memory free list:\n{}", PHYSICAL_FREE_LIST.lock());
    info!("Virtual memory free list:\n{}", KERNEL_FREE_LIST.lock());
}

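/// Maps `size` bytes of physical memory (rounded up to base pages) into the
/// kernel's virtual address space with the requested access flags and returns
/// the chosen virtual address.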
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

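    // Translate the requested attributes into page table entry flags.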
    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let layout = PageLayout::from_size(size).unwrap();
    let page_range = KERNEL_FREE_LIST.lock().allocate(layout).unwrap();
    let virtual_address = VirtAddr::from(page_range.start());
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

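/// Unmaps `size` bytes (rounded up to base pages) starting at
/// `virtual_address` and returns the range to the kernel's virtual memory
/// free list. Panics if the address is not mapped.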
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

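    // Only touch the page tables if the address is actually mapped.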
    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );

        let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
        unsafe {
            KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
        }
    } else {
        panic!(
            "No page table entry for virtual address {:p}",
            virtual_address
        );
    }
}