pub(crate) mod allocator;
pub(crate) mod device_alloc;
pub(crate) mod physicalmem;
pub(crate) mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use hermit_sync::Lazy;
pub use memory_addresses::{PhysAddr, VirtAddr};

use self::allocator::LockedAllocator;
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

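/// The kernel's global allocator, used for all heap allocations on bare-metal targets.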
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: LockedAllocator = LockedAllocator::new();

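/// Virtual address range occupied by the kernel image, rounded out to large-page
/// boundaries. The range is empty when not running on bare metal.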
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
    if cfg!(target_os = "none") {
        env::get_base_address().align_down(LargePageSize::SIZE)
            ..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
    } else {
        VirtAddr::zero()..VirtAddr::zero()
    }
});

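/// Returns the start address of the kernel image in virtual memory.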
pub(crate) fn kernel_start_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.start
}

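/// Returns the end address of the kernel image in virtual memory.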
pub(crate) fn kernel_end_address() -> VirtAddr {
    KERNEL_ADDR_RANGE.end
}

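/// Initializes the memory subsystem: sets up the physical and virtual memory
/// managers, maps the kernel heap using the largest page sizes available, and
/// hands the mapped region to the global allocator.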
#[cfg(target_os = "none")]
pub(crate) fn init() {
    use crate::arch::mm::paging;

    Lazy::force(&KERNEL_ADDR_RANGE);

    arch::mm::init();
    arch::mm::init_page_tables();

    let total_mem = physicalmem::total_memory_size();
    let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
    info!("Total memory size: {} MiB", total_mem >> 20);
    info!(
        "Kernel region: {:p}..{:p}",
        kernel_addr_range.start, kernel_addr_range.end
    );

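    // Estimate the space consumed by the page tables themselves: each table maps
    // BasePageSize::SIZE / align_of::<usize>() pages, so derive the table count at
    // every level from the total number of base pages and add two large pages of slack.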
    let npages = total_mem / BasePageSize::SIZE as usize;
    let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_2tables =
        npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let npage_1tables =
        npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
    let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
        * BasePageSize::SIZE as usize
        + 2 * LargePageSize::SIZE as usize;
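    // Check which page sizes the processor supports for mapping the heap.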
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    let has_1gib_pages = arch::processor::supports_1gib_pages();
    let has_2mib_pages = arch::processor::supports_2mib_pages();

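    // Compute the minimum amount of memory that must stay reserved: the page-table
    // estimate, plus (on non-UEFI boots) everything from the start of RAM up to the
    // end of the kernel image.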
    let min_mem = if env::is_uefi() {
        reserved_space
    } else {
        (kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
            as usize
    };
    info!("Minimum memory size: {} MiB", min_mem >> 20);
    let avail_mem = total_mem
        .checked_sub(min_mem)
        .unwrap_or_else(|| panic!("Not enough memory available!"))
        .align_down(LargePageSize::SIZE as usize);

    let mut map_addr;
    let mut map_size;
    let heap_start_addr;

    #[cfg(feature = "common-os")]
    {
        info!("Using HermitOS as common OS!");

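        // Reserve 75 % of the available memory for the kernel heap, capped at
        // 64 MiB (0x0400_0000 bytes).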
        let reserve: usize = (avail_mem * 75) / 100;
        let reserve = core::cmp::min(reserve, 0x0400_0000);

        let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
        let virt_addr =
            self::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize).unwrap();
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

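        // Map the heap with 2 MiB pages up to the first 1 GiB boundary so the
        // remainder can be mapped with huge pages below.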
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
                / LargePageSize::SIZE as usize;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

    #[cfg(not(feature = "common-os"))]
    {
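        // Reserve 10 % of the available memory for stacks and other data structures.
        // Without the "mmap" feature the rest becomes the kernel heap; with it, the
        // heap is limited to 75 % of the available memory.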
        let stack_reserve: usize = (avail_mem * 10) / 100;

        #[cfg(not(feature = "mmap"))]
        let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
        #[cfg(feature = "mmap")]
        let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

        let virt_addr =
            crate::mm::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize)
                .unwrap();
        heap_start_addr = virt_addr;

        info!(
            "Heap: size {} MiB, start address {:p}",
            virt_size >> 20,
            virt_addr
        );

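        // As above, pre-map with 2 MiB pages until the heap start reaches a
        // 1 GiB boundary, so that huge pages can cover the rest.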
        #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
        if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
            let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
            if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
                map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
                map_size = virt_size - (map_addr - virt_addr) as usize;
            } else {
                map_addr = virt_addr.align_up(HugePageSize::SIZE);
                map_size = virt_size - (map_addr - virt_addr) as usize;
            }
        } else {
            map_addr = virt_addr;
            map_size = virt_size;
        }

        #[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
        {
            map_addr = virt_addr;
            map_size = virt_size;
        }
    }

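    // Map as much of the remaining heap as possible with 1 GiB pages. If mapping
    // stops early, continue with smaller pages below.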
    #[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
    if has_1gib_pages
        && map_size > HugePageSize::SIZE as usize
        && map_addr.is_aligned_to(HugePageSize::SIZE)
    {
        let size = map_size.align_down(HugePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
        {
            map_size -= num_pages * HugePageSize::SIZE as usize;
            map_addr += num_pages as u64 * HugePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

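    // Map the next chunk with 2 MiB pages while the remaining size and alignment allow it.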
    if has_2mib_pages
        && map_size > LargePageSize::SIZE as usize
        && map_addr.is_aligned_to(LargePageSize::SIZE)
    {
        let size = map_size.align_down(LargePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
        {
            map_size -= num_pages * LargePageSize::SIZE as usize;
            map_addr += num_pages as u64 * LargePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

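    // Map whatever is left with 4 KiB base pages.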
    if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
        let size = map_size.align_down(BasePageSize::SIZE as usize);
        if let Err(num_pages) =
            paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
        {
            map_size -= num_pages * BasePageSize::SIZE as usize;
            map_addr += num_pages as u64 * BasePageSize::SIZE;
        } else {
            map_size -= size;
            map_addr += size;
        }
    }

    let heap_end_addr = map_addr;

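    // Hand the mapped region over to the global allocator. The range
    // heap_start_addr..heap_end_addr was mapped above and is not used elsewhere.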
    unsafe {
        ALLOCATOR.init(
            heap_start_addr.as_mut_ptr(),
            (heap_end_addr - heap_start_addr) as usize,
        );
    }

    info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} Bytes unmapped)");
}

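/// Prints usage statistics of the physical and virtual memory managers.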
pub(crate) fn print_information() {
    self::physicalmem::print_information();
    self::virtualmem::print_information();
}

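/// Maps `size` bytes of physical memory starting at `physical_address` into the
/// kernel's virtual address space with the requested access flags and returns the
/// chosen virtual address. The size is rounded up to a multiple of the base page size.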
#[cfg(feature = "pci")]
pub(crate) fn map(
    physical_address: PhysAddr,
    size: usize,
    writable: bool,
    no_execution: bool,
    no_cache: bool,
) -> VirtAddr {
    use crate::arch::mm::paging::PageTableEntryFlags;
    #[cfg(target_arch = "x86_64")]
    use crate::arch::mm::paging::PageTableEntryFlagsExt;

    let size = size.align_up(BasePageSize::SIZE as usize);
    let count = size / BasePageSize::SIZE as usize;

    let mut flags = PageTableEntryFlags::empty();
    flags.normal();
    if writable {
        flags.writable();
    }
    if no_execution {
        flags.execute_disable();
    }
    if no_cache {
        flags.device();
    }

    let virtual_address = self::virtualmem::allocate(size).unwrap();
    arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

    virtual_address
}

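/// Unmaps `size` bytes starting at `virtual_address` and returns the region to the
/// virtual memory allocator. Panics if the address has no page table entry.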
#[allow(dead_code)]
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
    let size = size.align_up(BasePageSize::SIZE as usize);

    if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
        arch::mm::paging::unmap::<BasePageSize>(
            virtual_address,
            size / BasePageSize::SIZE as usize,
        );
        self::virtualmem::deallocate(virtual_address, size);
    } else {
        panic!(
            "No page table entry for virtual address {:p}",
            virtual_address
        );
    }
}