hermit/mm/mod.rs

pub(crate) mod allocator;
pub(crate) mod device_alloc;

use core::mem;
use core::ops::Range;

use align_address::Align;
use hermit_sync::Lazy;
pub use memory_addresses::{PhysAddr, VirtAddr};

use self::allocator::LockedAllocator;
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
#[cfg(target_arch = "x86_64")]
use crate::arch::mm::paging::PageTableEntryFlagsExt;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize, PageTableEntryFlags};
use crate::arch::mm::physicalmem;
use crate::{arch, env};

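/// The global allocator used for the kernel heap.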
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: LockedAllocator = LockedAllocator::new();

/// Physical and virtual address range of the 2 MiB pages that map the kernel.
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
	if cfg!(target_os = "none") {
		// Calculate the start and end addresses of the 2 MiB page(s) that map the kernel.
		env::get_base_address().align_down(LargePageSize::SIZE)
			..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
	} else {
		VirtAddr::zero()..VirtAddr::zero()
	}
});

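/// Returns the virtual start address of the 2 MiB pages that map the kernel.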
pub(crate) fn kernel_start_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.start
}

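/// Returns the virtual end address of the 2 MiB pages that map the kernel.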
pub(crate) fn kernel_end_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.end
}

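/// Initializes the memory subsystem and the kernel heap.
///
/// Sets up the architecture-specific memory management and page tables,
/// computes the available memory, allocates a virtual region for the heap,
/// and maps it using the largest supported page sizes before handing it to
/// the global allocator.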
#[cfg(target_os = "none")]
pub(crate) fn init() {
	use crate::arch::mm::paging;

	Lazy::force(&KERNEL_ADDR_RANGE);

	arch::mm::init();
	arch::mm::init_page_tables();

	let total_mem = physicalmem::total_memory_size();
	let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
	info!("Total memory size: {} MiB", total_mem >> 20);
	info!(
		"Kernel region: {:p}..{:p}",
		kernel_addr_range.start, kernel_addr_range.end
	);

	// We reserve physical memory for the required page tables.
	// In the worst case, the heap is mapped with pages of BasePageSize::SIZE,
	// and each page table holds BasePageSize::SIZE / mem::align_of::<usize>() entries.
	let npages = total_mem / BasePageSize::SIZE as usize;
	let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let npage_2tables =
		npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let npage_1tables =
		npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
		* BasePageSize::SIZE as usize
		+ 2 * LargePageSize::SIZE as usize;
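	// Query whether the processor supports 1 GiB and 2 MiB pages,
	// so the heap can be mapped with the largest available page size.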
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	let has_1gib_pages = arch::processor::supports_1gib_pages();
	let has_2mib_pages = arch::processor::supports_2mib_pages();

	let min_mem = if env::is_uefi() {
		// On UEFI, the given memory is guaranteed to be free and the kernel is located before it.
		reserved_space
	} else {
		(kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
			as usize
	};
	info!("Minimum memory size: {} MiB", min_mem >> 20);
	let avail_mem = total_mem
		.checked_sub(min_mem)
		.unwrap_or_else(|| panic!("Not enough memory available!"))
		.align_down(LargePageSize::SIZE as usize);

	let mut map_addr;
	let mut map_size;
	let heap_start_addr;

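	// With the common-os feature, the kernel heap is capped so that most memory stays available for user space.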
	#[cfg(feature = "common-os")]
	{
		info!("Using HermitOS as common OS!");

		// We reserve at least 75% of the memory for the user space.
		let reserve: usize = (avail_mem * 75) / 100;
		// 64 MiB is enough as kernel heap.
		let reserve = core::cmp::min(reserve, 0x0400_0000);

		let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
		let virt_addr =
			arch::mm::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize)
				.unwrap();
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map large pages up to the next huge page boundary.
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
				/ LargePageSize::SIZE as usize;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

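	// Without the common-os feature, most of the available memory is used directly as kernel heap.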
	#[cfg(not(feature = "common-os"))]
	{
		// We reserve 10% of the memory for stack allocations.
		let stack_reserve: usize = (avail_mem * 10) / 100;

		// At first, we map only a small part of the heap. Afterwards, we
		// already use the heap while mapping the rest into the virtual
		// address space.

		#[cfg(not(feature = "mmap"))]
		let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
		#[cfg(feature = "mmap")]
		let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

		let virt_addr =
			arch::mm::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize)
				.unwrap();
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map large pages up to the next huge page boundary.
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

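	// Map the remaining heap region, preferring huge pages, then large pages,
	// and finally falling back to base pages.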
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	if has_1gib_pages
		&& map_size > HugePageSize::SIZE as usize
		&& map_addr.is_aligned_to(HugePageSize::SIZE)
	{
		let size = map_size.align_down(HugePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
		{
			map_size -= num_pages * HugePageSize::SIZE as usize;
			map_addr += num_pages as u64 * HugePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if has_2mib_pages
		&& map_size > LargePageSize::SIZE as usize
		&& map_addr.is_aligned_to(LargePageSize::SIZE)
	{
		let size = map_size.align_down(LargePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
		{
			map_size -= num_pages * LargePageSize::SIZE as usize;
			map_addr += num_pages as u64 * LargePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
		let size = map_size.align_down(BasePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
		{
			map_size -= num_pages * BasePageSize::SIZE as usize;
			map_addr += num_pages as u64 * BasePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	let heap_end_addr = map_addr;

	unsafe {
		ALLOCATOR.init(
			heap_start_addr.as_mut_ptr(),
			(heap_end_addr - heap_start_addr) as usize,
		);
	}

	info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} Bytes unmapped)");
}

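/// Prints information about the physical and virtual memory managers.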
pub(crate) fn print_information() {
	arch::mm::physicalmem::print_information();
	arch::mm::virtualmem::print_information();
}

/// Soft-deprecated in favor of `DeviceAlloc`
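///
/// Allocates `size` bytes (rounded up to the base page size) of physical
/// memory, maps it writable into a freshly allocated virtual region, and
/// returns the virtual start address. If `no_execution` is set, the mapping
/// is marked as non-executable.
///
/// Minimal usage sketch (illustrative only; it assumes this module is
/// reachable as `crate::mm` and that the allocation succeeds):
///
/// ```ignore
/// // Allocate one non-executable base page and release it again.
/// let virt = crate::mm::allocate(BasePageSize::SIZE as usize, true);
/// crate::mm::deallocate(virt, BasePageSize::SIZE as usize);
/// ```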
pub(crate) fn allocate(size: usize, no_execution: bool) -> VirtAddr {
	let size = size.align_up(BasePageSize::SIZE as usize);
	let physical_address = arch::mm::physicalmem::allocate(size).unwrap();
	let virtual_address = arch::mm::virtualmem::allocate(size).unwrap();

	let count = size / BasePageSize::SIZE as usize;
	let mut flags = PageTableEntryFlags::empty();
	flags.normal().writable();
	if no_execution {
		flags.execute_disable();
	}

	arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

	virtual_address
}

/// Soft-deprecated in favor of `DeviceAlloc`
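///
/// Unmaps the region starting at `virtual_address` (with `size` rounded up to
/// the base page size) and returns both the virtual range and the backing
/// physical memory to their allocators. Panics if no page table entry exists
/// for the given address.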
pub(crate) fn deallocate(virtual_address: VirtAddr, size: usize) {
	let size = size.align_up(BasePageSize::SIZE as usize);

	if let Some(phys_addr) = arch::mm::paging::virtual_to_physical(virtual_address) {
		arch::mm::paging::unmap::<BasePageSize>(
			virtual_address,
			size / BasePageSize::SIZE as usize,
		);
		arch::mm::virtualmem::deallocate(virtual_address, size);
		arch::mm::physicalmem::deallocate(phys_addr, size);
	} else {
		panic!(
			"No page table entry for virtual address {:p}",
			virtual_address
		);
	}
}

/// Maps the given physical address range of `size` bytes into the virtual address space and returns the virtual address.
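///
/// `size` is rounded up to the base page size. The `writable`, `no_execution`,
/// and `no_cache` parameters select the corresponding page table entry flags
/// (`no_cache` maps the region as device memory).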
#[cfg(feature = "pci")]
pub(crate) fn map(
	physical_address: PhysAddr,
	size: usize,
	writable: bool,
	no_execution: bool,
	no_cache: bool,
) -> VirtAddr {
	let size = size.align_up(BasePageSize::SIZE as usize);
	let count = size / BasePageSize::SIZE as usize;

	let mut flags = PageTableEntryFlags::empty();
	flags.normal();
	if writable {
		flags.writable();
	}
	if no_execution {
		flags.execute_disable();
	}
	if no_cache {
		flags.device();
	}

	let virtual_address = arch::mm::virtualmem::allocate(size).unwrap();
	arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

	virtual_address
}

#[allow(dead_code)]
/// Unmaps a virtual address range without freeing the physical memory it is mapped to.
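///
/// The virtual range is returned to the virtual memory allocator, but the
/// backing physical frames are left untouched. Panics if no page table entry
/// exists for the given address.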
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
	let size = size.align_up(BasePageSize::SIZE as usize);

	if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
		arch::mm::paging::unmap::<BasePageSize>(
			virtual_address,
			size / BasePageSize::SIZE as usize,
		);
		arch::mm::virtualmem::deallocate(virtual_address, size);
	} else {
		panic!(
			"No page table entry for virtual address {:p}",
			virtual_address
		);
	}
}