hermit/mm/mod.rs

pub(crate) mod allocator;
#[cfg(any(feature = "tcp", feature = "udp", feature = "fuse", feature = "vsock"))]
pub(crate) mod device_alloc;
pub(crate) mod physicalmem;
pub(crate) mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use hermit_sync::Lazy;
pub use memory_addresses::{PhysAddr, VirtAddr};

use self::allocator::LockedAllocator;
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

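/// The kernel's global heap allocator. It is initialized in [`init`] once the
/// heap region has been mapped.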
#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: LockedAllocator = LockedAllocator::new();

/// Virtual address range of the 2 MiB pages that map the kernel.
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
	if cfg!(target_os = "none") {
		// Calculate the start and end addresses of the 2 MiB page(s) that map the kernel.
		env::get_base_address().align_down(LargePageSize::SIZE)
			..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
	} else {
		VirtAddr::zero()..VirtAddr::zero()
	}
});

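/// Returns the start of the virtual address range that maps the kernel.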
pub(crate) fn kernel_start_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.start
}

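/// Returns the end of the virtual address range that maps the kernel.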
pub(crate) fn kernel_end_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.end
}

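/// Initializes the memory subsystem: sets up the physical and virtual memory
/// managers, maps the kernel heap, and hands the mapped region to the global
/// allocator.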
#[cfg(target_os = "none")]
pub(crate) fn init() {
	use crate::arch::mm::paging;

	Lazy::force(&KERNEL_ADDR_RANGE);

	arch::mm::init();
	arch::mm::init_page_tables();

	let total_mem = physicalmem::total_memory_size();
	let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
	info!("Total memory size: {} MiB", total_mem >> 20);
	info!(
		"Kernel region: {:p}..{:p}",
		kernel_addr_range.start, kernel_addr_range.end
	);

	// Reserve physical memory for the page tables needed to map all of it.
	// In the worst case, every mapping uses pages of BasePageSize::SIZE.
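	// Each page table occupies one base page and holds
	// BasePageSize::SIZE / align_of::<usize>() entries (512 for 4 KiB pages
	// on 64-bit targets). Illustrative worst case for 1 GiB of RAM:
	// 262144 base pages need 513 level-3 tables, 2 level-2 tables, and
	// 1 level-1 table, i.e. 516 * 4 KiB plus 2 * 2 MiB of slack (roughly 6 MiB).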
	let npages = total_mem / BasePageSize::SIZE as usize;
	let npage_3tables = npages / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let npage_2tables =
		npage_3tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let npage_1tables =
		npage_2tables / (BasePageSize::SIZE as usize / mem::align_of::<usize>()) + 1;
	let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
		* BasePageSize::SIZE as usize
		+ 2 * LargePageSize::SIZE as usize;
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	let has_1gib_pages = arch::processor::supports_1gib_pages();
	let has_2mib_pages = arch::processor::supports_2mib_pages();

	let min_mem = if env::is_uefi() {
		// On UEFI systems, the provided memory is guaranteed to be free,
		// and the kernel is located before it.
		reserved_space
	} else {
		// Otherwise, everything from the start of RAM up to the end of the
		// kernel image counts as occupied.
		(kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
			as usize
	};
	info!("Minimum memory size: {} MiB", min_mem >> 20);
	let avail_mem = total_mem
		.checked_sub(min_mem)
		.expect("Not enough memory available!")
		.align_down(LargePageSize::SIZE as usize);

	let mut map_addr;
	let mut map_size;
	let heap_start_addr;

	#[cfg(feature = "common-os")]
	{
		info!("Using HermitOS as common OS!");

		// The kernel heap gets at most 75% of the available memory;
		// the remainder is left to user space.
		let reserve: usize = (avail_mem * 75) / 100;
		// 64 MiB is sufficient for the kernel heap.
		let reserve = core::cmp::min(reserve, 0x0400_0000);

		let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
		let virt_addr =
			self::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize).unwrap();
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map 2 MiB pages up to the next 1 GiB page boundary.
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
				/ LargePageSize::SIZE as usize;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

	#[cfg(not(feature = "common-os"))]
	{
		// Reserve 10% of the memory for stack allocations.
		#[cfg(not(feature = "mmap"))]
		let stack_reserve: usize = (avail_mem * 10) / 100;

		// At first, only a small part of the memory is mapped as heap.
		// Once that heap is usable, the rest is mapped into the virtual
		// address space.

		#[cfg(not(feature = "mmap"))]
		let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
		#[cfg(feature = "mmap")]
		let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);

		let virt_addr =
			self::virtualmem::allocate_aligned(virt_size, LargePageSize::SIZE as usize)
				.unwrap();
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map 2 MiB pages up to the next 1 GiB page boundary.
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

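	// Map the remaining heap region using the largest supported page size
	// first, falling back to smaller sizes for whatever is left over. In each
	// step, `map_heap` returning `Err(n)` means that only `n` pages could be
	// mapped; `map_addr` and `map_size` then track the still-unmapped rest.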
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	if has_1gib_pages
		&& map_size > HugePageSize::SIZE as usize
		&& map_addr.is_aligned_to(HugePageSize::SIZE)
	{
		let size = map_size.align_down(HugePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
		{
			map_size -= num_pages * HugePageSize::SIZE as usize;
			map_addr += num_pages as u64 * HugePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if has_2mib_pages
		&& map_size > LargePageSize::SIZE as usize
		&& map_addr.is_aligned_to(LargePageSize::SIZE)
	{
		let size = map_size.align_down(LargePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
		{
			map_size -= num_pages * LargePageSize::SIZE as usize;
			map_addr += num_pages as u64 * LargePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
		let size = map_size.align_down(BasePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
		{
			map_size -= num_pages * BasePageSize::SIZE as usize;
			map_addr += num_pages as u64 * BasePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	let heap_end_addr = map_addr;

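	// SAFETY: The region from `heap_start_addr` up to `heap_end_addr` has
	// been mapped above, so it is sound to hand it to the allocator.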
	unsafe {
		ALLOCATOR.init(
			heap_start_addr.as_mut_ptr(),
			(heap_end_addr - heap_start_addr) as usize,
		);
	}

	info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

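/// Prints the current usage of the physical and virtual memory managers.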
pub(crate) fn print_information() {
	self::physicalmem::print_information();
	self::virtualmem::print_information();
}

/// Maps the given physical memory range into the virtual address space and
/// returns the virtual start address.
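///
/// # Example (illustrative; the MMIO address is hypothetical)
///
/// ```ignore
/// // Map one 4 KiB MMIO page: writable, non-executable, uncached.
/// let virt = crate::mm::map(PhysAddr::new(0xfebf_0000), 0x1000, true, true, true);
/// ```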
#[cfg(feature = "pci")]
pub(crate) fn map(
	physical_address: PhysAddr,
	size: usize,
	writable: bool,
	no_execution: bool,
	no_cache: bool,
) -> VirtAddr {
	use crate::arch::mm::paging::PageTableEntryFlags;
	#[cfg(target_arch = "x86_64")]
	use crate::arch::mm::paging::PageTableEntryFlagsExt;

	let size = size.align_up(BasePageSize::SIZE as usize);
	let count = size / BasePageSize::SIZE as usize;

	let mut flags = PageTableEntryFlags::empty();
	flags.normal();
	if writable {
		flags.writable();
	}
	if no_execution {
		flags.execute_disable();
	}
	if no_cache {
		flags.device();
	}

	let virtual_address = self::virtualmem::allocate(size).unwrap();
	arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

	virtual_address
}

#[allow(dead_code)]
/// Unmaps a virtual address range without freeing the physical memory it is mapped to!
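///
/// # Example (illustrative)
///
/// ```ignore
/// // `virt` is assumed to be an address previously returned by `map`.
/// crate::mm::unmap(virt, 0x1000);
/// ```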
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
	let size = size.align_up(BasePageSize::SIZE as usize);

	if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
		arch::mm::paging::unmap::<BasePageSize>(
			virtual_address,
			size / BasePageSize::SIZE as usize,
		);
		self::virtualmem::deallocate(virtual_address, size);
	} else {
		panic!("No page table entry for virtual address {virtual_address:p}");
	}
}