hermit/mm/mod.rs

//! Memory management.
//!
//! This is an overview of Hermit's memory layout:
//!
//! - `DeviceAlloc.phys_offset` is 0 if `!cfg!(careful)`
//! - User space virtual memory is only used if `cfg!(feature = "common-os")`
//! - On x86-64, PCI BARs, I/O APICs, and local APICs may be in `0xc0000000..0xffffffff`, which could be inside of `MEM`.
//!
//! ```text
//!                               Virtual address
//!                                    space
//!
//!                                 ...┌───┬──► 00000000
//!           Physical address   ...   │   │
//!                space      ...      │   │ Identity map
//!                        ...         │   │
//!    00000000 ◄──┬───┐...         ...├───┼──► mem_size
//!                │   │   ...   ...   │   │
//!     FrameAlloc │MEM│      ...      │   │ Unused
//!                │   │   ...   ...   │   │
//!    mem_size ◄──┼───┤...         ...├───┼──► DeviceAlloc.phys_offset
//!                │   │   ...         │   │
//!                │   │      ...      │   │ DeviceAlloc
//!                │   │         ...   │   │
//!          Empty │   │            ...├───┼──► DeviceAlloc.phys_offset + mem_size
//!                │   │               │   │
//!                │   │               │   │
//!                │   │               │   │ Unused
//!     Unknown ◄──┼───┤               │   │
//!                │   │               │   │
//!            PCI │   │               ├───┼──► kernel_virt_start
//!                │   │               │   │
//!     Unknown ◄──┼───┤               │   │ PageAlloc
//!                │   │               │   │
//!                │   │               ├───┼──► kernel_virt_end
//!                │   │               │   │
//!          Empty │   │               │   │
//!                │   │               │   │ User space
//!                │   │               │   │
//!                │   │               │   │
//! ```
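//!
//! For example (hypothetical numbers, for illustration only): if
//! `DeviceAlloc.phys_offset` were `0x1_0000_0000`, the physical address
//! `0x1234_5000` inside `MEM` would be reachable through `DeviceAlloc` at the
//! virtual address `0x1_1234_5000`.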

pub(crate) mod device_alloc;
mod page_range_alloc;
mod physicalmem;
mod virtualmem;

use core::mem;
use core::ops::Range;

use align_address::Align;
use free_list::{PageLayout, PageRange};
use hermit_sync::{Lazy, RawInterruptTicketMutex};
pub use memory_addresses::{PhysAddr, VirtAddr};
use talc::{ErrOnOom, Span, Talc, Talck};

pub use self::page_range_alloc::{PageRangeAllocator, PageRangeBox};
pub use self::physicalmem::{FrameAlloc, FrameBox};
pub use self::virtualmem::{PageAlloc, PageBox};
#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
use crate::arch::mm::paging::HugePageSize;
pub use crate::arch::mm::paging::virtual_to_physical;
use crate::arch::mm::paging::{BasePageSize, LargePageSize, PageSize};
use crate::{arch, env};

#[cfg(target_os = "none")]
#[global_allocator]
pub(crate) static ALLOCATOR: Talck<RawInterruptTicketMutex, ErrOnOom> = Talc::new(ErrOnOom).lock();

/// Physical and virtual address range of the 2 MiB pages that map the kernel.
static KERNEL_ADDR_RANGE: Lazy<Range<VirtAddr>> = Lazy::new(|| {
	if cfg!(target_os = "none") {
		// Calculate the start and end addresses of the 2 MiB page(s) that map the kernel.
		env::get_base_address().align_down(LargePageSize::SIZE)
			..(env::get_base_address() + env::get_image_size()).align_up(LargePageSize::SIZE)
	} else {
		VirtAddr::zero()..VirtAddr::zero()
	}
});

pub(crate) fn kernel_start_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.start
}

pub(crate) fn kernel_end_address() -> VirtAddr {
	KERNEL_ADDR_RANGE.end
}

#[cfg(target_os = "none")]
pub(crate) fn init() {
	use crate::arch::mm::paging;

	Lazy::force(&KERNEL_ADDR_RANGE);

	unsafe {
		arch::mm::init();
	}

	let total_mem = physicalmem::total_memory_size();
	let kernel_addr_range = KERNEL_ADDR_RANGE.clone();
	info!("Total memory size: {} MiB", total_mem >> 20);
	info!(
		"Kernel region: {:p}..{:p}",
		kernel_addr_range.start, kernel_addr_range.end
	);

	// We reserve physical memory for the required page tables.
	// In the worst case, we use a page size of BasePageSize::SIZE.
	let npages = total_mem / BasePageSize::SIZE as usize;
	let npage_div = BasePageSize::SIZE as usize / mem::align_of::<usize>();
	let npage_3tables = npages / npage_div + 1;
	let npage_2tables = npage_3tables / npage_div + 1;
	let npage_1tables = npage_2tables / npage_div + 1;
	let reserved_space = (npage_3tables + npage_2tables + npage_1tables)
		* BasePageSize::SIZE as usize
		+ 2 * LargePageSize::SIZE as usize;
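	// Rough sanity check of the estimate above (illustrative numbers only, assuming
	// 4 KiB base pages, 2 MiB large pages, and an 8-byte `usize`): for 1 GiB of memory,
	// npages = 262144 and npage_div = 512, so 513 + 2 + 1 table pages are reserved,
	// i.e. 516 * 4 KiB + 2 * 2 MiB ≈ 6 MiB of reserved space.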
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	let has_1gib_pages = arch::processor::supports_1gib_pages();
	let has_2mib_pages = arch::processor::supports_2mib_pages();

	let min_mem = if env::is_uefi() {
		// On UEFI, the given memory is guaranteed to be free, and the kernel is located before it.
		reserved_space
	} else {
		(kernel_addr_range.end.as_u64() - env::get_ram_address().as_u64() + reserved_space as u64)
			as usize
	};
	info!("Minimum memory size: {} MiB", min_mem >> 20);
	let avail_mem = total_mem
		.checked_sub(min_mem)
		.unwrap_or_else(|| panic!("Not enough memory available!"))
		.align_down(LargePageSize::SIZE as usize);

	let mut map_addr;
	let mut map_size;
	let heap_start_addr;

	#[cfg(feature = "common-os")]
	{
		info!("Using HermitOS as common OS!");

		// we reserve at least 75% of the memory for the user space
		let reserve: usize = (avail_mem * 75) / 100;
		// 64 MiB is enough for the kernel heap
		let reserve = core::cmp::min(reserve, 0x0400_0000);
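		// Illustrative numbers: with 1 GiB of available memory, 75% would be 768 MiB,
		// so the 64 MiB cap applies and the kernel heap ends up at 64 MiB.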

		let virt_size: usize = reserve.align_down(LargePageSize::SIZE as usize);
		let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
		let page_range = PageAlloc::allocate(layout).unwrap();
		let virt_addr = VirtAddr::from(page_range.start());
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map large pages up to the next huge page boundary
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) as usize
				/ LargePageSize::SIZE as usize;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

	#[cfg(not(feature = "common-os"))]
	{
		// we reserve 10% of the memory for stack allocations
		#[cfg(not(feature = "mman"))]
		let stack_reserve: usize = (avail_mem * 10) / 100;

		// At first, we map only a small part into the heap.
		// Afterwards, the heap is already usable while we map the rest
		// into the virtual address space.

		#[cfg(not(feature = "mman"))]
		let virt_size: usize = (avail_mem - stack_reserve).align_down(LargePageSize::SIZE as usize);
		#[cfg(feature = "mman")]
		let virt_size: usize = ((avail_mem * 75) / 100).align_down(LargePageSize::SIZE as usize);
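		// Illustrative numbers: with 1 GiB of available memory and without "mman", the
		// stack reserve is about 102 MiB, leaving roughly 920 MiB (rounded down to a
		// 2 MiB boundary) for the heap; with "mman", 75% (768 MiB) is used instead.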

		let layout = PageLayout::from_size_align(virt_size, LargePageSize::SIZE as usize).unwrap();
		let page_range = PageAlloc::allocate(layout).unwrap();
		let virt_addr = VirtAddr::from(page_range.start());
		heap_start_addr = virt_addr;

		info!(
			"Heap: size {} MiB, start address {:p}",
			virt_size >> 20,
			virt_addr
		);

		#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
		if has_1gib_pages && virt_size > HugePageSize::SIZE as usize {
			// Map large pages up to the next huge page boundary
			let npages = (virt_addr.align_up(HugePageSize::SIZE) - virt_addr) / LargePageSize::SIZE;
			if let Err(n) = paging::map_heap::<LargePageSize>(virt_addr, npages as usize) {
				map_addr = virt_addr + n as u64 * LargePageSize::SIZE;
				map_size = virt_size - (map_addr - virt_addr) as usize;
			} else {
				map_addr = virt_addr.align_up(HugePageSize::SIZE);
				map_size = virt_size - (map_addr - virt_addr) as usize;
			}
		} else {
			map_addr = virt_addr;
			map_size = virt_size;
		}

		#[cfg(not(any(target_arch = "x86_64", target_arch = "riscv64")))]
		{
			map_addr = virt_addr;
			map_size = virt_size;
		}
	}

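	// Map the remaining heap region with the largest page size that fits: 1 GiB huge
	// pages first (where supported and aligned), then 2 MiB large pages, then 4 KiB base
	// pages. If map_heap fails partway through, the pages mapped so far are kept and the
	// remainder falls through to the next smaller page size.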
	#[cfg(any(target_arch = "x86_64", target_arch = "riscv64"))]
	if has_1gib_pages
		&& map_size > HugePageSize::SIZE as usize
		&& map_addr.is_aligned_to(HugePageSize::SIZE)
	{
		let size = map_size.align_down(HugePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<HugePageSize>(map_addr, size / HugePageSize::SIZE as usize)
		{
			map_size -= num_pages * HugePageSize::SIZE as usize;
			map_addr += num_pages as u64 * HugePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if has_2mib_pages
		&& map_size > LargePageSize::SIZE as usize
		&& map_addr.is_aligned_to(LargePageSize::SIZE)
	{
		let size = map_size.align_down(LargePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<LargePageSize>(map_addr, size / LargePageSize::SIZE as usize)
		{
			map_size -= num_pages * LargePageSize::SIZE as usize;
			map_addr += num_pages as u64 * LargePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	if map_size > BasePageSize::SIZE as usize && map_addr.is_aligned_to(BasePageSize::SIZE) {
		let size = map_size.align_down(BasePageSize::SIZE as usize);
		if let Err(num_pages) =
			paging::map_heap::<BasePageSize>(map_addr, size / BasePageSize::SIZE as usize)
		{
			map_size -= num_pages * BasePageSize::SIZE as usize;
			map_addr += num_pages as u64 * BasePageSize::SIZE;
		} else {
			map_size -= size;
			map_addr += size;
		}
	}

	let heap_end_addr = map_addr;

	let arena = Span::new(heap_start_addr.as_mut_ptr(), heap_end_addr.as_mut_ptr());
	unsafe {
		ALLOCATOR.lock().claim(arena).unwrap();
	}

	info!("Heap is located at {heap_start_addr:p}..{heap_end_addr:p} ({map_size} bytes unmapped)");
}

pub(crate) fn print_information() {
	info!("{FrameAlloc}");
	info!("{PageAlloc}");
}

/// Maps `size` bytes starting at the given physical address into the virtual
/// address space and returns the resulting virtual address.
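///
/// A hypothetical use (illustrative values only), mapping a 4 KiB MMIO region
/// writable, non-executable, and uncached:
/// `let bar = map(PhysAddr::new(0xfebd_0000), 0x1000, true, true, true);`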
#[cfg(feature = "pci")]
pub(crate) fn map(
	physical_address: PhysAddr,
	size: usize,
	writable: bool,
	no_execution: bool,
	no_cache: bool,
) -> VirtAddr {
	use crate::arch::mm::paging::PageTableEntryFlags;
	#[cfg(target_arch = "x86_64")]
	use crate::arch::mm::paging::PageTableEntryFlagsExt;

	let size = size.align_up(BasePageSize::SIZE as usize);
	let count = size / BasePageSize::SIZE as usize;

	let mut flags = PageTableEntryFlags::empty();
	flags.normal();
	if writable {
		flags.writable();
	}
	if no_execution {
		flags.execute_disable();
	}
	if no_cache {
		flags.device();
	}

	let layout = PageLayout::from_size(size).unwrap();
	let page_range = PageAlloc::allocate(layout).unwrap();
	let virtual_address = VirtAddr::from(page_range.start());
	arch::mm::paging::map::<BasePageSize>(virtual_address, physical_address, count, flags);

	virtual_address
}

#[allow(dead_code)]
/// Unmaps a virtual address range without freeing the physical memory it is mapped to.
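///
/// This only removes the page table entries and returns the virtual range to
/// [`PageAlloc`]; any backing physical frames must be released separately.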
pub(crate) fn unmap(virtual_address: VirtAddr, size: usize) {
	let size = size.align_up(BasePageSize::SIZE as usize);

	if arch::mm::paging::virtual_to_physical(virtual_address).is_some() {
		arch::mm::paging::unmap::<BasePageSize>(
			virtual_address,
			size / BasePageSize::SIZE as usize,
		);

		let range = PageRange::from_start_len(virtual_address.as_usize(), size).unwrap();
		unsafe {
			PageAlloc::deallocate(range);
		}
	} else {
		panic!("No page table entry for virtual address {virtual_address:p}");
	}
}