hermit/arch/x86_64/mm/paging.rs

1use core::fmt::Debug;
2use core::ptr;
3
4use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
5#[cfg(feature = "common-os")]
6use x86_64::registers::segmentation::SegmentSelector;
7pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
8use x86_64::structures::idt::PageFaultErrorCode;
9pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
10use x86_64::structures::paging::frame::PhysFrameRange;
11use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult, UnmapError};
12use x86_64::structures::paging::page::PageRange;
13use x86_64::structures::paging::{
14	Mapper, OffsetPageTable, Page, PageTable, PageTableIndex, PhysFrame, RecursivePageTable,
15	Size4KiB, Translate,
16};
17
18use crate::arch::x86_64::kernel::processor;
19use crate::arch::x86_64::mm::{PhysAddr, VirtAddr};
20use crate::mm::physicalmem;
21use crate::{env, scheduler};
22
/// Builder-style extension methods for [`PageTableEntryFlags`].
///
/// Each method mutates the flags in place and returns `&mut Self` so that
/// calls can be chained, e.g. `flags.normal().writable().execute_disable()`.
pub trait PageTableEntryFlagsExt {
	/// Marks the mapping as device memory (disables caching).
	fn device(&mut self) -> &mut Self;

	/// Marks the mapping as normal memory (enables caching).
	fn normal(&mut self) -> &mut Self;

	/// Makes the mapping read-only.
	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self;

	/// Makes the mapping writable.
	fn writable(&mut self) -> &mut Self;

	/// Forbids instruction fetches from the mapping.
	fn execute_disable(&mut self) -> &mut Self;

	/// Allows instruction fetches from the mapping.
	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self;

	/// Makes the mapping accessible from user mode.
	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self;

	/// Restricts the mapping to supervisor (kernel) mode.
	#[expect(dead_code)]
	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self;
}
45
46impl PageTableEntryFlagsExt for PageTableEntryFlags {
47	fn device(&mut self) -> &mut Self {
48		self.insert(PageTableEntryFlags::NO_CACHE);
49		self
50	}
51
52	fn normal(&mut self) -> &mut Self {
53		self.remove(PageTableEntryFlags::NO_CACHE);
54		self
55	}
56
57	#[cfg(feature = "acpi")]
58	fn read_only(&mut self) -> &mut Self {
59		self.remove(PageTableEntryFlags::WRITABLE);
60		self
61	}
62
63	fn writable(&mut self) -> &mut Self {
64		self.insert(PageTableEntryFlags::WRITABLE);
65		self
66	}
67
68	fn execute_disable(&mut self) -> &mut Self {
69		self.insert(PageTableEntryFlags::NO_EXECUTE);
70		self
71	}
72
73	#[cfg(feature = "common-os")]
74	fn execute_enable(&mut self) -> &mut Self {
75		self.remove(PageTableEntryFlags::NO_EXECUTE);
76		self
77	}
78
79	#[cfg(feature = "common-os")]
80	fn user(&mut self) -> &mut Self {
81		self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
82		self
83	}
84
85	#[cfg(feature = "common-os")]
86	fn kernel(&mut self) -> &mut Self {
87		self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
88		self
89	}
90}
91
92pub use x86_64::structures::paging::{
93	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
94};
95
/// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
///
/// # Safety
///
/// The returned table aliases the live page tables referenced by CR3.
/// The caller must ensure that physical memory is identity-mapped (offset 0)
/// and that no conflicting mutable references to the tables exist.
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
	// CR3 holds the physical address of the level 4 (PML4) table.
	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
	let level_4_table_ptr =
		ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
	unsafe {
		let level_4_table = level_4_table_ptr.as_mut().unwrap();
		// Offset 0: virtual addresses equal physical addresses.
		OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
	}
}
106
107/// Translate a virtual memory address to a physical one.
108pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
109	let addr = x86_64::VirtAddr::from(virtual_address);
110
111	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
112
113	match translate_result {
114		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
115			trace!("Uable to determine the physical address of 0x{virtual_address:X}");
116			None
117		}
118		TranslateResult::Mapped { frame, offset, .. } => {
119			Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
120		}
121	}
122}
123
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `virtual_address` - First virtual address of the page range to map
/// * `physical_address` - First physical address to map these pages to
/// * `count` - Number of pages of size `S` to map
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or NO_EXECUTE).
///   The PRESENT flag is set automatically.
pub fn map<S>(
	virtual_address: VirtAddr,
	physical_address: PhysAddr,
	count: usize,
	flags: PageTableEntryFlags,
) where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let pages = {
		let start = Page::<S>::containing_address(virtual_address.into());
		let end = start + count as u64;
		Page::range(start, end)
	};

	let frames = {
		let start = PhysFrame::<S>::containing_address(physical_address.into());
		let end = start + count as u64;
		PhysFrame::range(start, end)
	};

	let flags = flags | PageTableEntryFlags::PRESENT;

	trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

	// Maps each page in `pages` to the corresponding frame in `frames`.
	// Returns `true` if at least one page was already mapped and had to be
	// unmapped first.
	unsafe fn map_pages<M, S>(
		mapper: &mut M,
		pages: PageRange<S>,
		frames: PhysFrameRange<S>,
		flags: PageTableEntryFlags,
	) -> bool
	where
		M: Mapper<S>,
		S: PageSize + Debug,
	{
		// Held across the whole loop so the allocator state stays consistent
		// while intermediate tables may be allocated by `map_to`.
		let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
		let mut unmapped = false;
		for (page, frame) in pages.zip(frames) {
			// TODO: Require explicit unmaps
			let unmap = mapper.unmap(page);
			if let Ok((_frame, flush)) = unmap {
				unmapped = true;
				flush.flush();
				debug!("Had to unmap page {page:?} before mapping.");
			}
			let map = unsafe { mapper.map_to(page, frame, flags, &mut *frame_allocator) };
			map.unwrap().flush();
		}
		unmapped
	}

	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

	if unmapped {
		// Other cores may still hold stale TLB entries for remapped pages.
		#[cfg(feature = "smp")]
		crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
	}
}
190
191/// Maps `count` pages at address `virt_addr`. If the allocation of a physical memory failed,
192/// the number of successful mapped pages are returned as error value.
193pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
194where
195	S: PageSize + Debug,
196	for<'a> RecursivePageTable<'a>: Mapper<S>,
197	for<'a> OffsetPageTable<'a>: Mapper<S>,
198{
199	let flags = {
200		let mut flags = PageTableEntryFlags::empty();
201		flags.normal().writable().execute_disable();
202		flags
203	};
204
205	let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);
206
207	for (map_counter, virt_addr) in virt_addrs.enumerate() {
208		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
209			.map_err(|_| map_counter)?;
210		map::<S>(virt_addr, phys_addr, 1, flags);
211	}
212
213	Ok(())
214}
215
/// Identity-maps (virtual address == physical address) the size-`S` frame
/// starting at `phys_addr`, writable and non-executable.
///
/// `phys_addr` must be `S`-aligned (panics otherwise). An already existing
/// mapping is accepted as long as it resolves to the same frame — including
/// the case where a huge parent page covers the region; any other conflict
/// or mapping failure panics.
pub fn identity_map<S>(phys_addr: PhysAddr)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let frame = PhysFrame::<S>::from_start_address(phys_addr.into()).unwrap();
	let flags = PageTableEntryFlags::PRESENT
		| PageTableEntryFlags::WRITABLE
		| PageTableEntryFlags::NO_EXECUTE;
	let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
	let mapper_result =
		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };

	match mapper_result {
		Ok(mapper_flush) => mapper_flush.flush(),
		// Already mapped: fine as long as it points at the expected frame.
		Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
		Err(MapToError::ParentEntryHugePage) => {
			// A huge page already covers this region; verify it translates
			// to the requested physical address.
			let page_table = unsafe { identity_mapped_page_table() };
			let virt_addr = VirtAddr::new(frame.start_address().as_u64()).into();
			let phys_addr = frame.start_address();
			assert_eq!(page_table.translate_addr(virt_addr), Some(phys_addr));
		}
		Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
	}
}
242
243pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
244where
245	S: PageSize + Debug,
246	for<'a> RecursivePageTable<'a>: Mapper<S>,
247	for<'a> OffsetPageTable<'a>: Mapper<S>,
248{
249	trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");
250
251	let first_page = Page::<S>::containing_address(virtual_address.into());
252	let last_page = first_page + count as u64;
253	let range = Page::range(first_page, last_page);
254
255	for page in range {
256		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
257		match unmap_result {
258			Ok((_frame, flush)) => flush.flush(),
259			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
260			// We should handle this case there instead of here.
261			Err(UnmapError::PageNotMapped) => {
262				debug!("Tried to unmap {page:?}, which was not mapped.");
263			}
264			Err(err) => panic!("{err:?}"),
265		}
266	}
267}
268
/// Exception handler for page faults (#PF): logs the fault details and
/// aborts the current task.
#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	error!("Page fault (#PF)!");
	// CR2 holds the faulting linear address.
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
282
/// Exception handler for page faults (#PF) in "common-os" builds: swaps the
/// GS base if the fault did not originate from the kernel code segment,
/// then logs the fault details and aborts the current task.
#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	mut stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	unsafe {
		// Selector 0x08 is presumably the kernel code segment (matches the
		// GDT layout — TODO confirm); a fault from any other segment entered
		// with the user GS base, so swap to the kernel's.
		if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
			core::arch::asm!("swapgs", options(nostack));
		}
	}
	error!("Page fault (#PF)!");
	// CR2 holds the faulting linear address.
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
301
/// Architecture-specific paging initialization.
pub fn init() {
	make_p4_writable();
}
305
/// Ensures the P4 (PML4) table's own mapping is writable.
///
/// Only runs on UEFI boots — presumably the firmware/loader can leave the
/// table mapped read-only there (TODO confirm); non-UEFI boots return early.
fn make_p4_writable() {
	debug!("Making P4 table writable");

	if !env::is_uefi() {
		return;
	}

	let mut pt = unsafe { identity_mapped_page_table() };

	// Virtual address of the P4 table — identical to its physical address
	// under the identity mapping.
	let p4_page = {
		let (p4_frame, _) = Cr3::read_raw();
		let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
		Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
	};

	// The P4 table must itself be mapped, so translation cannot fail.
	let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
		unreachable!()
	};

	// Set WRITABLE on whichever paging level actually maps the P4 page.
	let make_writable = || unsafe {
		let flags = flags | PageTableEntryFlags::WRITABLE;
		match frame {
			MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
		}
	};

	// Runs `f` with CR0.WP temporarily cleared so that supervisor writes to
	// read-only pages succeed, then restores the original WP setting.
	unsafe fn without_protect<F, R>(f: F) -> R
	where
		F: FnOnce() -> R,
	{
		let cr0 = Cr0::read();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
		}
		let ret = f();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0) }
		}
		ret
	}

	unsafe { without_protect(make_writable) }
}
351
352pub fn init_page_tables() {}
353
/// Debug helper: prints the translation of `virt_addr` through `pt` — the
/// target frame, page offset, flags, the page-table indices involved, and
/// the corresponding page-table entries.
#[allow(dead_code)]
unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
	use x86_64::structures::paging::mapper::{MappedFrame, TranslateResult};

	match pt.translate(virt_addr) {
		TranslateResult::Mapped {
			frame,
			offset,
			flags,
		} => {
			let phys_addr = frame.start_address() + offset;
			println!("virt_addr: {virt_addr:p}, phys_addr: {phys_addr:p}, flags: {flags:?}");
			// Table indices for each paging level, from P4 down to P1.
			let indices = [
				virt_addr.p4_index(),
				virt_addr.p3_index(),
				virt_addr.p2_index(),
				virt_addr.p1_index(),
			];
			// Larger mappings terminate at a higher level, so fewer of the
			// indices are meaningful.
			let valid_indices = match frame {
				MappedFrame::Size4KiB(_) => &indices[..4],
				MappedFrame::Size2MiB(_) => &indices[..3],
				MappedFrame::Size1GiB(_) => &indices[..2],
			};
			for (i, page_table_index) in valid_indices.iter().copied().enumerate() {
				print!("p{}: {}, ", 4 - i, u16::from(page_table_index));
			}
			println!();
			unsafe {
				print_page_table_entries(valid_indices);
			}
		}
		TranslateResult::NotMapped => println!("virt_addr: {virt_addr:p} not mapped"),
		TranslateResult::InvalidFrameAddress(phys_addr) => {
			println!("virt_addr: {virt_addr:p}, phys_addr: {phys_addr:p} (invalid)");
		}
	}
}
391
/// Debug helper: walks the page-table hierarchy along `page_table_indices`
/// (starting at the current P4 table) and prints each visited entry,
/// stopping early at the first unused entry.
///
/// # Safety
///
/// Relies on page tables being identity-mapped so that an entry's physical
/// address can be dereferenced directly as a virtual address.
#[allow(dead_code)]
unsafe fn print_page_table_entries(page_table_indices: &[PageTableIndex]) {
	assert!(page_table_indices.len() <= 4);

	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
	let mut pt = identity_mapped_page_table.level_4_table();

	for (i, page_table_index) in page_table_indices.iter().copied().enumerate() {
		let level = 4 - i;
		let entry = &pt[page_table_index];

		// Indent two spaces per level below P4.
		let indent = &"        "[0..2 * i];
		let page_table_index = u16::from(page_table_index);
		println!("{indent}L{level} Entry {page_table_index}: {entry:?}");

		if entry.is_unused() {
			break;
		}

		// Descend: the entry's physical address doubles as its virtual
		// address under the identity mapping.
		let phys = entry.addr();
		let virt = x86_64::VirtAddr::new(phys.as_u64());
		pt = unsafe { &*virt.as_mut_ptr() };
	}
}
416
/// Debug helper: recursively prints all used page-table entries down to
/// `levels` levels deep (1 = only the P4 table, 4 = everything).
#[allow(dead_code)]
pub(crate) unsafe fn print_page_tables(levels: usize) {
	assert!((1..=4).contains(&levels));

	// Prints the used entries of `table` (at `level`) and recurses down to
	// `min_level`.
	fn print(table: &x86_64::structures::paging::PageTable, level: usize, min_level: usize) {
		for (i, entry) in table
			.iter()
			.enumerate()
			.filter(|(_i, entry)| !entry.is_unused())
		{
			if level < min_level {
				break;
			}
			// Indent two spaces per level below P4.
			let indent = &"        "[0..2 * (4 - level)];
			println!("{indent}L{level} Entry {i}: {entry:?}");

			// Recurse into child tables, but not into huge-page leaves
			// (their "address" is a frame, not a table).
			if level > min_level && !entry.flags().contains(PageTableEntryFlags::HUGE_PAGE) {
				let phys = entry.frame().unwrap().start_address();
				let virt = x86_64::VirtAddr::new(phys.as_u64());
				let entry_table = unsafe { &*virt.as_mut_ptr() };

				print(entry_table, level - 1, min_level);
			}
		}
	}

	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
	let pt = identity_mapped_page_table.level_4_table();

	print(pt, 4, 5 - levels);
}