// hermit/arch/x86_64/mm/paging.rs

1use core::fmt::Debug;
2use core::ptr;
3
4use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
5#[cfg(feature = "common-os")]
6use x86_64::registers::segmentation::SegmentSelector;
7pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
8use x86_64::structures::idt::PageFaultErrorCode;
9pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
10use x86_64::structures::paging::frame::PhysFrameRange;
11use x86_64::structures::paging::mapper::{MappedFrame, TranslateResult, UnmapError};
12use x86_64::structures::paging::page::PageRange;
13use x86_64::structures::paging::{
14	Mapper, OffsetPageTable, Page, PageTable, PageTableIndex, PhysFrame, RecursivePageTable,
15	Size4KiB, Translate,
16};
17
18use crate::arch::x86_64::kernel::processor;
19use crate::arch::x86_64::mm::{PhysAddr, VirtAddr, physicalmem};
20use crate::{env, scheduler};
21
/// Chainable helpers for building [`PageTableEntryFlags`] values.
///
/// Each method sets or clears one flag and returns `&mut Self` so calls can
/// be chained, e.g. `flags.normal().writable().execute_disable()`.
pub trait PageTableEntryFlagsExt {
	/// Marks the mapping as device memory (caching disabled).
	fn device(&mut self) -> &mut Self;

	/// Marks the mapping as normal, cacheable memory.
	fn normal(&mut self) -> &mut Self;

	/// Makes the mapping read-only.
	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self;

	/// Makes the mapping writable.
	fn writable(&mut self) -> &mut Self;

	/// Forbids instruction fetches from the mapping.
	fn execute_disable(&mut self) -> &mut Self;

	/// Allows instruction fetches from the mapping.
	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self;

	/// Makes the mapping accessible from user mode.
	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self;

	/// Restricts the mapping to supervisor (kernel) mode.
	#[expect(dead_code)]
	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self;
}
44
45impl PageTableEntryFlagsExt for PageTableEntryFlags {
46	fn device(&mut self) -> &mut Self {
47		self.insert(PageTableEntryFlags::NO_CACHE);
48		self
49	}
50
51	fn normal(&mut self) -> &mut Self {
52		self.remove(PageTableEntryFlags::NO_CACHE);
53		self
54	}
55
56	#[cfg(feature = "acpi")]
57	fn read_only(&mut self) -> &mut Self {
58		self.remove(PageTableEntryFlags::WRITABLE);
59		self
60	}
61
62	fn writable(&mut self) -> &mut Self {
63		self.insert(PageTableEntryFlags::WRITABLE);
64		self
65	}
66
67	fn execute_disable(&mut self) -> &mut Self {
68		self.insert(PageTableEntryFlags::NO_EXECUTE);
69		self
70	}
71
72	#[cfg(feature = "common-os")]
73	fn execute_enable(&mut self) -> &mut Self {
74		self.remove(PageTableEntryFlags::NO_EXECUTE);
75		self
76	}
77
78	#[cfg(feature = "common-os")]
79	fn user(&mut self) -> &mut Self {
80		self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
81		self
82	}
83
84	#[cfg(feature = "common-os")]
85	fn kernel(&mut self) -> &mut Self {
86		self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
87		self
88	}
89}
90
91pub use x86_64::structures::paging::{
92	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
93};
94
/// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
///
/// # Safety
///
/// The returned `OffsetPageTable` assumes all physical memory is
/// identity-mapped in the active address space (offset 0). The caller must
/// ensure this invariant holds and that no aliasing mutable reference to the
/// active P4 table exists while the returned table is in use.
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
	// CR3 holds the physical address of the active level-4 (P4) page table.
	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
	let level_4_table_ptr =
		ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
	unsafe {
		let level_4_table = level_4_table_ptr.as_mut().unwrap();
		// Physical offset 0 encodes the identity mapping (virt == phys).
		OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
	}
}
105
106/// Translate a virtual memory address to a physical one.
107pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
108	let addr = x86_64::VirtAddr::from(virtual_address);
109
110	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
111
112	match translate_result {
113		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
114			trace!("Uable to determine the physical address of 0x{virtual_address:X}");
115			None
116		}
117		TranslateResult::Mapped { frame, offset, .. } => {
118			Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
119		}
120	}
121}
122
/// Infallible variant of [`virtual_to_physical`].
///
/// # Panics
///
/// Panics if `virtual_address` cannot be translated (not mapped or invalid
/// frame address).
#[cfg(any(feature = "fuse", feature = "vsock", feature = "tcp", feature = "udp"))]
pub fn virt_to_phys(virtual_address: VirtAddr) -> PhysAddr {
	virtual_to_physical(virtual_address).unwrap()
}
127
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or NO_EXECUTE).
///   The PRESENT flag is set automatically.
pub fn map<S>(
	virtual_address: VirtAddr,
	physical_address: PhysAddr,
	count: usize,
	flags: PageTableEntryFlags,
) where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	// Virtual page range: `count` pages of size `S` from `virtual_address`.
	let pages = {
		let start = Page::<S>::containing_address(virtual_address.into());
		let end = start + count as u64;
		Page::range(start, end)
	};

	// Matching physical frame range starting at `physical_address`.
	let frames = {
		let start = PhysFrame::<S>::containing_address(physical_address.into());
		let end = start + count as u64;
		PhysFrame::range(start, end)
	};

	let flags = flags | PageTableEntryFlags::PRESENT;

	trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

	// Maps each page to its corresponding frame. Returns `true` if any page
	// was already mapped and had to be unmapped first (mapping replaced).
	unsafe fn map_pages<M, S>(
		mapper: &mut M,
		pages: PageRange<S>,
		frames: PhysFrameRange<S>,
		flags: PageTableEntryFlags,
	) -> bool
	where
		M: Mapper<S>,
		S: PageSize + Debug,
	{
		// The physical free list backs allocations of intermediate page tables.
		let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
		let mut unmapped = false;
		for (page, frame) in pages.zip(frames) {
			// TODO: Require explicit unmaps
			let unmap = mapper.unmap(page);
			if let Ok((_frame, flush)) = unmap {
				unmapped = true;
				flush.flush();
				debug!("Had to unmap page {page:?} before mapping.");
			}
			let map = unsafe { mapper.map_to(page, frame, flags, &mut *frame_allocator) };
			map.unwrap().flush();
		}
		unmapped
	}

	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

	// If an existing mapping was replaced, other cores may still cache the
	// stale translation in their TLBs — broadcast a shootdown.
	if unmapped {
		#[cfg(feature = "smp")]
		crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
	}
}
194
195/// Maps `count` pages at address `virt_addr`. If the allocation of a physical memory failed,
196/// the number of successful mapped pages are returned as error value.
197pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
198where
199	S: PageSize + Debug,
200	for<'a> RecursivePageTable<'a>: Mapper<S>,
201	for<'a> OffsetPageTable<'a>: Mapper<S>,
202{
203	let flags = {
204		let mut flags = PageTableEntryFlags::empty();
205		flags.normal().writable().execute_disable();
206		flags
207	};
208
209	let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);
210
211	for (map_counter, virt_addr) in virt_addrs.enumerate() {
212		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
213			.map_err(|_| map_counter)?;
214		map::<S>(virt_addr, phys_addr, 1, flags);
215	}
216
217	Ok(())
218}
219
/// Identity-maps a single physical frame (virtual address == physical
/// address) as present, non-executable kernel data.
///
/// # Panics
///
/// Panics if `frame` does not lie below the kernel start address — identity
/// mappings are only allowed below the kernel image.
#[cfg(feature = "acpi")]
pub fn identity_map<S>(frame: PhysFrame<S>)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	assert!(
		frame.start_address().as_u64() < crate::mm::kernel_start_address().as_u64(),
		"Address {:p} to be identity-mapped is not below Kernel start address",
		frame.start_address()
	);

	let flags = PageTableEntryFlags::PRESENT | PageTableEntryFlags::NO_EXECUTE;
	// Allocator backs any intermediate page tables the mapping needs.
	let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
	let mapper_result =
		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };
	mapper_result.unwrap().flush();
}
239
/// Unmaps `count` pages of size `S` starting at `virtual_address`.
///
/// Pages that are already unmapped are tolerated and only logged at debug
/// level; any other unmap error panics.
pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");

	let first_page = Page::<S>::containing_address(virtual_address.into());
	let last_page = first_page + count as u64;
	let range = Page::range(first_page, last_page);

	for page in range {
		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
		match unmap_result {
			// Flush the TLB entry for each successfully unmapped page.
			Ok((_frame, flush)) => flush.flush(),
			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
			// We should handle this case there instead of here.
			Err(UnmapError::PageNotMapped) => {
				debug!("Tried to unmap {page:?}, which was not mapped.");
			}
			Err(err) => panic!("{err:?}"),
		}
	}
}
265
/// Page-fault (#PF) exception handler (without the `common-os` feature).
///
/// Logs the faulting linear address (CR2), the error code, and the register
/// state, then aborts the current task via the scheduler.
#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
279
/// Page-fault (#PF) exception handler (with the `common-os` feature).
///
/// Like the non-`common-os` variant, but first restores the kernel GS base
/// when the fault did not originate from kernel code.
#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	mut stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	unsafe {
		// If the saved code segment is not selector 0x08 (presumably the
		// kernel CS in the GDT — verify against the GDT setup), the fault
		// came from user mode and GS still holds the user base, so swap it.
		if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
			core::arch::asm!("swapgs", options(nostack));
		}
	}
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
298
/// Architecture-specific paging initialization.
pub fn init() {
	make_p4_writable();
}
302
/// Ensures the page containing the P4 (top-level) page table is writable.
///
/// Only runs on UEFI boots — NOTE(review): the `is_uefi` gate suggests the
/// firmware may hand over the P4 page mapped read-only, which would make
/// later page-table updates fault; confirm against the boot path.
fn make_p4_writable() {
	debug!("Making P4 table writable");

	if !env::is_uefi() {
		return;
	}

	let mut pt = unsafe { identity_mapped_page_table() };

	// Under the identity mapping, the P4 table's virtual address equals the
	// physical address read from CR3.
	let p4_page = {
		let (p4_frame, _) = Cr3::read_raw();
		let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
		Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
	};

	// The active P4 table must itself be mapped, otherwise translation
	// could not have brought us here.
	let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
		unreachable!()
	};

	// Set WRITABLE at whichever paging level actually maps the P4 page
	// (it may lie inside a 1 GiB or 2 MiB page).
	let make_writable = || unsafe {
		let flags = flags | PageTableEntryFlags::WRITABLE;
		match frame {
			MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
		}
	};

	// Runs `f` with CR0.WP temporarily cleared (if it was set), so the
	// kernel can write through read-only mappings, then restores CR0.
	unsafe fn without_protect<F, R>(f: F) -> R
	where
		F: FnOnce() -> R,
	{
		let cr0 = Cr0::read();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
		}
		let ret = f();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0) }
		}
		ret
	}

	unsafe { without_protect(make_writable) }
}
348
/// No-op on x86_64 — presumably kept for parity with other architectures'
/// mm initialization APIs (TODO: confirm against other arch modules).
pub fn init_page_tables() {}
350
/// Debug helper: prints how `virt_addr` translates through `pt`, including
/// the resolved physical address, flags, the participating page-table
/// indices, and the entries along the walk.
#[allow(dead_code)]
unsafe fn disect<PT: Translate>(pt: PT, virt_addr: x86_64::VirtAddr) {
	use x86_64::structures::paging::mapper::{MappedFrame, TranslateResult};

	match pt.translate(virt_addr) {
		TranslateResult::Mapped {
			frame,
			offset,
			flags,
		} => {
			let phys_addr = frame.start_address() + offset;
			println!("virt_addr: {virt_addr:p}, phys_addr: {phys_addr:p}, flags: {flags:?}");
			let indices = [
				virt_addr.p4_index(),
				virt_addr.p3_index(),
				virt_addr.p2_index(),
				virt_addr.p1_index(),
			];
			// Larger pages terminate the walk at a higher level, so fewer
			// index levels participate in the translation.
			let valid_indices = match frame {
				MappedFrame::Size4KiB(_) => &indices[..4],
				MappedFrame::Size2MiB(_) => &indices[..3],
				MappedFrame::Size1GiB(_) => &indices[..2],
			};
			for (i, page_table_index) in valid_indices.iter().copied().enumerate() {
				print!("p{}: {}, ", 4 - i, u16::from(page_table_index));
			}
			println!();
			unsafe {
				print_page_table_entries(valid_indices);
			}
		}
		TranslateResult::NotMapped => println!("virt_addr: {virt_addr:p} not mapped"),
		TranslateResult::InvalidFrameAddress(phys_addr) => {
			println!("virt_addr: {virt_addr:p}, phys_addr: {phys_addr:p} (invalid)");
		}
	}
}
388
/// Debug helper: walks the active page-table hierarchy along
/// `page_table_indices` (one index per level, starting at P4) and prints the
/// entry visited at each level, stopping at the first unused entry.
#[allow(dead_code)]
unsafe fn print_page_table_entries(page_table_indices: &[PageTableIndex]) {
	// At most four levels: P4 down to P1.
	assert!(page_table_indices.len() <= 4);

	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
	let mut pt = identity_mapped_page_table.level_4_table();

	for (i, page_table_index) in page_table_indices.iter().copied().enumerate() {
		let level = 4 - i;
		let entry = &pt[page_table_index];

		// Indent two spaces per level below P4.
		let indent = &"        "[0..2 * i];
		let page_table_index = u16::from(page_table_index);
		println!("{indent}L{level} Entry {page_table_index}: {entry:?}");

		if entry.is_unused() {
			break;
		}

		// Descend: under the identity mapping, the entry's physical address
		// is also the virtual address of the next-level table.
		let phys = entry.addr();
		let virt = x86_64::VirtAddr::new(phys.as_u64());
		pt = unsafe { &*virt.as_mut_ptr() };
	}
}
413
/// Debug helper: recursively prints all used entries of the active page
/// tables, descending `levels` levels deep (1 = P4 only, 4 = down to P1).
#[allow(dead_code)]
pub(crate) unsafe fn print_page_tables(levels: usize) {
	assert!((1..=4).contains(&levels));

	// Recursively prints `table` at `level`, descending until `min_level`.
	fn print(table: &x86_64::structures::paging::PageTable, level: usize, min_level: usize) {
		for (i, entry) in table
			.iter()
			.enumerate()
			.filter(|(_i, entry)| !entry.is_unused())
		{
			if level < min_level {
				break;
			}
			// Indent two spaces per level below P4.
			let indent = &"        "[0..2 * (4 - level)];
			println!("{indent}L{level} Entry {i}: {entry:?}");

			// Only recurse into entries that reference another table, i.e.
			// not into huge-page leaf entries.
			if level > min_level && !entry.flags().contains(PageTableEntryFlags::HUGE_PAGE) {
				let phys = entry.frame().unwrap().start_address();
				let virt = x86_64::VirtAddr::new(phys.as_u64());
				let entry_table = unsafe { &*virt.as_mut_ptr() };

				print(entry_table, level - 1, min_level);
			}
		}
	}

	let identity_mapped_page_table = unsafe { identity_mapped_page_table() };
	let pt = identity_mapped_page_table.level_4_table();

	print(pt, 4, 5 - levels);
}