hermit/arch/x86_64/mm/paging.rs

use core::fmt::Debug;
use core::ptr;

use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
#[cfg(feature = "common-os")]
use x86_64::registers::segmentation::SegmentSelector;
pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
use x86_64::structures::idt::PageFaultErrorCode;
pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
use x86_64::structures::paging::frame::PhysFrameRange;
use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult, UnmapError};
use x86_64::structures::paging::page::PageRange;
use x86_64::structures::paging::{
	Mapper, OffsetPageTable, Page, PageTable, PhysFrame, RecursivePageTable, Size4KiB, Translate,
};

use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::mm::{PhysAddr, VirtAddr};
use crate::mm::physicalmem;
use crate::{env, scheduler};

pub trait PageTableEntryFlagsExt {
	fn device(&mut self) -> &mut Self;

	fn normal(&mut self) -> &mut Self;

	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self;

	fn writable(&mut self) -> &mut Self;

	fn execute_disable(&mut self) -> &mut Self;

	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self;

	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self;

	#[expect(dead_code)]
	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self;
}

impl PageTableEntryFlagsExt for PageTableEntryFlags {
	fn device(&mut self) -> &mut Self {
		self.insert(PageTableEntryFlags::NO_CACHE);
		self
	}

	fn normal(&mut self) -> &mut Self {
		self.remove(PageTableEntryFlags::NO_CACHE);
		self
	}

	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self {
		self.remove(PageTableEntryFlags::WRITABLE);
		self
	}

	fn writable(&mut self) -> &mut Self {
		self.insert(PageTableEntryFlags::WRITABLE);
		self
	}

	fn execute_disable(&mut self) -> &mut Self {
		self.insert(PageTableEntryFlags::NO_EXECUTE);
		self
	}

	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self {
		self.remove(PageTableEntryFlags::NO_EXECUTE);
		self
	}

	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self {
		self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
		self
	}

	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self {
		self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
		self
	}
}
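
// A small sketch of how these builder-style flag methods compose; `map_heap`
// below uses exactly this pattern:
//
//     let mut flags = PageTableEntryFlags::empty();
//     flags.normal().writable().execute_disable();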

pub use x86_64::structures::paging::{
	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
};

/// Returns a mapping of physical memory in which each physical address is equal to its virtual address (no offset).
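///
/// # Safety
///
/// The returned table uses a physical-memory offset of zero, so it is only
/// sound to use while the page-table frames themselves are identity-mapped.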
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
	let level_4_table_ptr =
		ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
	unsafe {
		let level_4_table = level_4_table_ptr.as_mut().unwrap();
		OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
	}
}

/// Translates a virtual memory address to a physical one.
pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
	let addr = x86_64::VirtAddr::from(virtual_address);

	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);

	match translate_result {
		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
			trace!("Unable to determine the physical address of 0x{virtual_address:X}");
			None
		}
		TranslateResult::Mapped { frame, offset, .. } => {
			Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
		}
	}
}
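
// Sketch of a typical lookup (the address is illustrative only):
//
//     if let Some(phys_addr) = virtual_to_physical(VirtAddr::new(0xdead_b000)) {
//         trace!("resolves to {phys_addr:?}");
//     }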

/// Maps a contiguous range of pages.
///
/// # Arguments
///
/// * `virtual_address` - First virtual address to map
/// * `physical_address` - First physical address to map these pages to
/// * `count` - Number of pages of size `S` to map
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or NO_EXECUTE).
///   The PRESENT flag is set automatically.
pub fn map<S>(
	virtual_address: VirtAddr,
	physical_address: PhysAddr,
	count: usize,
	flags: PageTableEntryFlags,
) where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let pages = {
		let start = Page::<S>::containing_address(virtual_address.into());
		let end = start + count as u64;
		Page::range(start, end)
	};

	let frames = {
		let start = PhysFrame::<S>::containing_address(physical_address.into());
		let end = start + count as u64;
		PhysFrame::range(start, end)
	};

	let flags = flags | PageTableEntryFlags::PRESENT;

	trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

	unsafe fn map_pages<M, S>(
		mapper: &mut M,
		pages: PageRange<S>,
		frames: PhysFrameRange<S>,
		flags: PageTableEntryFlags,
	) -> bool
	where
		M: Mapper<S>,
		S: PageSize + Debug,
	{
		let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
		let mut unmapped = false;
		for (page, frame) in pages.zip(frames) {
			// TODO: Require explicit unmaps
			let unmap = mapper.unmap(page);
			if let Ok((_frame, flush)) = unmap {
				unmapped = true;
				flush.flush();
				debug!("Had to unmap page {page:?} before mapping.");
			}
			let map = unsafe { mapper.map_to(page, frame, flags, &mut *frame_allocator) };
			match map {
				Ok(mapper_flush) => mapper_flush.flush(),
				Err(err) => panic!("Could not map {page:?} to {frame:?}: {err:?}"),
			}
		}
		unmapped
	}

	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

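	// If existing mappings were replaced, other cores may still hold stale
	// TLB entries for them, so broadcast a TLB shootdown via IPI on SMP.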
	if unmapped {
		#[cfg(feature = "smp")]
		crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
	}
}

/// Maps `count` pages at address `virt_addr`. If the allocation of physical memory fails,
/// the number of successfully mapped pages is returned as the error value.
pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let flags = {
		let mut flags = PageTableEntryFlags::empty();
		flags.normal().writable().execute_disable();
		flags
	};

	let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);

	for (map_counter, virt_addr) in virt_addrs.enumerate() {
		let phys_addr = physicalmem::allocate_aligned(S::SIZE as usize, S::SIZE as usize)
			.map_err(|_| map_counter)?;
		map::<S>(virt_addr, phys_addr, 1, flags);
	}

	Ok(())
}
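
// For example, a caller growing the heap by four large pages might write
// (hypothetical address):
//
//     if let Err(mapped) = map_heap::<LargePageSize>(VirtAddr::new(0x4000_0000), 4) {
//         warn!("only {mapped} pages could be mapped");
//     }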

pub fn identity_map<S>(phys_addr: PhysAddr)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let frame = PhysFrame::<S>::from_start_address(phys_addr.into()).unwrap();
	let flags = PageTableEntryFlags::PRESENT
		| PageTableEntryFlags::WRITABLE
		| PageTableEntryFlags::NO_EXECUTE;
	let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
	let mapper_result =
		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };

	match mapper_result {
		Ok(mapper_flush) => mapper_flush.flush(),
		Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
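		// The frame may already be covered by a larger (huge) mapping; in that
		// case it suffices to assert that the identity translation holds.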
		Err(MapToError::ParentEntryHugePage) => {
			let page_table = unsafe { identity_mapped_page_table() };
			let virt_addr = VirtAddr::new(frame.start_address().as_u64()).into();
			let phys_addr = frame.start_address();
			assert_eq!(page_table.translate_addr(virt_addr), Some(phys_addr));
		}
		Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
	}
}

pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");

	let first_page = Page::<S>::containing_address(virtual_address.into());
	let last_page = first_page + count as u64;
	let range = Page::range(first_page, last_page);

	for page in range {
		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
		match unmap_result {
			Ok((_frame, flush)) => flush.flush(),
			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
			// We should handle this case there instead of here.
			Err(UnmapError::PageNotMapped) => {
				debug!("Tried to unmap {page:?}, which was not mapped.");
			}
			Err(err) => panic!("{err:?}"),
		}
	}
}

#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}

#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	mut stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
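	// If the fault did not originate from the kernel code segment (0x08), it
	// came from user mode, so restore the kernel GS base via `swapgs` first.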
	unsafe {
		if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
			core::arch::asm!("swapgs", options(nostack));
		}
	}
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}

pub fn init() {
	unsafe {
		log_page_tables();
	}
	make_p4_writable();
}

fn make_p4_writable() {
	debug!("Making P4 table writable");

	if !env::is_uefi() {
		return;
	}

	let mut pt = unsafe { identity_mapped_page_table() };

	let p4_page = {
		let (p4_frame, _) = Cr3::read_raw();
		let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
		Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
	};

	let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
		unreachable!()
	};

	let make_writable = || unsafe {
		let flags = flags | PageTableEntryFlags::WRITABLE;
		match frame {
			MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
		}
	};

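	// While CR0.WP is set, even ring 0 faults on writes to read-only pages, so
	// clear the bit around `f` and restore it afterwards.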
	unsafe fn without_protect<F, R>(f: F) -> R
	where
		F: FnOnce() -> R,
	{
		let cr0 = Cr0::read();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
		}
		let ret = f();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0) }
		}
		ret
	}

	unsafe { without_protect(make_writable) }
}

pub fn init_page_tables() {}

pub unsafe fn log_page_tables() {
	use log::Level;

	use self::mapped_page_range_display::OffsetPageTableExt;

	if !log_enabled!(Level::Debug) {
		return;
	}

	let page_table = unsafe { identity_mapped_page_table() };
	debug!("Page tables:\n{}", page_table.display());
}

pub mod mapped_page_range_display {
	use core::fmt::{self, Write};

	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{MappedPageTable, OffsetPageTable, PageSize};

	use super::mapped_page_table_iter::{
		self, MappedPageRangeInclusive, MappedPageRangeInclusiveItem,
		MappedPageTableRangeInclusiveIter,
	};
	use super::offset_page_table::PhysOffset;

	#[expect(dead_code)]
	pub trait MappedPageTableExt<P: PageTableFrameMapping + Clone> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P>;
	}

	impl<P: PageTableFrameMapping + Clone> MappedPageTableExt<P> for MappedPageTable<'_, P> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::mapped_page_table_range_iter(self),
			}
		}
	}

	pub trait OffsetPageTableExt {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset>;
	}

	impl OffsetPageTableExt for OffsetPageTable<'_> {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::offset_page_table_range_iter(self),
			}
		}
	}

	pub struct MappedPageTableDisplay<'a, P: PageTableFrameMapping + Clone> {
		inner: MappedPageTableRangeInclusiveIter<'a, P>,
	}

	impl<P: PageTableFrameMapping + Clone> fmt::Display for MappedPageTableDisplay<'_, P> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			let mut has_fields = false;

			for mapped_page_range in self.inner.clone() {
				if has_fields {
					f.write_char('\n')?;
				}
				write!(f, "{}", mapped_page_range.display())?;

				has_fields = true;
			}

			Ok(())
		}
	}

	pub trait MappedPageRangeInclusiveItemExt {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_>;
	}

	impl MappedPageRangeInclusiveItemExt for MappedPageRangeInclusiveItem {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_> {
			MappedPageRangeInclusiveItemDisplay { inner: self }
		}
	}

	pub struct MappedPageRangeInclusiveItemDisplay<'a> {
		inner: &'a MappedPageRangeInclusiveItem,
	}

	impl fmt::Display for MappedPageRangeInclusiveItemDisplay<'_> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			match self.inner {
				MappedPageRangeInclusiveItem::Size4KiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size2MiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size1GiB(range) => range.display().fmt(f),
			}
		}
	}

	pub trait MappedPageRangeInclusiveExt<S: PageSize> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S>;
	}

	impl<S: PageSize> MappedPageRangeInclusiveExt<S> for MappedPageRangeInclusive<S> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S> {
			MappedPageRangeInclusiveDisplay { inner: self }
		}
	}

	pub struct MappedPageRangeInclusiveDisplay<'a, S: PageSize> {
		inner: &'a MappedPageRangeInclusive<S>,
	}

	impl<S: PageSize> fmt::Display for MappedPageRangeInclusiveDisplay<'_, S> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			let size = S::DEBUG_STR;
			let len = self.inner.page_range.len();
			let page_start = self.inner.page_range.start.start_address();
			let page_end = self.inner.page_range.end.start_address();
			let frame_start = self.inner.frame_range.start.start_address();
			let frame_end = self.inner.frame_range.end.start_address();
			let flags = self.inner.flags;
			let format_phys = if page_start.as_u64() == frame_start.as_u64() {
				assert_eq!(page_end.as_u64(), frame_end.as_u64());
				format_args!("{:>39}", "identity mapped")
			} else {
				format_args!("{frame_start:18p}..={frame_end:18p}")
			};
			write!(
				f,
				"size: {size}, len: {len:5}, virt: {page_start:18p}..={page_end:18p}, phys: {format_phys}, flags: {flags:?}"
			)
		}
	}
}

pub mod mapped_page_table_iter {
	//! TODO: try to upstream this to [`x86_64`].

	use core::fmt;
	use core::ops::{Add, AddAssign, Sub, SubAssign};

	use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::page::{AddressNotAligned, PageRangeInclusive};
	use x86_64::structures::paging::{
		MappedPageTable, OffsetPageTable, Page, PageSize, PageTable, PageTableFlags,
		PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
	};

	use super::offset_page_table::PhysOffset;
	use super::walker::{PageTableWalkError, PageTableWalker};

	#[derive(Debug)]
	pub struct MappedPageRangeInclusive<S: PageSize> {
		pub page_range: PageRangeInclusive<S>,
		pub frame_range: PhysFrameRangeInclusive<S>,
		pub flags: PageTableFlags,
	}

	impl<S: PageSize> TryFrom<(MappedPage<S>, MappedPage<S>)> for MappedPageRangeInclusive<S> {
		type Error = TryFromMappedPageError;

		fn try_from((start, end): (MappedPage<S>, MappedPage<S>)) -> Result<Self, Self::Error> {
			if start.flags != end.flags {
				return Err(TryFromMappedPageError);
			}

			Ok(Self {
				page_range: PageRangeInclusive {
					start: start.page,
					end: end.page,
				},
				frame_range: PhysFrameRangeInclusive {
					start: start.frame,
					end: end.frame,
				},
				flags: start.flags,
			})
		}
	}

	#[derive(Debug)]
	pub enum MappedPageRangeInclusiveItem {
		Size4KiB(MappedPageRangeInclusive<Size4KiB>),
		Size2MiB(MappedPageRangeInclusive<Size2MiB>),
		Size1GiB(MappedPageRangeInclusive<Size1GiB>),
	}

	impl TryFrom<(MappedPageItem, MappedPageItem)> for MappedPageRangeInclusiveItem {
		type Error = TryFromMappedPageError;

		fn try_from((start, end): (MappedPageItem, MappedPageItem)) -> Result<Self, Self::Error> {
			match (start, end) {
				(MappedPageItem::Size4KiB(start), MappedPageItem::Size4KiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size4KiB(range))
				}
				(MappedPageItem::Size2MiB(start), MappedPageItem::Size2MiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size2MiB(range))
				}
				(MappedPageItem::Size1GiB(start), MappedPageItem::Size1GiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size1GiB(range))
				}
				(_, _) => Err(TryFromMappedPageError),
			}
		}
	}

	#[derive(PartialEq, Eq, Clone, Debug)]
	pub struct TryFromMappedPageError;

	impl fmt::Display for TryFromMappedPageError {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			f.write_str("provided mapped pages were not compatible")
		}
	}

	#[derive(Clone)]
	pub struct MappedPageTableRangeInclusiveIter<'a, P: PageTableFrameMapping> {
		inner: MappedPageTableIter<'a, P>,
		start: Option<MappedPageItem>,
		end: Option<MappedPageItem>,
	}

	#[expect(dead_code)]
	pub fn mapped_page_table_range_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableRangeInclusiveIter<'a, &'a P> {
		MappedPageTableRangeInclusiveIter {
			inner: mapped_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}

	pub fn offset_page_table_range_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableRangeInclusiveIter<'a, PhysOffset> {
		MappedPageTableRangeInclusiveIter {
			inner: offset_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}

	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableRangeInclusiveIter<'a, P> {
		type Item = MappedPageRangeInclusiveItem;

		fn next(&mut self) -> Option<Self::Item> {
			if self.start.is_none() {
				self.start = self.inner.next();
				self.end = self.start;
			}

			let Some(start) = &mut self.start else {
				return None;
			};
			let end = self.end.as_mut().unwrap();

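			// Coalesce runs of contiguous mappings (adjacent pages mapped to
			// adjacent frames with identical flags) into one inclusive range.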
			for mapped_page in self.inner.by_ref() {
				if mapped_page == *end + 1 {
					*end = mapped_page;
					continue;
				}

				let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
				*start = mapped_page;
				*end = mapped_page;
				return Some(range);
			}

			let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
			self.start = None;
			self.end = None;
			Some(range)
		}
	}

	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub struct MappedPage<S: PageSize> {
		pub page: Page<S>,
		pub frame: PhysFrame<S>,
		pub flags: PageTableFlags,
	}

	impl<S: PageSize> Add<u64> for MappedPage<S> {
		type Output = Self;

		fn add(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page + rhs,
				frame: self.frame + rhs,
				flags: self.flags,
			}
		}
	}

	impl<S: PageSize> Sub<u64> for MappedPage<S> {
		type Output = Self;

		fn sub(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page - rhs,
				frame: self.frame - rhs,
				flags: self.flags,
			}
		}
	}

	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub enum MappedPageItem {
		Size4KiB(MappedPage<Size4KiB>),
		Size2MiB(MappedPage<Size2MiB>),
		Size1GiB(MappedPage<Size1GiB>),
	}

	impl Add<u64> for MappedPageItem {
		type Output = Self;

		fn add(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page + rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page + rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page + rhs),
			}
		}
	}

	impl AddAssign<u64> for MappedPageItem {
		fn add_assign(&mut self, rhs: u64) {
			*self = *self + rhs;
		}
	}

	impl Sub<u64> for MappedPageItem {
		type Output = Self;

		fn sub(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page - rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page - rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page - rhs),
			}
		}
	}

	impl SubAssign<u64> for MappedPageItem {
		fn sub_assign(&mut self, rhs: u64) {
			*self = *self - rhs;
		}
	}

	#[derive(Clone)]
	pub struct MappedPageTableIter<'a, P: PageTableFrameMapping> {
		page_table_walker: PageTableWalker<P>,
		level_4_table: &'a PageTable,
		p4_index: u16,
		p3_index: u16,
		p2_index: u16,
		p1_index: u16,
	}

	pub fn mapped_page_table_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableIter<'a, &'a P> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(page_table.page_table_frame_mapping())
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}

	pub fn offset_page_table_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableIter<'a, PhysOffset> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(PhysOffset {
					offset: page_table.phys_offset(),
				})
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}

	impl<'a, P: PageTableFrameMapping> MappedPageTableIter<'a, P> {
		fn p4_index(&self) -> Option<PageTableIndex> {
			if self.p4_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p4_index))
		}

		fn p3_index(&self) -> Option<PageTableIndex> {
			if self.p3_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p3_index))
		}

		fn p2_index(&self) -> Option<PageTableIndex> {
			if self.p2_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p2_index))
		}

		fn p1_index(&self) -> Option<PageTableIndex> {
			if self.p1_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p1_index))
		}

		fn increment_p4_index(&mut self) -> Option<()> {
			if self.p4_index >= 511 {
				self.p4_index += 1;
				return None;
			}

			self.p4_index += 1;
			self.p3_index = 0;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p3_index(&mut self) -> Option<()> {
			if self.p3_index == 511 {
				self.increment_p4_index()?;
				return None;
			}

			self.p3_index += 1;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p2_index(&mut self) -> Option<()> {
			if self.p2_index == 511 {
				self.increment_p3_index()?;
				return None;
			}

			self.p2_index += 1;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p1_index(&mut self) -> Option<()> {
			if self.p1_index == 511 {
				self.increment_p2_index()?;
				return None;
			}

			self.p1_index += 1;
			Some(())
		}

		fn next_forward(&mut self) -> Option<MappedPageItem> {
			let p4 = self.level_4_table;

			let p3 = loop {
				match self.page_table_walker.next_table(&p4[self.p4_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p4_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						panic!("level 4 entry has huge page bit set")
					}
				}
			};

			let p2 = loop {
				match self.page_table_walker.next_table(&p3[self.p3_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p3_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						let page =
							Page::from_page_table_indices_1gib(self.p4_index()?, self.p3_index()?);
						let entry = &p3[self.p3_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size1GiB(MappedPage { page, frame, flags });

						self.increment_p3_index();
						return Some(mapped_page);
					}
				}
			};

			let p1 = loop {
				match self.page_table_walker.next_table(&p2[self.p2_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p2_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						let page = Page::from_page_table_indices_2mib(
							self.p4_index()?,
							self.p3_index()?,
							self.p2_index()?,
						);
						let entry = &p2[self.p2_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size2MiB(MappedPage { page, frame, flags });

						self.increment_p2_index();
						return Some(mapped_page);
					}
				}
			};

			loop {
				let p1_entry = &p1[self.p1_index()?];

				if p1_entry.is_unused() {
					self.increment_p1_index()?;
					continue;
				}

				let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
					Ok(frame) => frame,
					Err(AddressNotAligned) => {
						warn!("Invalid frame address: {:p}", p1_entry.addr());
						self.increment_p1_index()?;
						continue;
					}
				};

				let page = Page::from_page_table_indices(
					self.p4_index()?,
					self.p3_index()?,
					self.p2_index()?,
					self.p1_index()?,
				);
				let flags = p1_entry.flags();
				let mapped_page = MappedPageItem::Size4KiB(MappedPage { page, frame, flags });

				self.increment_p1_index();
				return Some(mapped_page);
			}
		}
	}

	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableIter<'a, P> {
		type Item = MappedPageItem;

		fn next(&mut self) -> Option<Self::Item> {
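			// `next_forward` returns `None` both when the walk is exhausted and
			// when an index increment wraps past a table boundary; retrying once
			// resumes the walk from the freshly incremented position.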
			self.next_forward().or_else(|| self.next_forward())
		}
	}
}

mod walker {
	//! Taken from [`x86_64`]

	use x86_64::structures::paging::PageTable;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::page_table::{FrameError, PageTableEntry};

	#[derive(Clone, Debug)]
	pub(super) struct PageTableWalker<P: PageTableFrameMapping> {
		page_table_frame_mapping: P,
	}

	impl<P: PageTableFrameMapping> PageTableWalker<P> {
		#[inline]
		pub unsafe fn new(page_table_frame_mapping: P) -> Self {
			Self {
				page_table_frame_mapping,
			}
		}

		/// Internal helper function to get a reference to the page table of the next level.
		///
		/// Returns `PageTableWalkError::NotMapped` if the entry is unused. Returns
		/// `PageTableWalkError::MappedToHugePage` if the `HUGE_PAGE` flag is set
		/// in the passed entry.
		#[inline]
		pub(super) fn next_table<'b>(
			&self,
			entry: &'b PageTableEntry,
		) -> Result<&'b PageTable, PageTableWalkError> {
			let page_table_ptr = self
				.page_table_frame_mapping
				.frame_to_pointer(entry.frame()?);
			let page_table: &PageTable = unsafe { &*page_table_ptr };

			Ok(page_table)
		}
	}

	#[derive(Debug)]
	pub(super) enum PageTableWalkError {
		NotMapped,
		MappedToHugePage,
	}

	impl From<FrameError> for PageTableWalkError {
		#[inline]
		fn from(err: FrameError) -> Self {
			match err {
				FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
				FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
			}
		}
	}
}

mod offset_page_table {
	//! Taken from [`x86_64`]

	use x86_64::VirtAddr;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{PageTable, PhysFrame};

	#[derive(Clone, Debug)]
	pub struct PhysOffset {
		pub offset: VirtAddr,
	}

	unsafe impl PageTableFrameMapping for PhysOffset {
		fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
			let virt = self.offset + frame.start_address().as_u64();
			virt.as_mut_ptr()
		}
	}
}