Skip to main content

hermit/arch/x86_64/mm/
paging.rs

1use core::{fmt, ptr};
2
3use free_list::PageLayout;
4use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
5#[cfg(feature = "common-os")]
6use x86_64::registers::segmentation::SegmentSelector;
7pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
8use x86_64::structures::idt::PageFaultErrorCode;
9pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
10use x86_64::structures::paging::frame::PhysFrameRange;
11use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult, UnmapError};
12use x86_64::structures::paging::page::PageRange;
13use x86_64::structures::paging::{
14	FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB, Translate,
15};
16
17use crate::arch::x86_64::kernel::processor;
18use crate::arch::x86_64::mm::{PhysAddr, VirtAddr};
19use crate::mm::{FrameAlloc, PageRangeAllocator};
20use crate::{env, scheduler};
21
22unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
23	fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
24		let size = usize::try_from(Size4KiB::SIZE).unwrap();
25		let layout = PageLayout::from_size(size).unwrap();
26
27		let range = FrameAlloc::allocate(layout).ok()?;
28
29		let phys_addr = PhysAddr::from(range.start());
30		Some(PhysFrame::from_start_address(phys_addr.into()).unwrap())
31	}
32}
33
/// Builder-style convenience methods on [`PageTableEntryFlags`].
///
/// All methods mutate `self` in place and return `&mut Self` so that calls
/// can be chained (e.g. `flags.normal().writable().execute_disable()`).
pub trait PageTableEntryFlagsExt {
	/// Marks the mapping as device memory (uncached).
	fn device(&mut self) -> &mut Self;

	/// Marks the mapping as normal (cached) memory.
	fn normal(&mut self) -> &mut Self;

	/// Clears the writable flag, making the mapping read-only.
	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self;

	/// Makes the mapping writable.
	fn writable(&mut self) -> &mut Self;

	/// Forbids instruction fetches from the mapping (sets NX).
	fn execute_disable(&mut self) -> &mut Self;

	/// Allows instruction fetches from the mapping (clears NX).
	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self;

	/// Makes the mapping accessible from user mode.
	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self;

	/// Restricts the mapping to kernel mode.
	#[expect(dead_code)]
	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self;
}
56
57impl PageTableEntryFlagsExt for PageTableEntryFlags {
58	fn device(&mut self) -> &mut Self {
59		self.insert(PageTableEntryFlags::NO_CACHE);
60		self
61	}
62
63	fn normal(&mut self) -> &mut Self {
64		self.remove(PageTableEntryFlags::NO_CACHE);
65		self
66	}
67
68	#[cfg(feature = "acpi")]
69	fn read_only(&mut self) -> &mut Self {
70		self.remove(PageTableEntryFlags::WRITABLE);
71		self
72	}
73
74	fn writable(&mut self) -> &mut Self {
75		self.insert(PageTableEntryFlags::WRITABLE);
76		self
77	}
78
79	fn execute_disable(&mut self) -> &mut Self {
80		self.insert(PageTableEntryFlags::NO_EXECUTE);
81		self
82	}
83
84	#[cfg(feature = "common-os")]
85	fn execute_enable(&mut self) -> &mut Self {
86		self.remove(PageTableEntryFlags::NO_EXECUTE);
87		self
88	}
89
90	#[cfg(feature = "common-os")]
91	fn user(&mut self) -> &mut Self {
92		self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
93		self
94	}
95
96	#[cfg(feature = "common-os")]
97	fn kernel(&mut self) -> &mut Self {
98		self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
99		self
100	}
101}
102
103pub use x86_64::structures::paging::{
104	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
105};
106
/// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
///
/// Reads the active level-4 page table frame from CR3 and wraps it in an
/// [`OffsetPageTable`] with a physical-memory offset of `0`.
///
/// # Safety
///
/// The returned mapper dereferences page-table frames at their physical
/// addresses (offset 0), so the caller must ensure the relevant physical
/// memory is identity-mapped. Creating several of these simultaneously
/// hands out aliasing `&'static mut` page-table references — callers must
/// not use them concurrently.
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
	// Re-create a pointer to the P4 table from its raw physical address.
	let level_4_table_ptr =
		ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
	unsafe {
		let level_4_table = level_4_table_ptr.as_mut().unwrap();
		// Offset 0: virtual == physical for page-table frames.
		OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
	}
}
117
118/// Translate a virtual memory address to a physical one.
119pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
120	let addr = x86_64::VirtAddr::from(virtual_address);
121
122	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
123
124	match translate_result {
125		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
126			trace!("Unable to determine the physical address of 0x{virtual_address:X}");
127			None
128		}
129		TranslateResult::Mapped { frame, offset, .. } => {
130			Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
131		}
132	}
133}
134
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `virtual_address` - First virtual address at which the pages are mapped
/// * `physical_address` - First physical address to map these pages to
/// * `count` - Number of pages of size `S` to map
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or NO_EXECUTE).
///   The PRESENT flag is set automatically.
///
/// Panics if any page cannot be mapped. If an existing mapping had to be
/// replaced, a TLB shootdown is broadcast to the other cores (with `smp`).
pub fn map<S>(
	virtual_address: VirtAddr,
	physical_address: PhysAddr,
	count: usize,
	flags: PageTableEntryFlags,
) where
	S: PageSize + fmt::Debug,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let pages = {
		let start = Page::<S>::containing_address(virtual_address.into());
		let end = start + count as u64;
		Page::range(start, end)
	};

	let frames = {
		let start = PhysFrame::<S>::containing_address(physical_address.into());
		let end = start + count as u64;
		PhysFrame::range(start, end)
	};

	let flags = flags | PageTableEntryFlags::PRESENT;

	trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

	// Maps each page to its frame, unmapping any previous mapping first.
	// Returns whether at least one existing mapping was replaced, so the
	// caller knows whether other cores' TLBs must be flushed.
	unsafe fn map_pages<M, S>(
		mapper: &mut M,
		pages: PageRange<S>,
		frames: PhysFrameRange<S>,
		flags: PageTableEntryFlags,
	) -> bool
	where
		M: Mapper<S>,
		S: PageSize + fmt::Debug,
	{
		let mut unmapped = false;
		for (page, frame) in pages.zip(frames) {
			// TODO: Require explicit unmaps
			let unmap = mapper.unmap(page);
			if let Ok((_frame, flush)) = unmap {
				unmapped = true;
				flush.flush();
				debug!("Had to unmap page {page:?} before mapping.");
			}
			let map = unsafe { mapper.map_to(page, frame, flags, &mut FrameAlloc) };
			match map {
				Ok(mapper_flush) => mapper_flush.flush(),
				Err(err) => panic!("Could not map {page:?} to {frame:?}: {err:?}"),
			}
		}
		unmapped
	}

	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

	if unmapped {
		// Replacing a mapping invalidates stale TLB entries on other cores too.
		#[cfg(feature = "smp")]
		crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
	}
}
202
203/// Maps `count` pages at address `virt_addr`. If the allocation of a physical memory failed,
204/// the number of successful mapped pages are returned as error value.
205pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
206where
207	S: PageSize + fmt::Debug,
208	for<'a> OffsetPageTable<'a>: Mapper<S>,
209{
210	let flags = {
211		let mut flags = PageTableEntryFlags::empty();
212		flags.normal().writable().execute_disable();
213		flags
214	};
215
216	let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);
217
218	for (map_counter, virt_addr) in virt_addrs.enumerate() {
219		let layout = PageLayout::from_size_align(S::SIZE as usize, S::SIZE as usize).unwrap();
220		let frame_range = FrameAlloc::allocate(layout).map_err(|_| map_counter)?;
221		let phys_addr = PhysAddr::from(frame_range.start());
222		map::<S>(virt_addr, phys_addr, 1, flags);
223	}
224
225	Ok(())
226}
227
/// Identity-maps the frame starting at `phys_addr` as present, writable,
/// non-executable memory.
///
/// Panics if `phys_addr` is not aligned to `S::SIZE` or if the mapping
/// fails for any reason other than the cases handled below. If the frame is
/// already mapped (directly or via a covering huge page), the existing
/// identity mapping is asserted instead of remapped.
pub fn identity_map<S>(phys_addr: PhysAddr)
where
	S: PageSize + fmt::Debug,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let frame = PhysFrame::<S>::from_start_address(phys_addr.into()).unwrap();
	let flags = PageTableEntryFlags::PRESENT
		| PageTableEntryFlags::WRITABLE
		| PageTableEntryFlags::NO_EXECUTE;
	let mapper_result =
		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut FrameAlloc) };

	match mapper_result {
		Ok(mapper_flush) => mapper_flush.flush(),
		// Already mapped: accept only if it maps to the very same frame.
		Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
		// Covered by a huge page: accept if the translation is an identity map.
		Err(MapToError::ParentEntryHugePage) => {
			let page_table = unsafe { identity_mapped_page_table() };
			let virt_addr = VirtAddr::new(frame.start_address().as_u64()).into();
			let phys_addr = frame.start_address();
			assert_eq!(page_table.translate_addr(virt_addr), Some(phys_addr));
		}
		Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
	}
}
252
253pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
254where
255	S: PageSize + fmt::Debug,
256	for<'a> OffsetPageTable<'a>: Mapper<S>,
257{
258	trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");
259
260	let first_page = Page::<S>::containing_address(virtual_address.into());
261	let last_page = first_page + count as u64;
262	let range = Page::range(first_page, last_page);
263
264	for page in range {
265		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
266		match unmap_result {
267			Ok((_frame, flush)) => flush.flush(),
268			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
269			// We should handle this case there instead of here.
270			Err(UnmapError::PageNotMapped) => {
271				debug!("Tried to unmap {page:?}, which was not mapped.");
272			}
273			Err(err) => panic!("{err:?}"),
274		}
275	}
276}
277
/// Page-fault (#PF) exception handler.
///
/// Logs the faulting linear address (CR2), the hardware error code, the
/// FS/GS base registers, and the interrupt stack frame, then aborts the
/// current task via the scheduler. Page faults are not recovered from.
#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
291
/// Page-fault (#PF) exception handler for the `common-os` configuration.
///
/// If the fault did not originate from the kernel code segment (selector
/// 0x08 — presumably the kernel CS; verify against the GDT setup), `swapgs`
/// is executed first so that GS points at the kernel's per-CPU data before
/// any logging happens. Afterwards the fault details are logged and the
/// current task is aborted.
#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	mut stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	unsafe {
		// Faults from outside the kernel code segment arrive with the user
		// GS base active; swap to the kernel GS base.
		if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
			core::arch::asm!("swapgs", options(nostack));
		}
	}
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
310
/// Initializes the paging subsystem: dumps the boot page tables (at trace
/// level) and ensures the P4 table is writable on UEFI systems.
pub fn init() {
	unsafe {
		log_page_tables();
	}
	make_p4_writable();
}
317
/// Ensures the level-4 page table's own mapping is writable.
///
/// Only relevant when booted via UEFI, where the firmware may have mapped
/// the P4 frame read-only; on non-UEFI boots this is a no-op. The flag
/// update is performed at whichever paging level actually maps the P4
/// frame, with CR0.WP temporarily cleared so the (possibly read-only)
/// page-table entry itself can be modified.
fn make_p4_writable() {
	debug!("Making P4 table writable");

	if !env::is_uefi() {
		return;
	}

	let mut pt = unsafe { identity_mapped_page_table() };

	// The P4 frame is identity-mapped, so its virtual page address equals
	// its physical frame address from CR3.
	let p4_page = {
		let (p4_frame, _) = Cr3::read_raw();
		let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
		Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
	};

	let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
		unreachable!()
	};

	// Sets WRITABLE on the entry that maps the P4 page, at the level that
	// corresponds to the mapping's frame size.
	let make_writable = || unsafe {
		let flags = flags | PageTableEntryFlags::WRITABLE;
		match frame {
			MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
		}
	};

	// Runs `f` with CR0.WP cleared (if it was set), restoring CR0 afterwards.
	// With WP set, the CPU faults on writes to read-only pages even in ring 0.
	unsafe fn without_protect<F, R>(f: F) -> R
	where
		F: FnOnce() -> R,
	{
		let cr0 = Cr0::read();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
		}
		let ret = f();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0) }
		}
		ret
	}

	unsafe { without_protect(make_writable) }
}
363
364pub unsafe fn log_page_tables() {
365	use log::Level;
366
367	use self::mapped_page_range_display::OffsetPageTableExt;
368
369	if !log_enabled!(Level::Trace) {
370		return;
371	}
372
373	let page_table = unsafe { identity_mapped_page_table() };
374	trace!("Page tables:\n{}", page_table.display());
375}
376
pub mod mapped_page_range_display {
	//! [`fmt::Display`] adapters for the page-table range iterators of
	//! [`super::mapped_page_table_iter`], used to pretty-print all mappings
	//! of a page table (one line per contiguous range).

	use core::fmt::{self, Write};

	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{MappedPageTable, OffsetPageTable, PageSize};

	use super::mapped_page_table_iter::{
		self, MappedPageRangeInclusive, MappedPageRangeInclusiveItem,
		MappedPageTableRangeInclusiveIter,
	};
	use super::offset_page_table::PhysOffset;

	/// Adds a `display` adapter to [`MappedPageTable`].
	#[expect(dead_code)]
	pub trait MappedPageTableExt<P: PageTableFrameMapping + Clone> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P>;
	}

	impl<P: PageTableFrameMapping + Clone> MappedPageTableExt<P> for MappedPageTable<'_, P> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::mapped_page_table_range_iter(self),
			}
		}
	}

	/// Adds a `display` adapter to [`OffsetPageTable`].
	pub trait OffsetPageTableExt {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset>;
	}

	impl OffsetPageTableExt for OffsetPageTable<'_> {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::offset_page_table_range_iter(self),
			}
		}
	}

	/// Displays every mapped range of a page table, one range per line.
	pub struct MappedPageTableDisplay<'a, P: PageTableFrameMapping + Clone> {
		inner: MappedPageTableRangeInclusiveIter<'a, P>,
	}

	impl<P: PageTableFrameMapping + Clone> fmt::Display for MappedPageTableDisplay<'_, P> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			// Newlines separate ranges; no trailing newline after the last one.
			let mut has_fields = false;

			for mapped_page_range in self.inner.clone() {
				if has_fields {
					f.write_char('\n')?;
				}
				write!(f, "{}", mapped_page_range.display())?;

				has_fields = true;
			}

			Ok(())
		}
	}

	/// Adds a `display` adapter to [`MappedPageRangeInclusiveItem`].
	pub trait MappedPageRangeInclusiveItemExt {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_>;
	}

	impl MappedPageRangeInclusiveItemExt for MappedPageRangeInclusiveItem {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_> {
			MappedPageRangeInclusiveItemDisplay { inner: self }
		}
	}

	/// Displays one mapped range, dispatching on its page size.
	pub struct MappedPageRangeInclusiveItemDisplay<'a> {
		inner: &'a MappedPageRangeInclusiveItem,
	}

	impl fmt::Display for MappedPageRangeInclusiveItemDisplay<'_> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			match self.inner {
				MappedPageRangeInclusiveItem::Size4KiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size2MiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size1GiB(range) => range.display().fmt(f),
			}
		}
	}

	/// Adds a `display` adapter to [`MappedPageRangeInclusive`].
	pub trait MappedPageRangeInclusiveExt<S: PageSize> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S>;
	}

	impl<S: PageSize> MappedPageRangeInclusiveExt<S> for MappedPageRangeInclusive<S> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S> {
			MappedPageRangeInclusiveDisplay { inner: self }
		}
	}

	/// Displays a single size-homogeneous mapped range.
	pub struct MappedPageRangeInclusiveDisplay<'a, S: PageSize> {
		inner: &'a MappedPageRangeInclusive<S>,
	}

	impl<S: PageSize> fmt::Display for MappedPageRangeInclusiveDisplay<'_, S> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			let size = S::DEBUG_STR;
			let len = self.inner.page_range.len();
			let page_start = self.inner.page_range.start.start_address();
			let page_end = self.inner.page_range.end.start_address();
			let frame_start = self.inner.frame_range.start.start_address();
			let frame_end = self.inner.frame_range.end.start_address();
			let flags = self.inner.flags;
			// Identity mappings are collapsed to a label instead of repeating
			// the virtual addresses in the physical column.
			let format_phys = if page_start.as_u64() == frame_start.as_u64() {
				assert_eq!(page_end.as_u64(), frame_end.as_u64());
				format_args!("{:>39}", "identity mapped")
			} else {
				format_args!("{frame_start:18p}..={frame_end:18p}")
			};
			write!(
				f,
				"size: {size}, len: {len:5}, virt: {page_start:18p}..={page_end:18p}, phys: {format_phys}, flags: {flags:?}"
			)
		}
	}
}
495
pub mod mapped_page_table_iter {
	//! TODO: try to upstream this to [`x86_64`].
	//!
	//! Iterators over all mapped pages of a page table, plus an adapter that
	//! coalesces runs of contiguous, identically-flagged mappings into
	//! inclusive ranges.

	use core::fmt;
	use core::ops::{Add, AddAssign, Sub, SubAssign};

	use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::page::{AddressNotAligned, PageRangeInclusive};
	use x86_64::structures::paging::{
		MappedPageTable, OffsetPageTable, Page, PageSize, PageTable, PageTableFlags,
		PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
	};

	use super::offset_page_table::PhysOffset;
	use super::walker::{PageTableWalkError, PageTableWalker};

	/// A contiguous run of pages of size `S`, mapped to a contiguous run of
	/// frames, all sharing the same flags.
	#[derive(Debug)]
	pub struct MappedPageRangeInclusive<S: PageSize> {
		pub page_range: PageRangeInclusive<S>,
		pub frame_range: PhysFrameRangeInclusive<S>,
		pub flags: PageTableFlags,
	}

	impl<S: PageSize> TryFrom<(MappedPage<S>, MappedPage<S>)> for MappedPageRangeInclusive<S> {
		type Error = TryFromMappedPageError;

		/// Builds a range from its first and last mapped page; fails if the
		/// two endpoints do not share the same flags.
		fn try_from((start, end): (MappedPage<S>, MappedPage<S>)) -> Result<Self, Self::Error> {
			if start.flags != end.flags {
				return Err(TryFromMappedPageError);
			}

			Ok(Self {
				page_range: PageRangeInclusive {
					start: start.page,
					end: end.page,
				},
				frame_range: PhysFrameRangeInclusive {
					start: start.frame,
					end: end.frame,
				},
				flags: start.flags,
			})
		}
	}

	/// A [`MappedPageRangeInclusive`] of any of the three x86_64 page sizes.
	#[derive(Debug)]
	pub enum MappedPageRangeInclusiveItem {
		Size4KiB(MappedPageRangeInclusive<Size4KiB>),
		Size2MiB(MappedPageRangeInclusive<Size2MiB>),
		Size1GiB(MappedPageRangeInclusive<Size1GiB>),
	}

	impl TryFrom<(MappedPageItem, MappedPageItem)> for MappedPageRangeInclusiveItem {
		type Error = TryFromMappedPageError;

		/// Builds a range from two endpoints; fails if they differ in page
		/// size or flags.
		fn try_from((start, end): (MappedPageItem, MappedPageItem)) -> Result<Self, Self::Error> {
			match (start, end) {
				(MappedPageItem::Size4KiB(start), MappedPageItem::Size4KiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size4KiB(range))
				}
				(MappedPageItem::Size2MiB(start), MappedPageItem::Size2MiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size2MiB(range))
				}
				(MappedPageItem::Size1GiB(start), MappedPageItem::Size1GiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size1GiB(range))
				}
				(_, _) => Err(TryFromMappedPageError),
			}
		}
	}

	/// Error for range constructions from incompatible mapped pages.
	#[derive(PartialEq, Eq, Clone, Debug)]
	pub struct TryFromMappedPageError;

	impl fmt::Display for TryFromMappedPageError {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			f.write_str("provided mapped pages were not compatible")
		}
	}

	/// Iterator adapter that merges consecutive [`MappedPageItem`]s into
	/// [`MappedPageRangeInclusiveItem`]s.
	#[derive(Clone)]
	pub struct MappedPageTableRangeInclusiveIter<'a, P: PageTableFrameMapping> {
		inner: MappedPageTableIter<'a, P>,
		// Endpoints of the range currently being grown; both `None` between
		// ranges.
		start: Option<MappedPageItem>,
		end: Option<MappedPageItem>,
	}

	/// Creates a coalescing range iterator over a [`MappedPageTable`].
	pub fn mapped_page_table_range_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableRangeInclusiveIter<'a, &'a P> {
		MappedPageTableRangeInclusiveIter {
			inner: mapped_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}

	/// Creates a coalescing range iterator over an [`OffsetPageTable`].
	pub fn offset_page_table_range_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableRangeInclusiveIter<'a, PhysOffset> {
		MappedPageTableRangeInclusiveIter {
			inner: offset_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}

	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableRangeInclusiveIter<'a, P> {
		type Item = MappedPageRangeInclusiveItem;

		fn next(&mut self) -> Option<Self::Item> {
			// Start a new range from the next mapped page, if any.
			if self.start.is_none() {
				self.start = self.inner.next();
				self.end = self.start;
			}

			let Some(start) = &mut self.start else {
				return None;
			};
			let end = self.end.as_mut().unwrap();

			for mapped_page in self.inner.by_ref() {
				// `+ 1` advances page and frame but keeps flags, so equality
				// means "contiguous and identically flagged" — extend.
				if mapped_page == *end + 1 {
					*end = mapped_page;
					continue;
				}

				// Discontinuity: emit the finished range and start a new one.
				let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
				*start = mapped_page;
				*end = mapped_page;
				return Some(range);
			}

			// Inner iterator exhausted: emit the final range.
			let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
			self.start = None;
			self.end = None;
			Some(range)
		}
	}

	/// A single mapped page of size `S`: its page, backing frame, and flags.
	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub struct MappedPage<S: PageSize> {
		pub page: Page<S>,
		pub frame: PhysFrame<S>,
		pub flags: PageTableFlags,
	}

	impl<S: PageSize> Add<u64> for MappedPage<S> {
		type Output = Self;

		/// Advances page and frame by `rhs` pages; flags are carried over.
		fn add(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page + rhs,
				frame: self.frame + rhs,
				flags: self.flags,
			}
		}
	}

	impl<S: PageSize> Sub<u64> for MappedPage<S> {
		type Output = Self;

		/// Moves page and frame back by `rhs` pages; flags are carried over.
		fn sub(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page - rhs,
				frame: self.frame - rhs,
				flags: self.flags,
			}
		}
	}

	/// A [`MappedPage`] of any of the three x86_64 page sizes.
	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub enum MappedPageItem {
		Size4KiB(MappedPage<Size4KiB>),
		Size2MiB(MappedPage<Size2MiB>),
		Size1GiB(MappedPage<Size1GiB>),
	}

	impl Add<u64> for MappedPageItem {
		type Output = Self;

		fn add(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page + rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page + rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page + rhs),
			}
		}
	}

	impl AddAssign<u64> for MappedPageItem {
		fn add_assign(&mut self, rhs: u64) {
			*self = *self + rhs;
		}
	}

	impl Sub<u64> for MappedPageItem {
		type Output = Self;

		fn sub(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page - rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page - rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page - rhs),
			}
		}
	}

	impl SubAssign<u64> for MappedPageItem {
		fn sub_assign(&mut self, rhs: u64) {
			*self = *self - rhs;
		}
	}

	/// Depth-first walk over all mapped pages of a 4-level page table.
	///
	/// The four indices encode the current position; an index of 512 marks
	/// the corresponding level as exhausted.
	#[derive(Clone)]
	pub struct MappedPageTableIter<'a, P: PageTableFrameMapping> {
		page_table_walker: PageTableWalker<P>,
		level_4_table: &'a PageTable,
		p4_index: u16,
		p3_index: u16,
		p2_index: u16,
		p1_index: u16,
	}

	/// Creates a mapped-page iterator over a [`MappedPageTable`].
	pub fn mapped_page_table_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableIter<'a, &'a P> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(page_table.page_table_frame_mapping())
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}

	/// Creates a mapped-page iterator over an [`OffsetPageTable`].
	pub fn offset_page_table_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableIter<'a, PhysOffset> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(PhysOffset {
					offset: page_table.phys_offset(),
				})
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}

	impl<'a, P: PageTableFrameMapping> MappedPageTableIter<'a, P> {
		// The `*_index` accessors return `None` once the level is exhausted
		// (index past 511), which `?` propagates out of `next_forward`.
		fn p4_index(&self) -> Option<PageTableIndex> {
			if self.p4_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p4_index))
		}

		fn p3_index(&self) -> Option<PageTableIndex> {
			if self.p3_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p3_index))
		}

		fn p2_index(&self) -> Option<PageTableIndex> {
			if self.p2_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p2_index))
		}

		fn p1_index(&self) -> Option<PageTableIndex> {
			if self.p1_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p1_index))
		}

		// The `increment_*` helpers advance the walk position. They return
		// `None` when the increment crossed into a higher level (or ran off
		// the end), signalling `next_forward` to bail out and be retried.
		fn increment_p4_index(&mut self) -> Option<()> {
			if self.p4_index >= 511 {
				self.p4_index += 1;
				return None;
			}

			self.p4_index += 1;
			self.p3_index = 0;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p3_index(&mut self) -> Option<()> {
			if self.p3_index == 511 {
				self.increment_p4_index()?;
				return None;
			}

			self.p3_index += 1;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p2_index(&mut self) -> Option<()> {
			if self.p2_index == 511 {
				self.increment_p3_index()?;
				return None;
			}

			self.p2_index += 1;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p1_index(&mut self) -> Option<()> {
			if self.p1_index == 511 {
				self.increment_p2_index()?;
				return None;
			}

			self.p1_index += 1;
			Some(())
		}

		/// Advances to and returns the next mapped page, descending through
		/// P4 → P3 → P2 → P1 and emitting 1 GiB / 2 MiB huge pages where the
		/// walker reports them.
		///
		/// May return `None` spuriously when an increment crossed a table
		/// boundary; `Iterator::next` retries once to compensate.
		fn next_forward(&mut self) -> Option<MappedPageItem> {
			let p4 = self.level_4_table;

			let p3 = loop {
				match self.page_table_walker.next_table(&p4[self.p4_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p4_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						panic!("level 4 entry has huge page bit set")
					}
				}
			};

			let p2 = loop {
				match self.page_table_walker.next_table(&p3[self.p3_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p3_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						// 1 GiB huge page: emit it directly from the P3 entry.
						let page =
							Page::from_page_table_indices_1gib(self.p4_index()?, self.p3_index()?);
						let entry = &p3[self.p3_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size1GiB(MappedPage { page, frame, flags });

						// Result deliberately ignored; exhaustion is detected
						// on the next call.
						self.increment_p3_index();
						return Some(mapped_page);
					}
				}
			};

			let p1 = loop {
				match self.page_table_walker.next_table(&p2[self.p2_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p2_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						// 2 MiB huge page: emit it directly from the P2 entry.
						let page = Page::from_page_table_indices_2mib(
							self.p4_index()?,
							self.p3_index()?,
							self.p2_index()?,
						);
						let entry = &p2[self.p2_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size2MiB(MappedPage { page, frame, flags });

						self.increment_p2_index();
						return Some(mapped_page);
					}
				}
			};

			loop {
				let p1_entry = &p1[self.p1_index()?];

				if p1_entry.is_unused() {
					self.increment_p1_index()?;
					continue;
				}

				let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
					Ok(frame) => frame,
					Err(AddressNotAligned) => {
						warn!("Invalid frame address: {:p}", p1_entry.addr());
						self.increment_p1_index()?;
						continue;
					}
				};

				let page = Page::from_page_table_indices(
					self.p4_index()?,
					self.p3_index()?,
					self.p2_index()?,
					self.p1_index()?,
				);
				let flags = p1_entry.flags();
				let mapped_page = MappedPageItem::Size4KiB(MappedPage { page, frame, flags });

				self.increment_p1_index();
				return Some(mapped_page);
			}
		}
	}

	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableIter<'a, P> {
		type Item = MappedPageItem;

		fn next(&mut self) -> Option<Self::Item> {
			// `next_forward` can return `None` after merely crossing a table
			// boundary, so retry once before reporting exhaustion.
			self.next_forward().or_else(|| self.next_forward())
		}
	}
}
929
mod walker {
	//! Taken from [`x86_64`]
	//!
	//! Minimal page-table walker: resolves a page-table entry to a reference
	//! to the next-level table via a [`PageTableFrameMapping`].

	use x86_64::structures::paging::PageTable;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::page_table::{FrameError, PageTableEntry};

	#[derive(Clone, Debug)]
	pub(super) struct PageTableWalker<P: PageTableFrameMapping> {
		page_table_frame_mapping: P,
	}

	impl<P: PageTableFrameMapping> PageTableWalker<P> {
		/// # Safety
		///
		/// The caller must guarantee that `page_table_frame_mapping` yields
		/// valid, dereferenceable pointers for every page-table frame it is
		/// asked about (same contract as in the upstream `x86_64` crate).
		#[inline]
		pub unsafe fn new(page_table_frame_mapping: P) -> Self {
			Self {
				page_table_frame_mapping,
			}
		}

		/// Internal helper function to get a reference to the page table of the next level.
		///
		/// Returns `PageTableWalkError::NotMapped` if the entry is unused. Returns
		/// `PageTableWalkError::MappedToHugePage` if the `HUGE_PAGE` flag is set
		/// in the passed entry.
		#[inline]
		pub(super) fn next_table<'b>(
			&self,
			entry: &'b PageTableEntry,
		) -> Result<&'b PageTable, PageTableWalkError> {
			// `entry.frame()?` surfaces both error cases via the `From`
			// impl below.
			let page_table_ptr = self
				.page_table_frame_mapping
				.frame_to_pointer(entry.frame()?);
			let page_table: &PageTable = unsafe { &*page_table_ptr };

			Ok(page_table)
		}
	}

	/// Why a page-table entry could not be followed to the next level.
	#[derive(Debug)]
	pub(super) enum PageTableWalkError {
		NotMapped,
		MappedToHugePage,
	}

	impl From<FrameError> for PageTableWalkError {
		#[inline]
		fn from(err: FrameError) -> Self {
			match err {
				FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
				FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
			}
		}
	}
}
985
mod offset_page_table {
	//! Taken from [`x86_64`]
	//!
	//! [`PageTableFrameMapping`] for page tables reachable at a fixed
	//! virtual-address offset from their physical frames (offset 0 in this
	//! file, i.e. identity-mapped).

	use x86_64::VirtAddr;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{PageTable, PhysFrame};

	#[derive(Clone, Debug)]
	pub struct PhysOffset {
		// Virtual address at which physical address 0 is mapped.
		pub offset: VirtAddr,
	}

	// SAFETY: upheld by whoever constructs `PhysOffset` — `offset` must map
	// all page-table frames (see `identity_mapped_page_table`).
	unsafe impl PageTableFrameMapping for PhysOffset {
		fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
			let virt = self.offset + frame.start_address().as_u64();
			virt.as_mut_ptr()
		}
	}
}
1004}