hermit/arch/x86_64/mm/paging.rs

1use core::fmt::Debug;
2use core::ptr;
3
4use free_list::PageLayout;
5use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
6#[cfg(feature = "common-os")]
7use x86_64::registers::segmentation::SegmentSelector;
8pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
9use x86_64::structures::idt::PageFaultErrorCode;
10pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
11use x86_64::structures::paging::frame::PhysFrameRange;
12use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult, UnmapError};
13use x86_64::structures::paging::page::PageRange;
14use x86_64::structures::paging::{
15	Mapper, OffsetPageTable, Page, PageTable, PhysFrame, RecursivePageTable, Size4KiB, Translate,
16};
17
18use crate::arch::x86_64::kernel::processor;
19use crate::arch::x86_64::mm::{PhysAddr, VirtAddr};
20use crate::mm::physicalmem;
21use crate::mm::physicalmem::PHYSICAL_FREE_LIST;
22use crate::{env, scheduler};
23
/// Extension trait adding chainable builder methods to [`PageTableEntryFlags`].
///
/// Every method mutates the flags in place and returns `&mut Self`, so calls
/// can be chained, e.g. `flags.normal().writable().execute_disable()`.
pub trait PageTableEntryFlagsExt {
	/// Marks the mapping as device memory (sets `NO_CACHE`).
	fn device(&mut self) -> &mut Self;

	/// Marks the mapping as normal, cacheable memory (clears `NO_CACHE`).
	fn normal(&mut self) -> &mut Self;

	/// Makes the mapping read-only (clears `WRITABLE`).
	#[cfg(feature = "acpi")]
	fn read_only(&mut self) -> &mut Self;

	/// Makes the mapping writable (sets `WRITABLE`).
	fn writable(&mut self) -> &mut Self;

	/// Forbids instruction fetches from the mapping (sets `NO_EXECUTE`).
	fn execute_disable(&mut self) -> &mut Self;

	/// Allows instruction fetches from the mapping (clears `NO_EXECUTE`).
	#[cfg(feature = "common-os")]
	fn execute_enable(&mut self) -> &mut Self;

	/// Makes the mapping accessible from user mode (sets `USER_ACCESSIBLE`).
	#[cfg(feature = "common-os")]
	fn user(&mut self) -> &mut Self;

	/// Restricts the mapping to kernel mode (clears `USER_ACCESSIBLE`).
	#[expect(dead_code)]
	#[cfg(feature = "common-os")]
	fn kernel(&mut self) -> &mut Self;
}
46
47impl PageTableEntryFlagsExt for PageTableEntryFlags {
48	fn device(&mut self) -> &mut Self {
49		self.insert(PageTableEntryFlags::NO_CACHE);
50		self
51	}
52
53	fn normal(&mut self) -> &mut Self {
54		self.remove(PageTableEntryFlags::NO_CACHE);
55		self
56	}
57
58	#[cfg(feature = "acpi")]
59	fn read_only(&mut self) -> &mut Self {
60		self.remove(PageTableEntryFlags::WRITABLE);
61		self
62	}
63
64	fn writable(&mut self) -> &mut Self {
65		self.insert(PageTableEntryFlags::WRITABLE);
66		self
67	}
68
69	fn execute_disable(&mut self) -> &mut Self {
70		self.insert(PageTableEntryFlags::NO_EXECUTE);
71		self
72	}
73
74	#[cfg(feature = "common-os")]
75	fn execute_enable(&mut self) -> &mut Self {
76		self.remove(PageTableEntryFlags::NO_EXECUTE);
77		self
78	}
79
80	#[cfg(feature = "common-os")]
81	fn user(&mut self) -> &mut Self {
82		self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
83		self
84	}
85
86	#[cfg(feature = "common-os")]
87	fn kernel(&mut self) -> &mut Self {
88		self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
89		self
90	}
91}
92
93pub use x86_64::structures::paging::{
94	PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
95};
96
/// Returns a mapping of the physical memory where physical address is equal to the virtual address (no offset)
///
/// Reads the level-4 (PML4) frame from CR3 and interprets its physical start
/// address directly as a virtual address (offset `0`).
///
/// # Safety
///
/// Physical memory must be identity-mapped in the current address space, so
/// that the PML4 frame is accessible at the virtual address equal to its
/// physical address. The returned table aliases the live page tables; callers
/// must not create conflicting mutable references to them.
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
	let level_4_table_addr = Cr3::read().0.start_address().as_u64();
	let level_4_table_ptr =
		ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
	unsafe {
		let level_4_table = level_4_table_ptr.as_mut().unwrap();
		OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
	}
}
107
108/// Translate a virtual memory address to a physical one.
109pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
110	let addr = x86_64::VirtAddr::from(virtual_address);
111
112	let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);
113
114	match translate_result {
115		TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
116			trace!("Uable to determine the physical address of 0x{virtual_address:X}");
117			None
118		}
119		TranslateResult::Mapped { frame, offset, .. } => {
120			Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
121		}
122	}
123}
124
/// Maps a continuous range of pages.
///
/// # Arguments
///
/// * `physical_address` - First physical address to map these pages to
/// * `flags` - Flags from PageTableEntryFlags to set for the page table entry (e.g. WRITABLE or NO_EXECUTE).
///   The PRESENT flag is set automatically.
pub fn map<S>(
	virtual_address: VirtAddr,
	physical_address: PhysAddr,
	count: usize,
	flags: PageTableEntryFlags,
) where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	// Virtual page range covering `count` pages of size S.
	let pages = {
		let start = Page::<S>::containing_address(virtual_address.into());
		let end = start + count as u64;
		Page::range(start, end)
	};

	// Matching physical frame range starting at `physical_address`.
	let frames = {
		let start = PhysFrame::<S>::containing_address(physical_address.into());
		let end = start + count as u64;
		PhysFrame::range(start, end)
	};

	let flags = flags | PageTableEntryFlags::PRESENT;

	trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

	// Maps each page to its corresponding frame, first unmapping (and
	// flushing) any existing mapping. Returns `true` if at least one page had
	// to be unmapped, so the caller knows remote TLBs may hold stale entries.
	unsafe fn map_pages<M, S>(
		mapper: &mut M,
		pages: PageRange<S>,
		frames: PhysFrameRange<S>,
		flags: PageTableEntryFlags,
	) -> bool
	where
		M: Mapper<S>,
		S: PageSize + Debug,
	{
		// The allocator supplies frames for newly created page-table levels.
		let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
		let mut unmapped = false;
		for (page, frame) in pages.zip(frames) {
			// TODO: Require explicit unmaps
			let unmap = mapper.unmap(page);
			if let Ok((_frame, flush)) = unmap {
				unmapped = true;
				flush.flush();
				debug!("Had to unmap page {page:?} before mapping.");
			}
			let map = unsafe { mapper.map_to(page, frame, flags, &mut *frame_allocator) };
			match map {
				Ok(mapper_flush) => mapper_flush.flush(),
				Err(err) => panic!("Could not map {page:?} to {frame:?}: {err:?}"),
			}
		}
		unmapped
	}

	let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

	// If an existing mapping was replaced, other cores may still cache the old
	// translation, so shoot down their TLBs.
	if unmapped {
		#[cfg(feature = "smp")]
		crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
	}
}
194
/// Maps `count` pages at address `virt_addr`. If the allocation of a physical memory failed,
/// the number of successful mapped pages are returned as error value.
pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	// Heap pages are normal (cacheable), writable, and never executable.
	let flags = {
		let mut flags = PageTableEntryFlags::empty();
		flags.normal().writable().execute_disable();
		flags
	};

	// Virtual start address of every page to map.
	let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);

	for (map_counter, virt_addr) in virt_addrs.enumerate() {
		// Allocate one naturally aligned physical frame per page; on failure,
		// report how many pages were mapped successfully before running out.
		// NOTE(review): pages mapped before a failure stay mapped.
		let layout = PageLayout::from_size_align(S::SIZE as usize, S::SIZE as usize).unwrap();
		let frame_range = PHYSICAL_FREE_LIST
			.lock()
			.allocate(layout)
			.map_err(|_| map_counter)?;
		let phys_addr = PhysAddr::from(frame_range.start());
		map::<S>(virt_addr, phys_addr, 1, flags);
	}

	Ok(())
}
223
/// Identity-maps the frame of size `S` starting at `phys_addr` (present,
/// writable, non-executable).
///
/// Panics if `phys_addr` is not aligned to `S::SIZE`. If the page is already
/// mapped, the existing mapping must be an identity mapping of the same frame;
/// any mismatch panics.
pub fn identity_map<S>(phys_addr: PhysAddr)
where
	S: PageSize + Debug,
	for<'a> RecursivePageTable<'a>: Mapper<S>,
	for<'a> OffsetPageTable<'a>: Mapper<S>,
{
	let frame = PhysFrame::<S>::from_start_address(phys_addr.into()).unwrap();
	let flags = PageTableEntryFlags::PRESENT
		| PageTableEntryFlags::WRITABLE
		| PageTableEntryFlags::NO_EXECUTE;
	let mut frame_allocator = physicalmem::PHYSICAL_FREE_LIST.lock();
	let mapper_result =
		unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut *frame_allocator) };

	match mapper_result {
		Ok(mapper_flush) => mapper_flush.flush(),
		// Already mapped: acceptable as long as it maps to the very same frame.
		Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
		// Covered by a larger (huge) page: acceptable if that mapping already
		// translates the address to itself.
		Err(MapToError::ParentEntryHugePage) => {
			let page_table = unsafe { identity_mapped_page_table() };
			let virt_addr = VirtAddr::new(frame.start_address().as_u64()).into();
			let phys_addr = frame.start_address();
			assert_eq!(page_table.translate_addr(virt_addr), Some(phys_addr));
		}
		Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
	}
}
250
251pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
252where
253	S: PageSize + Debug,
254	for<'a> RecursivePageTable<'a>: Mapper<S>,
255	for<'a> OffsetPageTable<'a>: Mapper<S>,
256{
257	trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");
258
259	let first_page = Page::<S>::containing_address(virtual_address.into());
260	let last_page = first_page + count as u64;
261	let range = Page::range(first_page, last_page);
262
263	for page in range {
264		let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
265		match unmap_result {
266			Ok((_frame, flush)) => flush.flush(),
267			// FIXME: Some sentinel pages around stacks are supposed to be unmapped.
268			// We should handle this case there instead of here.
269			Err(UnmapError::PageNotMapped) => {
270				debug!("Tried to unmap {page:?}, which was not mapped.");
271			}
272			Err(err) => panic!("{err:?}"),
273		}
274	}
275}
276
/// Page-fault (#PF) exception handler.
///
/// Logs the faulting linear address (CR2), the error code, the FS/GS bases,
/// and the exception stack frame, then aborts the current task.
#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
290
/// Page-fault (#PF) exception handler for the `common-os` configuration.
///
/// Performs `swapgs` first when the fault did not originate from code segment
/// selector `0x08` (presumably the kernel code segment — faults from any other
/// segment entered from user mode, where GS still holds the user base), then
/// logs the fault details and aborts the current task.
#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
	mut stack_frame: ExceptionStackFrame,
	error_code: PageFaultErrorCode,
) {
	unsafe {
		if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
			core::arch::asm!("swapgs", options(nostack));
		}
	}
	error!("Page fault (#PF)!");
	error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
	error!("error_code = {error_code:?}");
	error!("fs = {:#X}", processor::readfs());
	error!("gs = {:#X}", processor::readgs());
	error!("stack_frame = {stack_frame:#?}");
	scheduler::abort();
}
309
/// Initializes this module: dumps the current page tables at debug log level
/// and, on UEFI systems, makes the root page table writable.
pub fn init() {
	unsafe {
		log_page_tables();
	}
	make_p4_writable();
}
316
/// Ensures that the root (P4 / PML4) page table frame is mapped writable.
///
/// Only UEFI boots are handled (the firmware-provided tables may map the P4
/// frame read-only — TODO confirm); on other boots this is a no-op. Because
/// updating the entry may itself require writing through a write-protected
/// mapping, CR0.WP is temporarily cleared around the update.
fn make_p4_writable() {
	debug!("Making P4 table writable");

	if !env::is_uefi() {
		return;
	}

	let mut pt = unsafe { identity_mapped_page_table() };

	// The identity-mapped virtual page covering the P4 frame from CR3.
	let p4_page = {
		let (p4_frame, _) = Cr3::read_raw();
		let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
		Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
	};

	// The active P4 frame must be mapped while paging is on.
	let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
		unreachable!()
	};

	// Set WRITABLE on the entry that actually maps the P4 page, at whichever
	// level (1 GiB / 2 MiB / 4 KiB) that mapping lives. TLB flushes are
	// ignored here.
	let make_writable = || unsafe {
		let flags = flags | PageTableEntryFlags::WRITABLE;
		match frame {
			MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
			MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
		}
	};

	// Runs `f` with CR0.WP cleared, restoring the previous CR0 afterwards.
	unsafe fn without_protect<F, R>(f: F) -> R
	where
		F: FnOnce() -> R,
	{
		let cr0 = Cr0::read();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
		}
		let ret = f();
		if cr0.contains(Cr0Flags::WRITE_PROTECT) {
			unsafe { Cr0::write(cr0) }
		}
		ret
	}

	unsafe { without_protect(make_writable) }
}
362
/// No-op on x86_64 — presumably kept for API parity with other architectures;
/// the actual setup happens in [`init`].
pub fn init_page_tables() {}
364
/// Logs all current page-table mappings at debug level.
///
/// Returns early without touching the page tables unless the `Debug` log
/// level is enabled.
///
/// # Safety
///
/// Physical memory must be identity-mapped (see [`identity_mapped_page_table`]).
pub unsafe fn log_page_tables() {
	use log::Level;

	use self::mapped_page_range_display::OffsetPageTableExt;

	if !log_enabled!(Level::Debug) {
		return;
	}

	let page_table = unsafe { identity_mapped_page_table() };
	debug!("Page tables:\n{}", page_table.display());
}
377
pub mod mapped_page_range_display {
	//! [`fmt::Display`] adapters that render a page table as a list of
	//! contiguous mapped page ranges, one line per range.

	use core::fmt::{self, Write};

	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{MappedPageTable, OffsetPageTable, PageSize};

	use super::mapped_page_table_iter::{
		self, MappedPageRangeInclusive, MappedPageRangeInclusiveItem,
		MappedPageTableRangeInclusiveIter,
	};
	use super::offset_page_table::PhysOffset;

	/// Extension trait for displaying a [`MappedPageTable`] as mapped ranges.
	#[expect(dead_code)]
	pub trait MappedPageTableExt<P: PageTableFrameMapping + Clone> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P>;
	}

	impl<P: PageTableFrameMapping + Clone> MappedPageTableExt<P> for MappedPageTable<'_, P> {
		fn display(&self) -> MappedPageTableDisplay<'_, &P> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::mapped_page_table_range_iter(self),
			}
		}
	}

	/// Extension trait for displaying an [`OffsetPageTable`] as mapped ranges.
	pub trait OffsetPageTableExt {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset>;
	}

	impl OffsetPageTableExt for OffsetPageTable<'_> {
		fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset> {
			MappedPageTableDisplay {
				inner: mapped_page_table_iter::offset_page_table_range_iter(self),
			}
		}
	}

	/// Displays every mapped page range of a page table, one range per line.
	pub struct MappedPageTableDisplay<'a, P: PageTableFrameMapping + Clone> {
		inner: MappedPageTableRangeInclusiveIter<'a, P>,
	}

	impl<P: PageTableFrameMapping + Clone> fmt::Display for MappedPageTableDisplay<'_, P> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			let mut has_fields = false;

			// Separate ranges by newlines without emitting a trailing one.
			for mapped_page_range in self.inner.clone() {
				if has_fields {
					f.write_char('\n')?;
				}
				write!(f, "{}", mapped_page_range.display())?;

				has_fields = true;
			}

			Ok(())
		}
	}

	/// Extension trait for displaying a [`MappedPageRangeInclusiveItem`].
	pub trait MappedPageRangeInclusiveItemExt {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_>;
	}

	impl MappedPageRangeInclusiveItemExt for MappedPageRangeInclusiveItem {
		fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_> {
			MappedPageRangeInclusiveItemDisplay { inner: self }
		}
	}

	/// Display adapter that dispatches on the range's page size.
	pub struct MappedPageRangeInclusiveItemDisplay<'a> {
		inner: &'a MappedPageRangeInclusiveItem,
	}

	impl fmt::Display for MappedPageRangeInclusiveItemDisplay<'_> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			match self.inner {
				MappedPageRangeInclusiveItem::Size4KiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size2MiB(range) => range.display().fmt(f),
				MappedPageRangeInclusiveItem::Size1GiB(range) => range.display().fmt(f),
			}
		}
	}

	/// Extension trait for displaying a [`MappedPageRangeInclusive`].
	pub trait MappedPageRangeInclusiveExt<S: PageSize> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S>;
	}

	impl<S: PageSize> MappedPageRangeInclusiveExt<S> for MappedPageRangeInclusive<S> {
		fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S> {
			MappedPageRangeInclusiveDisplay { inner: self }
		}
	}

	/// Displays one contiguous mapped range: page size, length, virtual and
	/// physical bounds, and flags.
	pub struct MappedPageRangeInclusiveDisplay<'a, S: PageSize> {
		inner: &'a MappedPageRangeInclusive<S>,
	}

	impl<S: PageSize> fmt::Display for MappedPageRangeInclusiveDisplay<'_, S> {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			let size = S::DEBUG_STR;
			let len = self.inner.page_range.len();
			let page_start = self.inner.page_range.start.start_address();
			let page_end = self.inner.page_range.end.start_address();
			let frame_start = self.inner.frame_range.start.start_address();
			let frame_end = self.inner.frame_range.end.start_address();
			let flags = self.inner.flags;
			// Identity mappings are abbreviated instead of repeating the bounds.
			let format_phys = if page_start.as_u64() == frame_start.as_u64() {
				assert_eq!(page_end.as_u64(), frame_end.as_u64());
				format_args!("{:>39}", "identity mapped")
			} else {
				format_args!("{frame_start:18p}..={frame_end:18p}")
			};
			write!(
				f,
				"size: {size}, len: {len:5}, virt: {page_start:18p}..={page_end:18p}, phys: {format_phys}, flags: {flags:?}"
			)
		}
	}
}
496
497pub mod mapped_page_table_iter {
498	//! TODO: try to upstream this to [`x86_64`].
499
500	use core::fmt;
501	use core::ops::{Add, AddAssign, Sub, SubAssign};
502
503	use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
504	use x86_64::structures::paging::mapper::PageTableFrameMapping;
505	use x86_64::structures::paging::page::{AddressNotAligned, PageRangeInclusive};
506	use x86_64::structures::paging::{
507		MappedPageTable, OffsetPageTable, Page, PageSize, PageTable, PageTableFlags,
508		PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
509	};
510
511	use super::offset_page_table::PhysOffset;
512	use super::walker::{PageTableWalkError, PageTableWalker};
513
	/// A run of contiguously mapped pages of one size that map to contiguous
	/// frames and share the same flags.
	#[derive(Debug)]
	pub struct MappedPageRangeInclusive<S: PageSize> {
		pub page_range: PageRangeInclusive<S>,
		pub frame_range: PhysFrameRangeInclusive<S>,
		pub flags: PageTableFlags,
	}

	impl<S: PageSize> TryFrom<(MappedPage<S>, MappedPage<S>)> for MappedPageRangeInclusive<S> {
		type Error = TryFromMappedPageError;

		/// Builds an inclusive range from its first and last mapped page.
		/// Fails if the two pages do not share the same flags. (Contiguity of
		/// pages and frames is the caller's responsibility.)
		fn try_from((start, end): (MappedPage<S>, MappedPage<S>)) -> Result<Self, Self::Error> {
			if start.flags != end.flags {
				return Err(TryFromMappedPageError);
			}

			Ok(Self {
				page_range: PageRangeInclusive {
					start: start.page,
					end: end.page,
				},
				frame_range: PhysFrameRangeInclusive {
					start: start.frame,
					end: end.frame,
				},
				flags: start.flags,
			})
		}
	}
542
	/// A [`MappedPageRangeInclusive`] of any of the three x86_64 page sizes.
	#[derive(Debug)]
	pub enum MappedPageRangeInclusiveItem {
		Size4KiB(MappedPageRangeInclusive<Size4KiB>),
		Size2MiB(MappedPageRangeInclusive<Size2MiB>),
		Size1GiB(MappedPageRangeInclusive<Size1GiB>),
	}

	impl TryFrom<(MappedPageItem, MappedPageItem)> for MappedPageRangeInclusiveItem {
		type Error = TryFromMappedPageError;

		/// Builds a size-tagged range from the first and last mapped page.
		/// Fails if the two pages have different page sizes or (via the inner
		/// conversion) different flags.
		fn try_from((start, end): (MappedPageItem, MappedPageItem)) -> Result<Self, Self::Error> {
			match (start, end) {
				(MappedPageItem::Size4KiB(start), MappedPageItem::Size4KiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size4KiB(range))
				}
				(MappedPageItem::Size2MiB(start), MappedPageItem::Size2MiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size2MiB(range))
				}
				(MappedPageItem::Size1GiB(start), MappedPageItem::Size1GiB(end)) => {
					let range = MappedPageRangeInclusive::try_from((start, end))?;
					Ok(Self::Size1GiB(range))
				}
				(_, _) => Err(TryFromMappedPageError),
			}
		}
	}
571
	/// Error returned when two mapped pages cannot form a single range
	/// (mismatched page sizes or flags).
	#[derive(PartialEq, Eq, Clone, Debug)]
	pub struct TryFromMappedPageError;

	impl fmt::Display for TryFromMappedPageError {
		fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
			f.write_str("provided mapped pages were not compatible")
		}
	}
580
	/// Iterator that coalesces the single mapped pages produced by
	/// [`MappedPageTableIter`] into contiguous ranges.
	#[derive(Clone)]
	pub struct MappedPageTableRangeInclusiveIter<'a, P: PageTableFrameMapping> {
		inner: MappedPageTableIter<'a, P>,
		// Bounds of the range currently being accumulated (`None` when idle).
		start: Option<MappedPageItem>,
		end: Option<MappedPageItem>,
	}

	/// Creates a range iterator over a [`MappedPageTable`].
	#[expect(dead_code)]
	pub fn mapped_page_table_range_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableRangeInclusiveIter<'a, &'a P> {
		MappedPageTableRangeInclusiveIter {
			inner: mapped_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}

	/// Creates a range iterator over an [`OffsetPageTable`].
	pub fn offset_page_table_range_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableRangeInclusiveIter<'a, PhysOffset> {
		MappedPageTableRangeInclusiveIter {
			inner: offset_page_table_iter(page_table),
			start: None,
			end: None,
		}
	}
608
	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableRangeInclusiveIter<'a, P> {
		type Item = MappedPageRangeInclusiveItem;

		/// Greedily extends the current range while each next mapped page is
		/// exactly the successor (`*end + 1`: next page, next frame, same size
		/// and flags) of the previous one; emits the finished range otherwise.
		fn next(&mut self) -> Option<Self::Item> {
			// Begin a new range from the next mapped page, if any.
			if self.start.is_none() {
				self.start = self.inner.next();
				self.end = self.start;
			}

			let Some(start) = &mut self.start else {
				return None;
			};
			let end = self.end.as_mut().unwrap();

			for mapped_page in self.inner.by_ref() {
				if mapped_page == *end + 1 {
					*end = mapped_page;
					continue;
				}

				// Discontinuity: emit the finished range and start a new one.
				let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
				*start = mapped_page;
				*end = mapped_page;
				return Some(range);
			}

			// Inner iterator exhausted: emit the last accumulated range.
			let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
			self.start = None;
			self.end = None;
			Some(range)
		}
	}
641
	/// A single mapped page: the page, the frame it maps to, and its flags.
	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub struct MappedPage<S: PageSize> {
		pub page: Page<S>,
		pub frame: PhysFrame<S>,
		pub flags: PageTableFlags,
	}

	// Shifts page and frame forward by `rhs` pages; flags are kept, which
	// makes `mapped + 1` the expected successor in a contiguous range.
	impl<S: PageSize> Add<u64> for MappedPage<S> {
		type Output = Self;

		fn add(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page + rhs,
				frame: self.frame + rhs,
				flags: self.flags,
			}
		}
	}

	// Shifts page and frame backward by `rhs` pages; flags are kept.
	impl<S: PageSize> Sub<u64> for MappedPage<S> {
		type Output = Self;

		fn sub(self, rhs: u64) -> Self::Output {
			Self {
				page: self.page - rhs,
				frame: self.frame - rhs,
				flags: self.flags,
			}
		}
	}
672
	/// A [`MappedPage`] of any of the three x86_64 page sizes.
	#[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
	pub enum MappedPageItem {
		Size4KiB(MappedPage<Size4KiB>),
		Size2MiB(MappedPage<Size2MiB>),
		Size1GiB(MappedPage<Size1GiB>),
	}

	// The arithmetic impls below delegate to `MappedPage`'s operators and
	// preserve the page-size variant.
	impl Add<u64> for MappedPageItem {
		type Output = Self;

		fn add(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page + rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page + rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page + rhs),
			}
		}
	}

	impl AddAssign<u64> for MappedPageItem {
		fn add_assign(&mut self, rhs: u64) {
			*self = *self + rhs;
		}
	}

	impl Sub<u64> for MappedPageItem {
		type Output = Self;

		fn sub(self, rhs: u64) -> Self::Output {
			match self {
				Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page - rhs),
				Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page - rhs),
				Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page - rhs),
			}
		}
	}

	impl SubAssign<u64> for MappedPageItem {
		fn sub_assign(&mut self, rhs: u64) {
			*self = *self - rhs;
		}
	}
715
	/// Iterator over every mapped page of a page-table hierarchy, walking the
	/// four levels in virtual-address order. The `pN_index` fields are the
	/// current position in each table (0..=511; 512 marks exhaustion).
	#[derive(Clone)]
	pub struct MappedPageTableIter<'a, P: PageTableFrameMapping> {
		page_table_walker: PageTableWalker<P>,
		level_4_table: &'a PageTable,
		p4_index: u16,
		p3_index: u16,
		p2_index: u16,
		p1_index: u16,
	}

	/// Creates a mapped-page iterator over a [`MappedPageTable`].
	pub fn mapped_page_table_iter<'a, P: PageTableFrameMapping>(
		page_table: &'a MappedPageTable<'a, P>,
	) -> MappedPageTableIter<'a, &'a P> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(page_table.page_table_frame_mapping())
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}

	/// Creates a mapped-page iterator over an [`OffsetPageTable`].
	pub fn offset_page_table_iter<'a>(
		page_table: &'a OffsetPageTable<'a>,
	) -> MappedPageTableIter<'a, PhysOffset> {
		MappedPageTableIter {
			page_table_walker: unsafe {
				PageTableWalker::new(PhysOffset {
					offset: page_table.phys_offset(),
				})
			},
			level_4_table: page_table.level_4_table(),
			p4_index: 0,
			p3_index: 0,
			p2_index: 0,
			p1_index: 0,
		}
	}
757
	impl<'a, P: PageTableFrameMapping> MappedPageTableIter<'a, P> {
		// The `pN_index` accessors return `None` once the index has run past
		// the 512 entries of a table, signalling exhaustion at that level.

		fn p4_index(&self) -> Option<PageTableIndex> {
			if self.p4_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p4_index))
		}

		fn p3_index(&self) -> Option<PageTableIndex> {
			if self.p3_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p3_index))
		}

		fn p2_index(&self) -> Option<PageTableIndex> {
			if self.p2_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p2_index))
		}

		fn p1_index(&self) -> Option<PageTableIndex> {
			if self.p1_index >= 512 {
				return None;
			}

			Some(PageTableIndex::new(self.p1_index))
		}

		// The `increment_pN_index` helpers advance the walk position.
		// Advancing to the next entry of the *parent* table returns `None`
		// (even though the walk may continue there) — `next_forward` uses `?`
		// on these, so crossing a table boundary aborts the current attempt;
		// `Iterator::next` compensates by retrying once.

		fn increment_p4_index(&mut self) -> Option<()> {
			// Past the last P4 entry the whole walk is finished; the index
			// saturates at/above 512 so `p4_index()` keeps returning `None`.
			if self.p4_index >= 511 {
				self.p4_index += 1;
				return None;
			}

			self.p4_index += 1;
			self.p3_index = 0;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p3_index(&mut self) -> Option<()> {
			if self.p3_index == 511 {
				// Wrap into the next P4 entry (which resets p3/p2/p1).
				self.increment_p4_index()?;
				return None;
			}

			self.p3_index += 1;
			self.p2_index = 0;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p2_index(&mut self) -> Option<()> {
			if self.p2_index == 511 {
				// Wrap into the next P3 entry (which resets p2/p1).
				self.increment_p3_index()?;
				return None;
			}

			self.p2_index += 1;
			self.p1_index = 0;
			Some(())
		}

		fn increment_p1_index(&mut self) -> Option<()> {
			if self.p1_index == 511 {
				// Wrap into the next P2 entry (which resets p1).
				self.increment_p2_index()?;
				return None;
			}

			self.p1_index += 1;
			Some(())
		}

		/// Advances to and returns the next mapped page from the current walk
		/// position. May return `None` when a table boundary is crossed even
		/// though more mapped pages remain (see the increment helpers); the
		/// `Iterator` impl retries once to continue the walk.
		fn next_forward(&mut self) -> Option<MappedPageItem> {
			let p4 = self.level_4_table;

			// Find the next present P4 entry; a huge-page bit here is invalid.
			let p3 = loop {
				match self.page_table_walker.next_table(&p4[self.p4_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p4_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						panic!("level 4 entry has huge page bit set")
					}
				}
			};

			// Find the next present P3 entry; a huge-page bit means a 1 GiB
			// mapping, which is returned directly.
			let p2 = loop {
				match self.page_table_walker.next_table(&p3[self.p3_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p3_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						let page =
							Page::from_page_table_indices_1gib(self.p4_index()?, self.p3_index()?);
						let entry = &p3[self.p3_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size1GiB(MappedPage { page, frame, flags });

						self.increment_p3_index();
						return Some(mapped_page);
					}
				}
			};

			// Find the next present P2 entry; a huge-page bit means a 2 MiB
			// mapping, which is returned directly.
			let p1 = loop {
				match self.page_table_walker.next_table(&p2[self.p2_index()?]) {
					Ok(page_table) => break page_table,
					Err(PageTableWalkError::NotMapped) => self.increment_p2_index()?,
					Err(PageTableWalkError::MappedToHugePage) => {
						let page = Page::from_page_table_indices_2mib(
							self.p4_index()?,
							self.p3_index()?,
							self.p2_index()?,
						);
						let entry = &p2[self.p2_index()?];
						let frame = PhysFrame::containing_address(entry.addr());
						let flags = entry.flags();
						let mapped_page =
							MappedPageItem::Size2MiB(MappedPage { page, frame, flags });

						self.increment_p2_index();
						return Some(mapped_page);
					}
				}
			};

			// Scan the P1 table for the next used entry (a 4 KiB mapping).
			loop {
				let p1_entry = &p1[self.p1_index()?];

				if p1_entry.is_unused() {
					self.increment_p1_index()?;
					continue;
				}

				let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
					Ok(frame) => frame,
					Err(AddressNotAligned) => {
						// Skip malformed entries instead of panicking.
						warn!("Invalid frame address: {:p}", p1_entry.addr());
						self.increment_p1_index()?;
						continue;
					}
				};

				let page = Page::from_page_table_indices(
					self.p4_index()?,
					self.p3_index()?,
					self.p2_index()?,
					self.p1_index()?,
				);
				let flags = p1_entry.flags();
				let mapped_page = MappedPageItem::Size4KiB(MappedPage { page, frame, flags });

				self.increment_p1_index();
				return Some(mapped_page);
			}
		}
	}
922
	impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableIter<'a, P> {
		type Item = MappedPageItem;

		fn next(&mut self) -> Option<Self::Item> {
			// `next_forward` returns `None` whenever the walk crosses into the
			// next entry of a parent table, not only at the very end, so retry
			// once before treating the iteration as finished.
			self.next_forward().or_else(|| self.next_forward())
		}
	}
930}
931
mod walker {
	//! Taken from [`x86_64`]
	//!
	//! Minimal helper for descending one level in a page-table hierarchy,
	//! translating frames to pointers via a [`PageTableFrameMapping`].

	use x86_64::structures::paging::PageTable;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::page_table::{FrameError, PageTableEntry};

	/// Resolves page-table entries to the next-level table using the given
	/// frame-to-pointer mapping.
	#[derive(Clone, Debug)]
	pub(super) struct PageTableWalker<P: PageTableFrameMapping> {
		page_table_frame_mapping: P,
	}

	impl<P: PageTableFrameMapping> PageTableWalker<P> {
		/// Creates a new walker.
		///
		/// # Safety
		///
		/// `page_table_frame_mapping` must return valid pointers to the page
		/// tables for all frames passed to [`Self::next_table`].
		#[inline]
		pub unsafe fn new(page_table_frame_mapping: P) -> Self {
			Self {
				page_table_frame_mapping,
			}
		}

		/// Internal helper function to get a reference to the page table of the next level.
		///
		/// Returns `PageTableWalkError::NotMapped` if the entry is unused. Returns
		/// `PageTableWalkError::MappedToHugePage` if the `HUGE_PAGE` flag is set
		/// in the passed entry.
		#[inline]
		pub(super) fn next_table<'b>(
			&self,
			entry: &'b PageTableEntry,
		) -> Result<&'b PageTable, PageTableWalkError> {
			let page_table_ptr = self
				.page_table_frame_mapping
				.frame_to_pointer(entry.frame()?);
			// SAFETY: guaranteed valid by the contract of `Self::new`.
			let page_table: &PageTable = unsafe { &*page_table_ptr };

			Ok(page_table)
		}
	}

	/// Why descending to the next page-table level failed.
	#[derive(Debug)]
	pub(super) enum PageTableWalkError {
		NotMapped,
		MappedToHugePage,
	}

	// Lets `entry.frame()?` convert the x86_64 crate's `FrameError` directly.
	impl From<FrameError> for PageTableWalkError {
		#[inline]
		fn from(err: FrameError) -> Self {
			match err {
				FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
				FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
			}
		}
	}
}
987
mod offset_page_table {
	//! Taken from [`x86_64`]
	//!
	//! [`PageTableFrameMapping`] implementation for page tables mapped at a
	//! constant virtual offset from physical memory (offset `0` for the
	//! identity mapping used in this module).

	use x86_64::VirtAddr;
	use x86_64::structures::paging::mapper::PageTableFrameMapping;
	use x86_64::structures::paging::{PageTable, PhysFrame};

	/// Virtual offset at which all physical memory is mapped.
	#[derive(Clone, Debug)]
	pub struct PhysOffset {
		pub offset: VirtAddr,
	}

	unsafe impl PageTableFrameMapping for PhysOffset {
		fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
			// virtual address = physical frame address + constant offset
			let virt = self.offset + frame.start_address().as_u64();
			virt.as_mut_ptr()
		}
	}
}