use core::fmt::Debug;
use core::ptr;

use free_list::PageLayout;
use x86_64::registers::control::{Cr0, Cr0Flags, Cr2, Cr3};
#[cfg(feature = "common-os")]
use x86_64::registers::segmentation::SegmentSelector;
pub use x86_64::structures::idt::InterruptStackFrame as ExceptionStackFrame;
use x86_64::structures::idt::PageFaultErrorCode;
pub use x86_64::structures::paging::PageTableFlags as PageTableEntryFlags;
use x86_64::structures::paging::frame::PhysFrameRange;
use x86_64::structures::paging::mapper::{MapToError, MappedFrame, TranslateResult, UnmapError};
use x86_64::structures::paging::page::PageRange;
use x86_64::structures::paging::{
    FrameAllocator, Mapper, OffsetPageTable, Page, PageTable, PhysFrame, Size4KiB, Translate,
};

use crate::arch::x86_64::kernel::processor;
use crate::arch::x86_64::mm::{PhysAddr, VirtAddr};
use crate::mm::{FrameAlloc, PageRangeAllocator};
use crate::{env, scheduler};

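// Glue between the kernel's physical frame allocator and the `x86_64` crate's
// `FrameAllocator` trait, so the mapper can allocate frames for new page tables.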
unsafe impl FrameAllocator<Size4KiB> for FrameAlloc {
    fn allocate_frame(&mut self) -> Option<PhysFrame<Size4KiB>> {
        let size = usize::try_from(Size4KiB::SIZE).unwrap();
        let layout = PageLayout::from_size(size).unwrap();

        let range = FrameAlloc::allocate(layout).ok()?;

        let phys_addr = PhysAddr::from(range.start());
        Some(PhysFrame::from_start_address(phys_addr.into()).unwrap())
    }
}

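/// Builder-style helpers on top of `PageTableEntryFlags` for composing common
/// flag combinations, for example (illustrative only, as used by `map_heap`):
///
/// ```ignore
/// let mut flags = PageTableEntryFlags::empty();
/// flags.normal().writable().execute_disable();
/// ```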
pub trait PageTableEntryFlagsExt {
    fn device(&mut self) -> &mut Self;

    fn normal(&mut self) -> &mut Self;

    #[cfg(feature = "acpi")]
    fn read_only(&mut self) -> &mut Self;

    fn writable(&mut self) -> &mut Self;

    fn execute_disable(&mut self) -> &mut Self;

    #[cfg(feature = "common-os")]
    fn execute_enable(&mut self) -> &mut Self;

    #[cfg(feature = "common-os")]
    fn user(&mut self) -> &mut Self;

    #[expect(dead_code)]
    #[cfg(feature = "common-os")]
    fn kernel(&mut self) -> &mut Self;
}

impl PageTableEntryFlagsExt for PageTableEntryFlags {
    fn device(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::NO_CACHE);
        self
    }

    fn normal(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::NO_CACHE);
        self
    }

    #[cfg(feature = "acpi")]
    fn read_only(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::WRITABLE);
        self
    }

    fn writable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::WRITABLE);
        self
    }

    fn execute_disable(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::NO_EXECUTE);
        self
    }

    #[cfg(feature = "common-os")]
    fn execute_enable(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::NO_EXECUTE);
        self
    }

    #[cfg(feature = "common-os")]
    fn user(&mut self) -> &mut Self {
        self.insert(PageTableEntryFlags::USER_ACCESSIBLE);
        self
    }

    #[cfg(feature = "common-os")]
    fn kernel(&mut self) -> &mut Self {
        self.remove(PageTableEntryFlags::USER_ACCESSIBLE);
        self
    }
}

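// Re-export the `x86_64` page-size types under the names used throughout the kernel.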
pub use x86_64::structures::paging::{
    PageSize, Size1GiB as HugePageSize, Size2MiB as LargePageSize, Size4KiB as BasePageSize,
};

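/// Returns an `OffsetPageTable` for the level 4 page table that is currently
/// active in `CR3`, with a physical-memory offset of `0`, i.e. assuming the page
/// tables themselves are identity-mapped.
///
/// # Safety
///
/// The returned mapper aliases the live page tables; callers must avoid creating
/// overlapping mutable references to them.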
pub unsafe fn identity_mapped_page_table() -> OffsetPageTable<'static> {
    let level_4_table_addr = Cr3::read().0.start_address().as_u64();
    let level_4_table_ptr =
        ptr::with_exposed_provenance_mut::<PageTable>(level_4_table_addr.try_into().unwrap());
    unsafe {
        let level_4_table = level_4_table_ptr.as_mut().unwrap();
        OffsetPageTable::new(level_4_table, x86_64::addr::VirtAddr::new(0x0))
    }
}

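/// Translates a virtual address into the physical address it is currently mapped
/// to, or `None` if the address is not mapped.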
pub fn virtual_to_physical(virtual_address: VirtAddr) -> Option<PhysAddr> {
    let addr = x86_64::VirtAddr::from(virtual_address);

    let translate_result = unsafe { identity_mapped_page_table() }.translate(addr);

    match translate_result {
        TranslateResult::NotMapped | TranslateResult::InvalidFrameAddress(_) => {
            trace!("Unable to determine the physical address of 0x{virtual_address:X}");
            None
        }
        TranslateResult::Mapped { frame, offset, .. } => {
            Some(PhysAddr::new((frame.start_address() + offset).as_u64()))
        }
    }
}

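/// Maps `count` pages of size `S`, starting at `virtual_address`, to the physical
/// frames starting at `physical_address`, applying `flags` (the `PRESENT` flag is
/// added automatically). Pages that are already mapped are unmapped first, and a
/// TLB shootdown is issued on SMP builds if that was necessary.
///
/// Typical use, mapping one uncached, writable device page (illustrative sketch;
/// `virt_addr` and `phys_addr` are placeholder addresses):
///
/// ```ignore
/// let mut flags = PageTableEntryFlags::empty();
/// flags.device().writable().execute_disable();
/// map::<BasePageSize>(virt_addr, phys_addr, 1, flags);
/// ```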
pub fn map<S>(
    virtual_address: VirtAddr,
    physical_address: PhysAddr,
    count: usize,
    flags: PageTableEntryFlags,
) where
    S: PageSize + Debug,
    for<'a> OffsetPageTable<'a>: Mapper<S>,
{
    let pages = {
        let start = Page::<S>::containing_address(virtual_address.into());
        let end = start + count as u64;
        Page::range(start, end)
    };

    let frames = {
        let start = PhysFrame::<S>::containing_address(physical_address.into());
        let end = start + count as u64;
        PhysFrame::range(start, end)
    };

    let flags = flags | PageTableEntryFlags::PRESENT;

    trace!("Mapping {pages:?} to {frames:?} with {flags:?}");

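    // Maps each page to its frame, unmapping any previous mapping first. Returns
    // whether anything had to be unmapped, so the caller knows a TLB shootdown is
    // needed on the other cores.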
    unsafe fn map_pages<M, S>(
        mapper: &mut M,
        pages: PageRange<S>,
        frames: PhysFrameRange<S>,
        flags: PageTableEntryFlags,
    ) -> bool
    where
        M: Mapper<S>,
        S: PageSize + Debug,
    {
        let mut unmapped = false;
        for (page, frame) in pages.zip(frames) {
            let unmap = mapper.unmap(page);
            if let Ok((_frame, flush)) = unmap {
                unmapped = true;
                flush.flush();
                debug!("Had to unmap page {page:?} before mapping.");
            }
            let map = unsafe { mapper.map_to(page, frame, flags, &mut FrameAlloc) };
            match map {
                Ok(mapper_flush) => mapper_flush.flush(),
                Err(err) => panic!("Could not map {page:?} to {frame:?}: {err:?}"),
            }
        }
        unmapped
    }

    let unmapped = unsafe { map_pages(&mut identity_mapped_page_table(), pages, frames, flags) };

    if unmapped {
        #[cfg(feature = "smp")]
        crate::arch::x86_64::kernel::apic::ipi_tlb_flush();
    }
}

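/// Backs the virtual range starting at `virt_addr` with `count` freshly allocated
/// physical frames of size `S` and maps them as writable, non-executable kernel
/// memory for the heap. On allocation failure, `Err(n)` reports how many pages
/// were mapped successfully before the failure.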
pub fn map_heap<S>(virt_addr: VirtAddr, count: usize) -> Result<(), usize>
where
    S: PageSize + Debug,
    for<'a> OffsetPageTable<'a>: Mapper<S>,
{
    let flags = {
        let mut flags = PageTableEntryFlags::empty();
        flags.normal().writable().execute_disable();
        flags
    };

    let virt_addrs = (0..count).map(|n| virt_addr + n as u64 * S::SIZE);

    for (map_counter, virt_addr) in virt_addrs.enumerate() {
        let layout = PageLayout::from_size_align(S::SIZE as usize, S::SIZE as usize).unwrap();
        let frame_range = FrameAlloc::allocate(layout).map_err(|_| map_counter)?;
        let phys_addr = PhysAddr::from(frame_range.start());
        map::<S>(virt_addr, phys_addr, 1, flags);
    }

    Ok(())
}

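/// Identity-maps the frame of size `S` at `phys_addr` as writable, non-executable
/// kernel memory. An identical existing mapping (or a covering huge-page mapping
/// to the same physical address) is accepted as already done.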
pub fn identity_map<S>(phys_addr: PhysAddr)
where
    S: PageSize + Debug,
    for<'a> OffsetPageTable<'a>: Mapper<S>,
{
    let frame = PhysFrame::<S>::from_start_address(phys_addr.into()).unwrap();
    let flags = PageTableEntryFlags::PRESENT
        | PageTableEntryFlags::WRITABLE
        | PageTableEntryFlags::NO_EXECUTE;
    let mapper_result =
        unsafe { identity_mapped_page_table().identity_map(frame, flags, &mut FrameAlloc) };

    match mapper_result {
        Ok(mapper_flush) => mapper_flush.flush(),
        Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
        Err(MapToError::ParentEntryHugePage) => {
            let page_table = unsafe { identity_mapped_page_table() };
            let virt_addr = VirtAddr::new(frame.start_address().as_u64()).into();
            let phys_addr = frame.start_address();
            assert_eq!(page_table.translate_addr(virt_addr), Some(phys_addr));
        }
        Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
    }
}

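/// Unmaps `count` pages of size `S` starting at `virtual_address`. Pages that are
/// not mapped are skipped with a debug message.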
pub fn unmap<S>(virtual_address: VirtAddr, count: usize)
where
    S: PageSize + Debug,
    for<'a> OffsetPageTable<'a>: Mapper<S>,
{
    trace!("Unmapping virtual address {virtual_address:p} ({count} pages)");

    let first_page = Page::<S>::containing_address(virtual_address.into());
    let last_page = first_page + count as u64;
    let range = Page::range(first_page, last_page);

    for page in range {
        let unmap_result = unsafe { identity_mapped_page_table() }.unmap(page);
        match unmap_result {
            Ok((_frame, flush)) => flush.flush(),
            Err(UnmapError::PageNotMapped) => {
                debug!("Tried to unmap {page:?}, which was not mapped.");
            }
            Err(err) => panic!("{err:?}"),
        }
    }
}

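/// Page-fault exception handler: logs the faulting address (`CR2`), the error
/// code, and the exception stack frame, then calls `scheduler::abort()`.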
#[cfg(not(feature = "common-os"))]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
    stack_frame: ExceptionStackFrame,
    error_code: PageFaultErrorCode,
) {
    error!("Page fault (#PF)!");
    error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
    error!("error_code = {error_code:?}");
    error!("fs = {:#X}", processor::readfs());
    error!("gs = {:#X}", processor::readgs());
    error!("stack_frame = {stack_frame:#?}");
    scheduler::abort();
}

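/// Page-fault handler for `common-os` builds: additionally executes `swapgs` when
/// the faulting code segment is not the kernel's (selector `0x08`) to switch to
/// the kernel `GS` base, then logs the fault and calls `scheduler::abort()`.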
#[cfg(feature = "common-os")]
pub(crate) extern "x86-interrupt" fn page_fault_handler(
    mut stack_frame: ExceptionStackFrame,
    error_code: PageFaultErrorCode,
) {
    unsafe {
        if stack_frame.as_mut().read().code_segment != SegmentSelector(0x08) {
            core::arch::asm!("swapgs", options(nostack));
        }
    }
    error!("Page fault (#PF)!");
    error!("page_fault_linear_address = {:p}", Cr2::read().unwrap());
    error!("error_code = {error_code:?}");
    error!("fs = {:#X}", processor::readfs());
    error!("gs = {:#X}", processor::readgs());
    error!("stack_frame = {stack_frame:#?}");
    scheduler::abort();
}

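/// Initializes paging support: dumps the current page tables at trace level and,
/// on UEFI systems, makes the level 4 page table writable.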
pub fn init() {
    unsafe {
        log_page_tables();
    }
    make_p4_writable();
}

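// On UEFI boots, the firmware-provided page tables can leave the level 4 table
// mapped read-only, so set the WRITABLE flag on its mapping (temporarily clearing
// CR0.WP so the change can be written).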
fn make_p4_writable() {
    debug!("Making P4 table writable");

    if !env::is_uefi() {
        return;
    }

    let mut pt = unsafe { identity_mapped_page_table() };

    let p4_page = {
        let (p4_frame, _) = Cr3::read_raw();
        let p4_addr = x86_64::VirtAddr::new(p4_frame.start_address().as_u64());
        Page::<Size4KiB>::from_start_address(p4_addr).unwrap()
    };

    let TranslateResult::Mapped { frame, flags, .. } = pt.translate(p4_page.start_address()) else {
        unreachable!()
    };

    let make_writable = || unsafe {
        let flags = flags | PageTableEntryFlags::WRITABLE;
        match frame {
            MappedFrame::Size1GiB(_) => pt.set_flags_p3_entry(p4_page, flags).unwrap().ignore(),
            MappedFrame::Size2MiB(_) => pt.set_flags_p2_entry(p4_page, flags).unwrap().ignore(),
            MappedFrame::Size4KiB(_) => pt.update_flags(p4_page, flags).unwrap().ignore(),
        }
    };

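    // Runs `f` with CR0.WP cleared, so the kernel can write to pages that are
    // mapped read-only, and restores the previous CR0 value afterwards.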
    unsafe fn without_protect<F, R>(f: F) -> R
    where
        F: FnOnce() -> R,
    {
        let cr0 = Cr0::read();
        if cr0.contains(Cr0Flags::WRITE_PROTECT) {
            unsafe { Cr0::write(cr0 - Cr0Flags::WRITE_PROTECT) }
        }
        let ret = f();
        if cr0.contains(Cr0Flags::WRITE_PROTECT) {
            unsafe { Cr0::write(cr0) }
        }
        ret
    }

    unsafe { without_protect(make_writable) }
}

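/// Logs every mapped page range of the active page tables at trace level
/// (no-op unless trace logging is enabled).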
pub unsafe fn log_page_tables() {
    use log::Level;

    use self::mapped_page_range_display::OffsetPageTableExt;

    if !log_enabled!(Level::Trace) {
        return;
    }

    let page_table = unsafe { identity_mapped_page_table() };
    trace!("Page tables:\n{}", page_table.display());
}

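/// `Display` adapters that render the mapped page ranges produced by
/// `mapped_page_table_iter` as human-readable lines for the page-table dump in
/// `log_page_tables`.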
pub mod mapped_page_range_display {
    use core::fmt::{self, Write};

    use x86_64::structures::paging::mapper::PageTableFrameMapping;
    use x86_64::structures::paging::{MappedPageTable, OffsetPageTable, PageSize};

    use super::mapped_page_table_iter::{
        self, MappedPageRangeInclusive, MappedPageRangeInclusiveItem,
        MappedPageTableRangeInclusiveIter,
    };
    use super::offset_page_table::PhysOffset;

    #[expect(dead_code)]
    pub trait MappedPageTableExt<P: PageTableFrameMapping + Clone> {
        fn display(&self) -> MappedPageTableDisplay<'_, &P>;
    }

    impl<P: PageTableFrameMapping + Clone> MappedPageTableExt<P> for MappedPageTable<'_, P> {
        fn display(&self) -> MappedPageTableDisplay<'_, &P> {
            MappedPageTableDisplay {
                inner: mapped_page_table_iter::mapped_page_table_range_iter(self),
            }
        }
    }

    pub trait OffsetPageTableExt {
        fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset>;
    }

    impl OffsetPageTableExt for OffsetPageTable<'_> {
        fn display(&self) -> MappedPageTableDisplay<'_, PhysOffset> {
            MappedPageTableDisplay {
                inner: mapped_page_table_iter::offset_page_table_range_iter(self),
            }
        }
    }

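    /// Formats every contiguous mapped page range of a page table, one range per
    /// line.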
    pub struct MappedPageTableDisplay<'a, P: PageTableFrameMapping + Clone> {
        inner: MappedPageTableRangeInclusiveIter<'a, P>,
    }

    impl<P: PageTableFrameMapping + Clone> fmt::Display for MappedPageTableDisplay<'_, P> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let mut has_fields = false;

            for mapped_page_range in self.inner.clone() {
                if has_fields {
                    f.write_char('\n')?;
                }
                write!(f, "{}", mapped_page_range.display())?;

                has_fields = true;
            }

            Ok(())
        }
    }

    pub trait MappedPageRangeInclusiveItemExt {
        fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_>;
    }

    impl MappedPageRangeInclusiveItemExt for MappedPageRangeInclusiveItem {
        fn display(&self) -> MappedPageRangeInclusiveItemDisplay<'_> {
            MappedPageRangeInclusiveItemDisplay { inner: self }
        }
    }

    pub struct MappedPageRangeInclusiveItemDisplay<'a> {
        inner: &'a MappedPageRangeInclusiveItem,
    }

    impl fmt::Display for MappedPageRangeInclusiveItemDisplay<'_> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            match self.inner {
                MappedPageRangeInclusiveItem::Size4KiB(range) => range.display().fmt(f),
                MappedPageRangeInclusiveItem::Size2MiB(range) => range.display().fmt(f),
                MappedPageRangeInclusiveItem::Size1GiB(range) => range.display().fmt(f),
            }
        }
    }

    pub trait MappedPageRangeInclusiveExt<S: PageSize> {
        fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S>;
    }

    impl<S: PageSize> MappedPageRangeInclusiveExt<S> for MappedPageRangeInclusive<S> {
        fn display(&self) -> MappedPageRangeInclusiveDisplay<'_, S> {
            MappedPageRangeInclusiveDisplay { inner: self }
        }
    }

    pub struct MappedPageRangeInclusiveDisplay<'a, S: PageSize> {
        inner: &'a MappedPageRangeInclusive<S>,
    }

    impl<S: PageSize> fmt::Display for MappedPageRangeInclusiveDisplay<'_, S> {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            let size = S::DEBUG_STR;
            let len = self.inner.page_range.len();
            let page_start = self.inner.page_range.start.start_address();
            let page_end = self.inner.page_range.end.start_address();
            let frame_start = self.inner.frame_range.start.start_address();
            let frame_end = self.inner.frame_range.end.start_address();
            let flags = self.inner.flags;
            let format_phys = if page_start.as_u64() == frame_start.as_u64() {
                assert_eq!(page_end.as_u64(), frame_end.as_u64());
                format_args!("{:>39}", "identity mapped")
            } else {
                format_args!("{frame_start:18p}..={frame_end:18p}")
            };
            write!(
                f,
                "size: {size}, len: {len:5}, virt: {page_start:18p}..={page_end:18p}, phys: {format_phys}, flags: {flags:?}"
            )
        }
    }
}

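/// Iterators over the mapped pages of a page-table hierarchy, plus helpers for
/// grouping consecutive pages with identical flags into inclusive ranges.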
pub mod mapped_page_table_iter {
    use core::fmt;
    use core::ops::{Add, AddAssign, Sub, SubAssign};

    use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
    use x86_64::structures::paging::mapper::PageTableFrameMapping;
    use x86_64::structures::paging::page::{AddressNotAligned, PageRangeInclusive};
    use x86_64::structures::paging::{
        MappedPageTable, OffsetPageTable, Page, PageSize, PageTable, PageTableFlags,
        PageTableIndex, PhysFrame, Size1GiB, Size2MiB, Size4KiB,
    };

    use super::offset_page_table::PhysOffset;
    use super::walker::{PageTableWalkError, PageTableWalker};

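    /// A contiguous, inclusive range of pages of size `S` that is mapped to a
    /// contiguous range of physical frames with identical flags.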
    #[derive(Debug)]
    pub struct MappedPageRangeInclusive<S: PageSize> {
        pub page_range: PageRangeInclusive<S>,
        pub frame_range: PhysFrameRangeInclusive<S>,
        pub flags: PageTableFlags,
    }

    impl<S: PageSize> TryFrom<(MappedPage<S>, MappedPage<S>)> for MappedPageRangeInclusive<S> {
        type Error = TryFromMappedPageError;

        fn try_from((start, end): (MappedPage<S>, MappedPage<S>)) -> Result<Self, Self::Error> {
            if start.flags != end.flags {
                return Err(TryFromMappedPageError);
            }

            Ok(Self {
                page_range: PageRangeInclusive {
                    start: start.page,
                    end: end.page,
                },
                frame_range: PhysFrameRangeInclusive {
                    start: start.frame,
                    end: end.frame,
                },
                flags: start.flags,
            })
        }
    }

    #[derive(Debug)]
    pub enum MappedPageRangeInclusiveItem {
        Size4KiB(MappedPageRangeInclusive<Size4KiB>),
        Size2MiB(MappedPageRangeInclusive<Size2MiB>),
        Size1GiB(MappedPageRangeInclusive<Size1GiB>),
    }

    impl TryFrom<(MappedPageItem, MappedPageItem)> for MappedPageRangeInclusiveItem {
        type Error = TryFromMappedPageError;

        fn try_from((start, end): (MappedPageItem, MappedPageItem)) -> Result<Self, Self::Error> {
            match (start, end) {
                (MappedPageItem::Size4KiB(start), MappedPageItem::Size4KiB(end)) => {
                    let range = MappedPageRangeInclusive::try_from((start, end))?;
                    Ok(Self::Size4KiB(range))
                }
                (MappedPageItem::Size2MiB(start), MappedPageItem::Size2MiB(end)) => {
                    let range = MappedPageRangeInclusive::try_from((start, end))?;
                    Ok(Self::Size2MiB(range))
                }
                (MappedPageItem::Size1GiB(start), MappedPageItem::Size1GiB(end)) => {
                    let range = MappedPageRangeInclusive::try_from((start, end))?;
                    Ok(Self::Size1GiB(range))
                }
                (_, _) => Err(TryFromMappedPageError),
            }
        }
    }

    #[derive(PartialEq, Eq, Clone, Debug)]
    pub struct TryFromMappedPageError;

    impl fmt::Display for TryFromMappedPageError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("provided mapped pages were not compatible")
        }
    }

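    /// Coalesces the pages yielded by `MappedPageTableIter` into inclusive
    /// ranges: consecutive pages of the same size whose frames and flags also
    /// continue seamlessly are merged into a single
    /// `MappedPageRangeInclusiveItem`.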
    #[derive(Clone)]
    pub struct MappedPageTableRangeInclusiveIter<'a, P: PageTableFrameMapping> {
        inner: MappedPageTableIter<'a, P>,
        start: Option<MappedPageItem>,
        end: Option<MappedPageItem>,
    }

    pub fn mapped_page_table_range_iter<'a, P: PageTableFrameMapping>(
        page_table: &'a MappedPageTable<'a, P>,
    ) -> MappedPageTableRangeInclusiveIter<'a, &'a P> {
        MappedPageTableRangeInclusiveIter {
            inner: mapped_page_table_iter(page_table),
            start: None,
            end: None,
        }
    }

    pub fn offset_page_table_range_iter<'a>(
        page_table: &'a OffsetPageTable<'a>,
    ) -> MappedPageTableRangeInclusiveIter<'a, PhysOffset> {
        MappedPageTableRangeInclusiveIter {
            inner: offset_page_table_iter(page_table),
            start: None,
            end: None,
        }
    }

    impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableRangeInclusiveIter<'a, P> {
        type Item = MappedPageRangeInclusiveItem;

        fn next(&mut self) -> Option<Self::Item> {
            if self.start.is_none() {
                self.start = self.inner.next();
                self.end = self.start;
            }

            let Some(start) = &mut self.start else {
                return None;
            };
            let end = self.end.as_mut().unwrap();

            for mapped_page in self.inner.by_ref() {
                if mapped_page == *end + 1 {
                    *end = mapped_page;
                    continue;
                }

                let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
                *start = mapped_page;
                *end = mapped_page;
                return Some(range);
            }

            let range = MappedPageRangeInclusiveItem::try_from((*start, *end)).unwrap();
            self.start = None;
            self.end = None;
            Some(range)
        }
    }

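    /// A single mapped page of size `S` together with its backing physical frame
    /// and the flags of its page-table entry. Adding `1` advances both the page
    /// and the frame by one, which is what the range coalescing above relies on.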
    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
    pub struct MappedPage<S: PageSize> {
        pub page: Page<S>,
        pub frame: PhysFrame<S>,
        pub flags: PageTableFlags,
    }

    impl<S: PageSize> Add<u64> for MappedPage<S> {
        type Output = Self;

        fn add(self, rhs: u64) -> Self::Output {
            Self {
                page: self.page + rhs,
                frame: self.frame + rhs,
                flags: self.flags,
            }
        }
    }

    impl<S: PageSize> Sub<u64> for MappedPage<S> {
        type Output = Self;

        fn sub(self, rhs: u64) -> Self::Output {
            Self {
                page: self.page - rhs,
                frame: self.frame - rhs,
                flags: self.flags,
            }
        }
    }

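    /// A `MappedPage` of any of the three supported page sizes.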
    #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Copy, Debug)]
    pub enum MappedPageItem {
        Size4KiB(MappedPage<Size4KiB>),
        Size2MiB(MappedPage<Size2MiB>),
        Size1GiB(MappedPage<Size1GiB>),
    }

    impl Add<u64> for MappedPageItem {
        type Output = Self;

        fn add(self, rhs: u64) -> Self::Output {
            match self {
                Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page + rhs),
                Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page + rhs),
                Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page + rhs),
            }
        }
    }

    impl AddAssign<u64> for MappedPageItem {
        fn add_assign(&mut self, rhs: u64) {
            *self = *self + rhs;
        }
    }

    impl Sub<u64> for MappedPageItem {
        type Output = Self;

        fn sub(self, rhs: u64) -> Self::Output {
            match self {
                Self::Size4KiB(mapped_page) => Self::Size4KiB(mapped_page - rhs),
                Self::Size2MiB(mapped_page) => Self::Size2MiB(mapped_page - rhs),
                Self::Size1GiB(mapped_page) => Self::Size1GiB(mapped_page - rhs),
            }
        }
    }

    impl SubAssign<u64> for MappedPageItem {
        fn sub_assign(&mut self, rhs: u64) {
            *self = *self - rhs;
        }
    }

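    /// Iterator over all mapped pages of a page-table hierarchy. It walks the
    /// four table levels using the `p4_index`..`p1_index` cursor and yields
    /// 1 GiB, 2 MiB, and 4 KiB mappings as they are encountered.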
    #[derive(Clone)]
    pub struct MappedPageTableIter<'a, P: PageTableFrameMapping> {
        page_table_walker: PageTableWalker<P>,
        level_4_table: &'a PageTable,
        p4_index: u16,
        p3_index: u16,
        p2_index: u16,
        p1_index: u16,
    }

    pub fn mapped_page_table_iter<'a, P: PageTableFrameMapping>(
        page_table: &'a MappedPageTable<'a, P>,
    ) -> MappedPageTableIter<'a, &'a P> {
        MappedPageTableIter {
            page_table_walker: unsafe {
                PageTableWalker::new(page_table.page_table_frame_mapping())
            },
            level_4_table: page_table.level_4_table(),
            p4_index: 0,
            p3_index: 0,
            p2_index: 0,
            p1_index: 0,
        }
    }

    pub fn offset_page_table_iter<'a>(
        page_table: &'a OffsetPageTable<'a>,
    ) -> MappedPageTableIter<'a, PhysOffset> {
        MappedPageTableIter {
            page_table_walker: unsafe {
                PageTableWalker::new(PhysOffset {
                    offset: page_table.phys_offset(),
                })
            },
            level_4_table: page_table.level_4_table(),
            p4_index: 0,
            p3_index: 0,
            p2_index: 0,
            p1_index: 0,
        }
    }

    impl<'a, P: PageTableFrameMapping> MappedPageTableIter<'a, P> {
        fn p4_index(&self) -> Option<PageTableIndex> {
            if self.p4_index >= 512 {
                return None;
            }

            Some(PageTableIndex::new(self.p4_index))
        }

        fn p3_index(&self) -> Option<PageTableIndex> {
            if self.p3_index >= 512 {
                return None;
            }

            Some(PageTableIndex::new(self.p3_index))
        }

        fn p2_index(&self) -> Option<PageTableIndex> {
            if self.p2_index >= 512 {
                return None;
            }

            Some(PageTableIndex::new(self.p2_index))
        }

        fn p1_index(&self) -> Option<PageTableIndex> {
            if self.p1_index >= 512 {
                return None;
            }

            Some(PageTableIndex::new(self.p1_index))
        }

        fn increment_p4_index(&mut self) -> Option<()> {
            if self.p4_index >= 511 {
                self.p4_index += 1;
                return None;
            }

            self.p4_index += 1;
            self.p3_index = 0;
            self.p2_index = 0;
            self.p1_index = 0;
            Some(())
        }

        fn increment_p3_index(&mut self) -> Option<()> {
            if self.p3_index == 511 {
                self.increment_p4_index()?;
                return None;
            }

            self.p3_index += 1;
            self.p2_index = 0;
            self.p1_index = 0;
            Some(())
        }

        fn increment_p2_index(&mut self) -> Option<()> {
            if self.p2_index == 511 {
                self.increment_p3_index()?;
                return None;
            }

            self.p2_index += 1;
            self.p1_index = 0;
            Some(())
        }

        fn increment_p1_index(&mut self) -> Option<()> {
            if self.p1_index == 511 {
                self.increment_p2_index()?;
                return None;
            }

            self.p1_index += 1;
            Some(())
        }

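        /// Advances to the next mapped page, descending from the level 4 table
        /// at the current indices. 1 GiB and 2 MiB mappings are detected via the
        /// huge-page bit on the P3/P2 entries. Returns `None` when an index
        /// increment rolls over into a higher-level entry (the iterator retries
        /// once in that case).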
        fn next_forward(&mut self) -> Option<MappedPageItem> {
            let p4 = self.level_4_table;

            let p3 = loop {
                match self.page_table_walker.next_table(&p4[self.p4_index()?]) {
                    Ok(page_table) => break page_table,
                    Err(PageTableWalkError::NotMapped) => self.increment_p4_index()?,
                    Err(PageTableWalkError::MappedToHugePage) => {
                        panic!("level 4 entry has huge page bit set")
                    }
                }
            };

            let p2 = loop {
                match self.page_table_walker.next_table(&p3[self.p3_index()?]) {
                    Ok(page_table) => break page_table,
                    Err(PageTableWalkError::NotMapped) => self.increment_p3_index()?,
                    Err(PageTableWalkError::MappedToHugePage) => {
                        let page =
                            Page::from_page_table_indices_1gib(self.p4_index()?, self.p3_index()?);
                        let entry = &p3[self.p3_index()?];
                        let frame = PhysFrame::containing_address(entry.addr());
                        let flags = entry.flags();
                        let mapped_page =
                            MappedPageItem::Size1GiB(MappedPage { page, frame, flags });

                        self.increment_p3_index();
                        return Some(mapped_page);
                    }
                }
            };

            let p1 = loop {
                match self.page_table_walker.next_table(&p2[self.p2_index()?]) {
                    Ok(page_table) => break page_table,
                    Err(PageTableWalkError::NotMapped) => self.increment_p2_index()?,
                    Err(PageTableWalkError::MappedToHugePage) => {
                        let page = Page::from_page_table_indices_2mib(
                            self.p4_index()?,
                            self.p3_index()?,
                            self.p2_index()?,
                        );
                        let entry = &p2[self.p2_index()?];
                        let frame = PhysFrame::containing_address(entry.addr());
                        let flags = entry.flags();
                        let mapped_page =
                            MappedPageItem::Size2MiB(MappedPage { page, frame, flags });

                        self.increment_p2_index();
                        return Some(mapped_page);
                    }
                }
            };

            loop {
                let p1_entry = &p1[self.p1_index()?];

                if p1_entry.is_unused() {
                    self.increment_p1_index()?;
                    continue;
                }

                let frame = match PhysFrame::from_start_address(p1_entry.addr()) {
                    Ok(frame) => frame,
                    Err(AddressNotAligned) => {
                        warn!("Invalid frame address: {:p}", p1_entry.addr());
                        self.increment_p1_index()?;
                        continue;
                    }
                };

                let page = Page::from_page_table_indices(
                    self.p4_index()?,
                    self.p3_index()?,
                    self.p2_index()?,
                    self.p1_index()?,
                );
                let flags = p1_entry.flags();
                let mapped_page = MappedPageItem::Size4KiB(MappedPage { page, frame, flags });

                self.increment_p1_index();
                return Some(mapped_page);
            }
        }
    }

    impl<'a, P: PageTableFrameMapping> Iterator for MappedPageTableIter<'a, P> {
        type Item = MappedPageItem;

        fn next(&mut self) -> Option<Self::Item> {
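            // `next_forward` bails out with `None` whenever an index increment
            // rolls over into the next higher-level entry, so retry once to
            // continue the walk there before ending the iteration.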
            self.next_forward().or_else(|| self.next_forward())
        }
    }
}

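/// Helper for following a page-table entry to the next-level table through a
/// `PageTableFrameMapping`.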
mod walker {
    use x86_64::structures::paging::PageTable;
    use x86_64::structures::paging::mapper::PageTableFrameMapping;
    use x86_64::structures::paging::page_table::{FrameError, PageTableEntry};

    #[derive(Clone, Debug)]
    pub(super) struct PageTableWalker<P: PageTableFrameMapping> {
        page_table_frame_mapping: P,
    }

    impl<P: PageTableFrameMapping> PageTableWalker<P> {
        #[inline]
        pub unsafe fn new(page_table_frame_mapping: P) -> Self {
            Self {
                page_table_frame_mapping,
            }
        }

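        /// Returns a reference to the page table that `entry` points to, or an
        /// error if the entry is unused or maps a huge page instead of a table.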
        #[inline]
        pub(super) fn next_table<'b>(
            &self,
            entry: &'b PageTableEntry,
        ) -> Result<&'b PageTable, PageTableWalkError> {
            let page_table_ptr = self
                .page_table_frame_mapping
                .frame_to_pointer(entry.frame()?);
            let page_table: &PageTable = unsafe { &*page_table_ptr };

            Ok(page_table)
        }
    }

    #[derive(Debug)]
    pub(super) enum PageTableWalkError {
        NotMapped,
        MappedToHugePage,
    }

    impl From<FrameError> for PageTableWalkError {
        #[inline]
        fn from(err: FrameError) -> Self {
            match err {
                FrameError::HugeFrame => PageTableWalkError::MappedToHugePage,
                FrameError::FrameNotPresent => PageTableWalkError::NotMapped,
            }
        }
    }
}

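/// `PageTableFrameMapping` implementation that converts a physical frame address
/// to a virtual pointer by adding a fixed offset (`0` for identity mapping).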
mod offset_page_table {
    use x86_64::VirtAddr;
    use x86_64::structures::paging::mapper::PageTableFrameMapping;
    use x86_64::structures::paging::{PageTable, PhysFrame};

    #[derive(Clone, Debug)]
    pub struct PhysOffset {
        pub offset: VirtAddr,
    }

    unsafe impl PageTableFrameMapping for PhysOffset {
        fn frame_to_pointer(&self, frame: PhysFrame) -> *mut PageTable {
            let virt = self.offset + frame.start_address().as_u64();
            virt.as_mut_ptr()
        }
    }
}