// x86_64/structures/paging/page_table.rs

//! Abstractions for page tables and page table entries.

use core::fmt;
#[cfg(feature = "step_trait")]
use core::iter::Step;
use core::ops::{Index, IndexMut};
#[cfg(feature = "memory_encryption")]
use core::sync::atomic::{AtomicU64, Ordering};

use super::{PageSize, PhysFrame, Size4KiB};
use crate::addr::PhysAddr;

use bitflags::bitflags;
use dep_const_fn::const_fn;

/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum FrameError {
    /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
    FrameNotPresent,
    /// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
    /// as return type, so a huge frame can't be returned.
    HugeFrame,
}

/// The mask used to remove flags from a page table entry to obtain the physical address.
#[cfg(feature = "memory_encryption")]
pub(crate) static PHYSICAL_ADDRESS_MASK: AtomicU64 = AtomicU64::new(0x000f_ffff_ffff_f000u64);

/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
    entry: u64,
}

impl PageTableEntry {
    /// Creates an unused page table entry.
    #[inline]
    pub const fn new() -> Self {
        PageTableEntry { entry: 0 }
    }

    /// Returns whether this entry is zero.
    #[inline]
    pub const fn is_unused(&self) -> bool {
        self.entry == 0
    }

    /// Sets this entry to zero.
    #[inline]
    pub fn set_unused(&mut self) {
        self.entry = 0;
    }

    /// Returns the flags of this entry.
    #[inline]
    #[const_fn(cfg(not(feature = "memory_encryption")))]
    pub const fn flags(&self) -> PageTableFlags {
        PageTableFlags::from_bits_retain(self.entry & !Self::physical_address_mask())
    }

    /// Returns the physical address mapped by this entry; it might be zero.
    #[inline]
    pub fn addr(&self) -> PhysAddr {
        PhysAddr::new(self.entry & Self::physical_address_mask())
    }

    /// Returns the physical frame mapped by this entry.
    ///
    /// Returns the following errors:
    ///
    /// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
    /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
    ///   `addr` function must be used)
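    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative, not from the original docs) of the
    /// `FrameNotPresent` case; it assumes the usual re-exports from
    /// `x86_64::structures::paging` and its public `page_table` module.
    ///
    /// ```
    /// use x86_64::structures::paging::PageTableEntry;
    /// use x86_64::structures::paging::page_table::FrameError;
    ///
    /// // A freshly created entry is unused, so `PRESENT` is not set.
    /// let entry = PageTableEntry::new();
    /// assert_eq!(entry.frame(), Err(FrameError::FrameNotPresent));
    /// ```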
    #[inline]
    pub fn frame(&self) -> Result<PhysFrame, FrameError> {
        if !self.flags().contains(PageTableFlags::PRESENT) {
            Err(FrameError::FrameNotPresent)
        } else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
            Err(FrameError::HugeFrame)
        } else {
            Ok(PhysFrame::containing_address(self.addr()))
        }
    }

    /// Map the entry to the specified physical address with the specified flags.
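    ///
    /// # Example
    ///
    /// A minimal sketch (illustrative, not from the original docs) of mapping
    /// an entry and reading the address back; it assumes the usual re-exports
    /// from `x86_64::structures::paging`.
    ///
    /// ```
    /// use x86_64::PhysAddr;
    /// use x86_64::structures::paging::{PageTableEntry, PageTableFlags};
    ///
    /// let mut entry = PageTableEntry::new();
    /// // The address must be 4KiB-aligned, otherwise `set_addr` panics.
    /// entry.set_addr(PhysAddr::new(0xb000), PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
    /// assert_eq!(entry.addr(), PhysAddr::new(0xb000));
    /// assert!(entry.flags().contains(PageTableFlags::WRITABLE));
    /// ```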
    #[inline]
    pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
        assert!(addr.is_aligned(Size4KiB::SIZE));
        self.entry = addr.as_u64() | flags.bits();
    }

    /// Map the entry to the specified physical frame with the specified flags.
    #[inline]
    pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
        assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
        self.set_addr(frame.start_address(), flags)
    }

    /// Sets the flags of this entry.
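    ///
    /// A brief sketch (illustrative only): updating the flags keeps the
    /// previously mapped address intact.
    ///
    /// ```
    /// use x86_64::PhysAddr;
    /// use x86_64::structures::paging::{PageTableEntry, PageTableFlags};
    ///
    /// let mut entry = PageTableEntry::new();
    /// entry.set_addr(PhysAddr::new(0x3000), PageTableFlags::PRESENT);
    /// entry.set_flags(PageTableFlags::PRESENT | PageTableFlags::WRITABLE);
    /// assert_eq!(entry.addr(), PhysAddr::new(0x3000));
    /// ```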
    #[inline]
    pub fn set_flags(&mut self, flags: PageTableFlags) {
        self.entry = self.addr().as_u64() | flags.bits();
    }

    #[inline(always)]
    #[cfg(not(feature = "memory_encryption"))]
    const fn physical_address_mask() -> u64 {
        0x000f_ffff_ffff_f000u64
    }

    #[inline(always)]
    #[cfg(feature = "memory_encryption")]
    fn physical_address_mask() -> u64 {
        PHYSICAL_ADDRESS_MASK.load(Ordering::Relaxed)
    }
}

impl Default for PageTableEntry {
    #[inline]
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for PageTableEntry {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut f = f.debug_struct("PageTableEntry");
        f.field("addr", &self.addr());
        f.field("flags", &self.flags());
        f.finish()
    }
}

bitflags! {
    /// Possible flags for a page table entry.
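    ///
    /// # Example
    ///
    /// A short sketch (illustrative only) of combining and testing flags with
    /// the usual `bitflags` operators:
    ///
    /// ```
    /// use x86_64::structures::paging::PageTableFlags;
    ///
    /// let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
    /// assert!(flags.contains(PageTableFlags::PRESENT));
    /// assert!(!flags.contains(PageTableFlags::NO_EXECUTE));
    /// ```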
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct PageTableFlags: u64 {
        /// Specifies whether the mapped frame or page table is loaded in memory.
        const PRESENT =         1;
        /// Controls whether writes to the mapped frames are allowed.
        ///
        /// If this bit is unset in a level 1 page table entry, the mapped frame is read-only.
        /// If this bit is unset in a higher level page table entry the complete range of mapped
        /// pages is read-only.
        const WRITABLE =        1 << 1;
        /// Controls whether accesses from userspace (i.e. ring 3) are permitted.
        const USER_ACCESSIBLE = 1 << 2;
        /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
        /// policy is used.
        const WRITE_THROUGH =   1 << 3;
        /// Disables caching for the pointed entry.
        const NO_CACHE =        1 << 4;
        /// Set by the CPU when the mapped frame or page table is accessed.
        const ACCESSED =        1 << 5;
        /// Set by the CPU on a write to the mapped frame.
        const DIRTY =           1 << 6;
        /// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
        /// P2 or P3 tables.
        const HUGE_PAGE =       1 << 7;
        /// Indicates that the mapping is present in all address spaces, so it isn't flushed from
        /// the TLB on an address space switch.
        const GLOBAL =          1 << 8;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_9 =           1 << 9;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_10 =          1 << 10;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_11 =          1 << 11;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_52 =          1 << 52;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_53 =          1 << 53;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_54 =          1 << 54;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_55 =          1 << 55;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_56 =          1 << 56;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_57 =          1 << 57;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_58 =          1 << 58;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_59 =          1 << 59;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_60 =          1 << 60;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_61 =          1 << 61;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_62 =          1 << 62;
        /// Forbid code execution from the mapped frames.
        ///
        /// Can only be used when the no-execute page protection feature is enabled in the EFER
        /// register.
        const NO_EXECUTE =      1 << 63;
    }
}

/// The number of entries in a page table.
const ENTRY_COUNT: usize = 512;

/// Represents a page table.
///
/// Always page-sized.
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 16th page table entry.
///
/// Note that while this type implements [`Clone`], users must be careful not to introduce
/// mutable aliasing through cloned page tables.
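///
/// # Example
///
/// A minimal sketch (illustrative, not from the original docs) of creating a
/// table and setting an entry through the `Index`/`IndexMut` implementations;
/// it assumes the usual re-exports from `x86_64::structures::paging`.
///
/// ```
/// use x86_64::PhysAddr;
/// use x86_64::structures::paging::{PageTable, PageTableFlags};
///
/// let mut table = PageTable::new();
/// assert!(table.is_empty());
/// table[0].set_addr(PhysAddr::new(0x2000), PageTableFlags::PRESENT);
/// assert!(!table.is_empty());
/// ```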
#[repr(align(4096))]
#[repr(C)]
#[derive(Clone)]
pub struct PageTable {
    entries: [PageTableEntry; ENTRY_COUNT],
}

impl PageTable {
    /// Creates an empty page table.
    #[inline]
    pub const fn new() -> Self {
        const EMPTY: PageTableEntry = PageTableEntry::new();
        PageTable {
            entries: [EMPTY; ENTRY_COUNT],
        }
    }

    /// Clears all entries.
    #[inline]
    pub fn zero(&mut self) {
        for entry in self.iter_mut() {
            entry.set_unused();
        }
    }

    /// Returns an iterator over the entries of the page table.
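    ///
    /// A short sketch (illustrative only) counting the entries that are in use:
    ///
    /// ```
    /// use x86_64::structures::paging::PageTable;
    ///
    /// let table = PageTable::new();
    /// assert_eq!(table.iter().filter(|e| !e.is_unused()).count(), 0);
    /// ```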
    #[inline]
    pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
        (0..512).map(move |i| &self.entries[i])
    }

    /// Returns an iterator that allows modifying the entries of the page table.
    #[inline]
    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
        // Note that we intentionally don't just return `self.entries.iter_mut()`:
        // Some users may choose to create a reference to a page table at
        // `0xffff_ffff_ffff_f000`. This causes problems because calculating
        // the end pointer of the page tables causes an overflow. Therefore
        // creating page tables at that address is unsound and must be avoided.
        // Unfortunately creating such page tables is quite common when
        // recursive page tables are used, so we try to avoid calculating the
        // end pointer if possible. `core::slice::Iter` calculates the end
        // pointer to determine when it should stop yielding elements. Because
        // we want to avoid calculating the end pointer, we don't use
        // `core::slice::Iter`, we implement our own iterator that doesn't
        // calculate the end pointer. This doesn't make creating page tables at
        // that address sound, but it avoids some easy to trigger
        // miscompilations.
        let ptr = self.entries.as_mut_ptr();
        (0..512).map(move |i| unsafe { &mut *ptr.add(i) })
    }

    /// Checks if the page table is empty (all entries are zero).
    #[inline]
    pub fn is_empty(&self) -> bool {
        self.iter().all(|entry| entry.is_unused())
    }
}

impl Index<usize> for PageTable {
    type Output = PageTableEntry;

    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for PageTable {
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Index<PageTableIndex> for PageTable {
    type Output = PageTableEntry;

    #[inline]
    fn index(&self, index: PageTableIndex) -> &Self::Output {
        &self.entries[usize::from(index)]
    }
}

impl IndexMut<PageTableIndex> for PageTable {
    #[inline]
    fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
        &mut self.entries[usize::from(index)]
    }
}

impl Default for PageTable {
    fn default() -> Self {
        Self::new()
    }
}

impl fmt::Debug for PageTable {
    #[inline]
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.entries[..].fmt(f)
    }
}

/// A 9-bit index into a page table.
///
/// Can be used to select one of the 512 entries of a page table.
///
/// Guaranteed to only ever contain 0..512.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PageTableIndex(u16);

impl PageTableIndex {
    /// Creates a new index from the given `u16`. Panics if the given value is >=512.
    #[inline]
    pub const fn new(index: u16) -> Self {
        assert!((index as usize) < ENTRY_COUNT);
        Self(index)
    }

    /// Creates a new index from the given `u16`. Throws away bits if the value is >=512.
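    ///
    /// A short illustration (not from the original docs): only the low 9 bits
    /// are kept.
    ///
    /// ```
    /// use x86_64::structures::paging::PageTableIndex;
    ///
    /// // 513 = 512 + 1, so the index wraps around to 1.
    /// assert_eq!(u16::from(PageTableIndex::new_truncate(513)), 1);
    /// ```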
    #[inline]
    pub const fn new_truncate(index: u16) -> Self {
        Self(index % ENTRY_COUNT as u16)
    }

    #[inline]
    pub(crate) const fn into_u64(self) -> u64 {
        self.0 as u64
    }
}

impl From<PageTableIndex> for u16 {
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        index.0
    }
}

impl From<PageTableIndex> for u32 {
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        u32::from(index.0)
    }
}

impl From<PageTableIndex> for u64 {
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        index.into_u64()
    }
}

impl From<PageTableIndex> for usize {
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        usize::from(index.0)
    }
}

#[cfg(feature = "step_trait")]
impl Step for PageTableIndex {
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        Step::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        let idx = usize::from(start).checked_add(count)?;
        (idx < ENTRY_COUNT).then(|| Self::new(idx as u16))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        let idx = usize::from(start).checked_sub(count)?;
        Some(Self::new(idx as u16))
    }
}

/// A 12-bit offset into a 4KiB Page.
///
/// This type is returned by the `VirtAddr::page_offset` method.
///
/// Guaranteed to only ever contain 0..4096.
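///
/// # Example
///
/// A small sketch (illustrative, not from the original docs) of the
/// truncating constructor:
///
/// ```
/// use x86_64::structures::paging::PageOffset;
///
/// // Only the low 12 bits survive: 0x1234 & 0xfff == 0x234.
/// assert_eq!(u16::from(PageOffset::new_truncate(0x1234)), 0x234);
/// ```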
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PageOffset(u16);

impl PageOffset {
    /// Creates a new offset from the given `u16`. Panics if the passed value is >=4096.
    #[inline]
    pub fn new(offset: u16) -> Self {
        assert!(offset < (1 << 12));
        Self(offset)
    }

    /// Creates a new offset from the given `u16`. Throws away bits if the value is >=4096.
    #[inline]
    pub const fn new_truncate(offset: u16) -> Self {
        Self(offset % (1 << 12))
    }
}

impl From<PageOffset> for u16 {
    #[inline]
    fn from(offset: PageOffset) -> Self {
        offset.0
    }
}

impl From<PageOffset> for u32 {
    #[inline]
    fn from(offset: PageOffset) -> Self {
        u32::from(offset.0)
    }
}

impl From<PageOffset> for u64 {
    #[inline]
    fn from(offset: PageOffset) -> Self {
        u64::from(offset.0)
    }
}

impl From<PageOffset> for usize {
    #[inline]
    fn from(offset: PageOffset) -> Self {
        usize::from(offset.0)
    }
}

/// A value between 1 and 4.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PageTableLevel {
    /// Represents the level for a page table.
    One = 1,
    /// Represents the level for a page directory.
    Two,
    /// Represents the level for a page-directory pointer.
    Three,
    /// Represents the level for a page-map level-4.
    Four,
}

impl PageTableLevel {
    /// Returns the next lower level or `None` for level 1.
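    ///
    /// A brief sketch (illustrative only) of walking down the hierarchy:
    ///
    /// ```
    /// use x86_64::structures::paging::PageTableLevel;
    ///
    /// assert_eq!(PageTableLevel::Four.next_lower_level(), Some(PageTableLevel::Three));
    /// assert_eq!(PageTableLevel::One.next_lower_level(), None);
    /// ```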
    pub const fn next_lower_level(self) -> Option<Self> {
        match self {
            PageTableLevel::Four => Some(PageTableLevel::Three),
            PageTableLevel::Three => Some(PageTableLevel::Two),
            PageTableLevel::Two => Some(PageTableLevel::One),
            PageTableLevel::One => None,
        }
    }

    /// Returns the next higher level or `None` for level 4.
    pub const fn next_higher_level(self) -> Option<Self> {
        match self {
            PageTableLevel::Four => None,
            PageTableLevel::Three => Some(PageTableLevel::Four),
            PageTableLevel::Two => Some(PageTableLevel::Three),
            PageTableLevel::One => Some(PageTableLevel::Two),
        }
    }

    /// Returns the alignment for the address space described by a table of this level.
    pub const fn table_address_space_alignment(self) -> u64 {
        1u64 << (self as u8 * 9 + 12)
    }

    /// Returns the alignment for the address space described by an entry in a table of this level.
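    ///
    /// For example (illustrative, not from the original docs), an entry in a
    /// level 2 table maps a 2 MiB region, while the whole level 2 table spans
    /// 1 GiB:
    ///
    /// ```
    /// use x86_64::structures::paging::PageTableLevel;
    ///
    /// assert_eq!(PageTableLevel::Two.entry_address_space_alignment(), 1 << 21); // 2 MiB
    /// assert_eq!(PageTableLevel::Two.table_address_space_alignment(), 1 << 30); // 1 GiB
    /// ```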
    pub const fn entry_address_space_alignment(self) -> u64 {
        1u64 << (((self as u8 - 1) * 9) + 12)
    }
}