x86_64/structures/paging/
page_table.rs

1//! Abstractions for page tables and page table entries.
2
3use core::fmt;
4#[cfg(feature = "step_trait")]
5use core::iter::Step;
6use core::ops::{Index, IndexMut};
7
8use super::{PageSize, PhysFrame, Size4KiB};
9use crate::addr::PhysAddr;
10
11use bitflags::bitflags;
12
/// The error returned by the `PageTableEntry::frame` method.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FrameError {
    /// The entry does not have the `PRESENT` flag set, so it isn't currently mapped to a frame.
    FrameNotPresent,
    /// The entry does have the `HUGE_PAGE` flag set. The `frame` method has a standard 4KiB frame
    /// as return type, so a huge frame can't be returned.
    HugeFrame,
}
22
/// A 64-bit page table entry.
#[derive(Clone)]
#[repr(transparent)]
pub struct PageTableEntry {
    // Raw entry value: bits 12..=51 hold the physical frame address (see the
    // mask in `addr`), the remaining bits are flags (see `PageTableFlags`).
    entry: u64,
}
29
30impl PageTableEntry {
31    /// Creates an unused page table entry.
32    #[inline]
33    pub const fn new() -> Self {
34        PageTableEntry { entry: 0 }
35    }
36
37    /// Returns whether this entry is zero.
38    #[inline]
39    pub const fn is_unused(&self) -> bool {
40        self.entry == 0
41    }
42
43    /// Sets this entry to zero.
44    #[inline]
45    pub fn set_unused(&mut self) {
46        self.entry = 0;
47    }
48
49    /// Returns the flags of this entry.
50    #[inline]
51    pub const fn flags(&self) -> PageTableFlags {
52        PageTableFlags::from_bits_truncate(self.entry)
53    }
54
55    /// Returns the physical address mapped by this entry, might be zero.
56    #[inline]
57    pub fn addr(&self) -> PhysAddr {
58        PhysAddr::new(self.entry & 0x000f_ffff_ffff_f000)
59    }
60
61    /// Returns the physical frame mapped by this entry.
62    ///
63    /// Returns the following errors:
64    ///
65    /// - `FrameError::FrameNotPresent` if the entry doesn't have the `PRESENT` flag set.
66    /// - `FrameError::HugeFrame` if the entry has the `HUGE_PAGE` flag set (for huge pages the
67    ///    `addr` function must be used)
68    #[inline]
69    pub fn frame(&self) -> Result<PhysFrame, FrameError> {
70        if !self.flags().contains(PageTableFlags::PRESENT) {
71            Err(FrameError::FrameNotPresent)
72        } else if self.flags().contains(PageTableFlags::HUGE_PAGE) {
73            Err(FrameError::HugeFrame)
74        } else {
75            Ok(PhysFrame::containing_address(self.addr()))
76        }
77    }
78
79    /// Map the entry to the specified physical address with the specified flags.
80    #[inline]
81    pub fn set_addr(&mut self, addr: PhysAddr, flags: PageTableFlags) {
82        assert!(addr.is_aligned(Size4KiB::SIZE));
83        self.entry = (addr.as_u64()) | flags.bits();
84    }
85
86    /// Map the entry to the specified physical frame with the specified flags.
87    #[inline]
88    pub fn set_frame(&mut self, frame: PhysFrame, flags: PageTableFlags) {
89        assert!(!flags.contains(PageTableFlags::HUGE_PAGE));
90        self.set_addr(frame.start_address(), flags)
91    }
92
93    /// Sets the flags of this entry.
94    #[inline]
95    pub fn set_flags(&mut self, flags: PageTableFlags) {
96        self.entry = self.addr().as_u64() | flags.bits();
97    }
98}
99
100impl Default for PageTableEntry {
101    #[inline]
102    fn default() -> Self {
103        Self::new()
104    }
105}
106
107impl fmt::Debug for PageTableEntry {
108    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
109        let mut f = f.debug_struct("PageTableEntry");
110        f.field("addr", &self.addr());
111        f.field("flags", &self.flags());
112        f.finish()
113    }
114}
115
bitflags! {
    /// Possible flags for a page table entry.
    #[derive(PartialEq, Eq, PartialOrd, Ord, Hash, Debug, Clone, Copy)]
    pub struct PageTableFlags: u64 {
        /// Specifies whether the mapped frame or page table is loaded in memory.
        const PRESENT =         1;
        /// Controls whether writes to the mapped frames are allowed.
        ///
        /// If this bit is unset in a level 1 page table entry, the mapped frame is read-only.
        /// If this bit is unset in a higher level page table entry the complete range of mapped
        /// pages is read-only.
        const WRITABLE =        1 << 1;
        /// Controls whether accesses from userspace (i.e. ring 3) are permitted.
        const USER_ACCESSIBLE = 1 << 2;
        /// If this bit is set, a “write-through” policy is used for the cache, else a “write-back”
        /// policy is used.
        const WRITE_THROUGH =   1 << 3;
        /// Disables caching for the pointed entry, if it is cacheable.
        const NO_CACHE =        1 << 4;
        /// Set by the CPU when the mapped frame or page table is accessed.
        const ACCESSED =        1 << 5;
        /// Set by the CPU on a write to the mapped frame.
        const DIRTY =           1 << 6;
        /// Specifies that the entry maps a huge frame instead of a page table. Only allowed in
        /// P2 or P3 tables.
        const HUGE_PAGE =       1 << 7;
        /// Indicates that the mapping is present in all address spaces, so it isn't flushed from
        /// the TLB on an address space switch.
        const GLOBAL =          1 << 8;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_9 =           1 << 9;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_10 =          1 << 10;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_11 =          1 << 11;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_52 =          1 << 52;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_53 =          1 << 53;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_54 =          1 << 54;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_55 =          1 << 55;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_56 =          1 << 56;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_57 =          1 << 57;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_58 =          1 << 58;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_59 =          1 << 59;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_60 =          1 << 60;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_61 =          1 << 61;
        /// Available to the OS, can be used to store additional data, e.g. custom flags.
        const BIT_62 =          1 << 62;
        /// Forbid code execution from the mapped frames.
        ///
        /// Can be only used when the no-execute page protection feature is enabled in the EFER
        /// register.
        const NO_EXECUTE =      1 << 63;
    }
}
180
/// The number of entries in a page table.
///
/// A table is one 4096-byte page of 8-byte (`u64`) entries: 4096 / 8 = 512.
const ENTRY_COUNT: usize = 512;
183
/// Represents a page table.
///
/// Always page-sized.
///
/// This struct implements the `Index` and `IndexMut` traits, so the entries can be accessed
/// through index operations. For example, `page_table[15]` returns the 16th page table entry.
///
/// Note that while this type implements [`Clone`], the users must be careful not to introduce
/// mutable aliasing by using the cloned page tables.
#[repr(align(4096))]
#[repr(C)]
#[derive(Clone)]
pub struct PageTable {
    // 512 entries of 8 bytes each; together with `repr(align(4096))` this makes
    // the struct exactly one 4KiB page.
    entries: [PageTableEntry; ENTRY_COUNT],
}
199
200impl PageTable {
201    /// Creates an empty page table.
202    #[inline]
203    pub const fn new() -> Self {
204        const EMPTY: PageTableEntry = PageTableEntry::new();
205        PageTable {
206            entries: [EMPTY; ENTRY_COUNT],
207        }
208    }
209
210    /// Clears all entries.
211    #[inline]
212    pub fn zero(&mut self) {
213        for entry in self.iter_mut() {
214            entry.set_unused();
215        }
216    }
217
218    /// Returns an iterator over the entries of the page table.
219    #[inline]
220    pub fn iter(&self) -> impl Iterator<Item = &PageTableEntry> {
221        (0..512).map(move |i| &self.entries[i])
222    }
223
224    /// Returns an iterator that allows modifying the entries of the page table.
225    #[inline]
226    pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut PageTableEntry> {
227        // Note that we intentionally don't just return `self.entries.iter()`:
228        // Some users may choose to create a reference to a page table at
229        // `0xffff_ffff_ffff_f000`. This causes problems because calculating
230        // the end pointer of the page tables causes an overflow. Therefore
231        // creating page tables at that address is unsound and must be avoided.
232        // Unfortunately creating such page tables is quite common when
233        // recursive page tables are used, so we try to avoid calculating the
234        // end pointer if possible. `core::slice::Iter` calculates the end
235        // pointer to determine when it should stop yielding elements. Because
236        // we want to avoid calculating the end pointer, we don't use
237        // `core::slice::Iter`, we implement our own iterator that doesn't
238        // calculate the end pointer. This doesn't make creating page tables at
239        // that address sound, but it avoids some easy to trigger
240        // miscompilations.
241        let ptr = self.entries.as_mut_ptr();
242        (0..512).map(move |i| unsafe { &mut *ptr.add(i) })
243    }
244
245    /// Checks if the page table is empty (all entries are zero).
246    #[inline]
247    pub fn is_empty(&self) -> bool {
248        self.iter().all(|entry| entry.is_unused())
249    }
250}
251
impl Index<usize> for PageTable {
    type Output = PageTableEntry;

    /// Returns the entry at `index`.
    ///
    /// Panics if `index >= 512` (the bound of the `entries` array).
    #[inline]
    fn index(&self, index: usize) -> &Self::Output {
        &self.entries[index]
    }
}

impl IndexMut<usize> for PageTable {
    /// Returns a mutable reference to the entry at `index`.
    ///
    /// Panics if `index >= 512` (the bound of the `entries` array).
    #[inline]
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        &mut self.entries[index]
    }
}

impl Index<PageTableIndex> for PageTable {
    type Output = PageTableEntry;

    /// Returns the entry at `index`. Never panics: a `PageTableIndex` is
    /// guaranteed to be in `0..512`.
    #[inline]
    fn index(&self, index: PageTableIndex) -> &Self::Output {
        &self.entries[usize::from(index)]
    }
}

impl IndexMut<PageTableIndex> for PageTable {
    /// Returns a mutable reference to the entry at `index`. Never panics: a
    /// `PageTableIndex` is guaranteed to be in `0..512`.
    #[inline]
    fn index_mut(&mut self, index: PageTableIndex) -> &mut Self::Output {
        &mut self.entries[usize::from(index)]
    }
}
283
284impl Default for PageTable {
285    fn default() -> Self {
286        Self::new()
287    }
288}
289
290impl fmt::Debug for PageTable {
291    #[inline]
292    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
293        self.entries[..].fmt(f)
294    }
295}
296
/// A 9-bit index into a page table.
///
/// Can be used to select one of the 512 entries of a page table.
///
/// Guaranteed to only ever contain 0..512.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
// Invariant: the inner value is always < 512, upheld by `new`/`new_truncate`.
pub struct PageTableIndex(u16);
304
305impl PageTableIndex {
306    /// Creates a new index from the given `u16`. Panics if the given value is >=512.
307    #[inline]
308    pub const fn new(index: u16) -> Self {
309        assert!((index as usize) < ENTRY_COUNT);
310        Self(index)
311    }
312
313    /// Creates a new index from the given `u16`. Throws away bits if the value is >=512.
314    #[inline]
315    pub const fn new_truncate(index: u16) -> Self {
316        Self(index % ENTRY_COUNT as u16)
317    }
318
319    #[inline]
320    pub(crate) const fn into_u64(self) -> u64 {
321        self.0 as u64
322    }
323}
324
impl From<PageTableIndex> for u16 {
    /// Extracts the raw 9-bit index value.
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        index.0
    }
}

impl From<PageTableIndex> for u32 {
    /// Losslessly widens the 9-bit index.
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        u32::from(index.0)
    }
}

impl From<PageTableIndex> for u64 {
    /// Losslessly widens the 9-bit index.
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        index.into_u64()
    }
}

impl From<PageTableIndex> for usize {
    /// Losslessly widens the 9-bit index.
    #[inline]
    fn from(index: PageTableIndex) -> Self {
        usize::from(index.0)
    }
}
352
#[cfg(feature = "step_trait")]
impl Step for PageTableIndex {
    /// Delegates to the distance between the underlying `u16` values.
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        Step::steps_between(&start.0, &end.0)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        let idx = usize::from(start).checked_add(count)?;
        // `Self::new` panics for values >= ENTRY_COUNT, so check the bound
        // first and report overflow as `None` instead.
        (idx < ENTRY_COUNT).then(|| Self::new(idx as u16))
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        let idx = usize::from(start).checked_sub(count)?;
        // `idx <= start.0 < ENTRY_COUNT`, so `Self::new` cannot panic here.
        Some(Self::new(idx as u16))
    }
}
372
/// A 12-bit offset into a 4KiB Page.
///
/// This type is returned by the `VirtAddr::page_offset` method.
///
/// Guaranteed to only ever contain 0..4096.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PageOffset(u16);

impl PageOffset {
    /// Creates a new offset from the given `u16`. Panics if the passed value is >=4096.
    // `const` for consistency with `new_truncate` and `PageTableIndex::new`.
    #[inline]
    pub const fn new(offset: u16) -> Self {
        assert!(offset < (1 << 12));
        Self(offset)
    }

    /// Creates a new offset from the given `u16`. Throws away bits if the value is >=4096.
    #[inline]
    pub const fn new_truncate(offset: u16) -> Self {
        Self(offset % (1 << 12))
    }
}
395
impl From<PageOffset> for u16 {
    /// Extracts the raw 12-bit offset value.
    #[inline]
    fn from(offset: PageOffset) -> Self {
        offset.0
    }
}

impl From<PageOffset> for u32 {
    /// Losslessly widens the 12-bit offset.
    #[inline]
    fn from(offset: PageOffset) -> Self {
        u32::from(offset.0)
    }
}

impl From<PageOffset> for u64 {
    /// Losslessly widens the 12-bit offset.
    #[inline]
    fn from(offset: PageOffset) -> Self {
        u64::from(offset.0)
    }
}

impl From<PageOffset> for usize {
    /// Losslessly widens the 12-bit offset.
    #[inline]
    fn from(offset: PageOffset) -> Self {
        usize::from(offset.0)
    }
}
423
/// A value between 1 and 4.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum PageTableLevel {
    /// Represents the level for a page table.
    One = 1,
    /// Represents the level for a page directory.
    Two,
    /// Represents the level for a page-directory pointer.
    Three,
    /// Represents the level for a page-map level-4.
    Four,
}

impl PageTableLevel {
    /// Returns the next lower level or `None` for level 1
    pub const fn next_lower_level(self) -> Option<Self> {
        match self {
            Self::One => None,
            Self::Two => Some(Self::One),
            Self::Three => Some(Self::Two),
            Self::Four => Some(Self::Three),
        }
    }

    /// Returns the next higher level or `None` for level 4
    pub const fn next_higher_level(self) -> Option<Self> {
        match self {
            Self::One => Some(Self::Two),
            Self::Two => Some(Self::Three),
            Self::Three => Some(Self::Four),
            Self::Four => None,
        }
    }

    /// Returns the alignment for the address space described by a table of this level.
    pub const fn table_address_space_alignment(self) -> u64 {
        // Each level covers 9 more address bits on top of the 12-bit page offset.
        let shift = self as u64 * 9 + 12;
        1u64 << shift
    }

    /// Returns the alignment for the address space described by an entry in a table of this level.
    pub const fn entry_address_space_alignment(self) -> u64 {
        // One entry covers the address space of a whole table one level below.
        let shift = (self as u64 - 1) * 9 + 12;
        1u64 << shift
    }
}