x86_64/
addr.rs

1//! Physical and virtual addresses manipulation
2
3use core::convert::TryFrom;
4use core::fmt;
5#[cfg(feature = "step_trait")]
6use core::iter::Step;
7use core::ops::{Add, AddAssign, Sub, SubAssign};
8#[cfg(feature = "memory_encryption")]
9use core::sync::atomic::Ordering;
10
11#[cfg(feature = "memory_encryption")]
12use crate::structures::mem_encrypt::ENC_BIT_MASK;
13use crate::structures::paging::page_table::PageTableLevel;
14use crate::structures::paging::{PageOffset, PageTableIndex};
15
16use bit_field::BitField;
17use dep_const_fn::const_fn;
18
/// Size of the 48-bit address space (2^48 bytes); used to bound step counts
/// in the `Step`-style helpers below.
const ADDRESS_SPACE_SIZE: u64 = 0x1_0000_0000_0000;
20
/// A canonical 64-bit virtual memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need
/// to be copies of bit 47, i.e. the most significant bit. Addresses that fulfil this criterion
/// are called “canonical”. This type guarantees that it always represents a canonical address.
///
/// The derived `PartialOrd`/`Ord` implementations compare the raw `u64` value,
/// so canonical upper-half addresses compare greater than lower-half addresses.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct VirtAddr(u64);
34
/// A 64-bit physical memory address.
///
/// This is a wrapper type around an `u64`, so it is always 8 bytes, even when compiled
/// on non 64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need
/// to be zero. This type guarantees that it always represents a valid physical address.
///
/// The derived `PartialOrd`/`Ord` implementations compare the raw `u64` value.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct PhysAddr(u64);
47
/// A passed `u64` was not a valid virtual address.
///
/// This means that bits 48 to 64 are not a valid sign extension of bit 47 (and
/// in particular are not all null). So automatic sign extension would have
/// overwritten possibly meaningful bits. This likely indicates a bug, for
/// example an invalid address calculation.
///
/// Returned by [`VirtAddr::try_new`]. Contains the invalid address.
pub struct VirtAddrNotValid(pub u64);
57
58impl core::fmt::Debug for VirtAddrNotValid {
59    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
60        f.debug_tuple("VirtAddrNotValid")
61            .field(&format_args!("{:#x}", self.0))
62            .finish()
63    }
64}
65
impl VirtAddr {
    /// Creates a new canonical virtual address.
    ///
    /// The provided address should already be canonical. If you want to check
    /// whether an address is canonical, use [`try_new`](Self::try_new).
    ///
    /// ## Panics
    ///
    /// This function panics if the bits in the range 48 to 64 are invalid
    /// (i.e. are not a proper sign extension of bit 47).
    #[inline]
    pub const fn new(addr: u64) -> VirtAddr {
        // TODO: Replace with .ok().expect(msg) when that works on stable.
        match Self::try_new(addr) {
            Ok(v) => v,
            Err(_) => panic!("virtual address must be sign extended in bits 48 to 64"),
        }
    }

    /// Tries to create a new canonical virtual address.
    ///
    /// This function checks whether the given address is canonical
    /// and returns an error otherwise. An address is canonical
    /// if bits 48 to 64 are a correct sign
    /// extension (i.e. copies of bit 47).
    #[inline]
    pub const fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
        // Truncation is a no-op exactly for canonical addresses, so comparing
        // the truncated value with the input detects non-canonical inputs.
        let v = Self::new_truncate(addr);
        if v.0 == addr {
            Ok(v)
        } else {
            Err(VirtAddrNotValid(addr))
        }
    }

    /// Creates a new canonical virtual address, throwing out bits 48..64.
    ///
    /// This function performs sign extension of bit 47 to make the address
    /// canonical, overwriting bits 48 to 64. If you want to check whether an
    /// address is canonical, use [`new`](Self::new) or [`try_new`](Self::try_new).
    #[inline]
    pub const fn new_truncate(addr: u64) -> VirtAddr {
        // By doing the right shift as a signed operation (on a i64), it will
        // sign extend the value, repeating the leftmost bit.
        VirtAddr(((addr << 16) as i64 >> 16) as u64)
    }

    /// Creates a new virtual address, without any checks.
    ///
    /// ## Safety
    ///
    /// You must make sure bits 48..64 are equal to bit 47. This is not checked.
    #[inline]
    pub const unsafe fn new_unsafe(addr: u64) -> VirtAddr {
        VirtAddr(addr)
    }

    /// Creates a virtual address that points to `0`.
    #[inline]
    pub const fn zero() -> VirtAddr {
        VirtAddr(0)
    }

    /// Converts the address to an `u64`.
    #[inline]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Creates a virtual address from the given pointer
    ///
    /// ## Panics
    ///
    /// Panics (via [`new`](Self::new)) if the pointer value is not canonical.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
        // Cast to a thin `*const ()` first to discard any fat-pointer
        // metadata (slice length, vtable) before taking the address.
        Self::new(ptr as *const () as u64)
    }

    /// Converts the address to a raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_ptr<T>(self) -> *const T {
        self.as_u64() as *const T
    }

    /// Converts the address to a mutable raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_mut_ptr<T>(self) -> *mut T {
        self.as_ptr::<T>() as *mut T
    }

    /// Convenience method for checking if a virtual address is null.
    #[inline]
    pub const fn is_null(self) -> bool {
        self.0 == 0
    }

    /// Aligns the virtual address upwards to the given alignment.
    ///
    /// See the `align_up` function for more information.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting address is higher than
    /// `0xffff_ffff_ffff_ffff`.
    #[inline]
    pub fn align_up<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        // `new_truncate` re-applies the sign extension in case aligning
        // pushed the address across bit 47 (see test_virt_addr_align_up).
        VirtAddr::new_truncate(align_up(self.0, align.into()))
    }

    /// Aligns the virtual address downwards to the given alignment.
    ///
    /// See the `align_down` function for more information.
    #[inline]
    pub fn align_down<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        self.align_down_u64(align.into())
    }

    /// Aligns the virtual address downwards to the given alignment.
    ///
    /// See the `align_down` function for more information.
    // Non-generic monomorphic variant, usable from `const` contexts.
    #[inline]
    pub(crate) const fn align_down_u64(self, align: u64) -> Self {
        VirtAddr::new_truncate(align_down(self.0, align))
    }

    /// Checks whether the virtual address has the demanded alignment.
    #[inline]
    pub fn is_aligned<U>(self, align: U) -> bool
    where
        U: Into<u64>,
    {
        self.is_aligned_u64(align.into())
    }

    /// Checks whether the virtual address has the demanded alignment.
    // Aligning down is a no-op exactly when the address is already aligned.
    #[inline]
    pub(crate) const fn is_aligned_u64(self, align: u64) -> bool {
        self.align_down_u64(align).as_u64() == self.as_u64()
    }

    /// Returns the 12-bit page offset of this virtual address.
    #[inline]
    pub const fn page_offset(self) -> PageOffset {
        PageOffset::new_truncate(self.0 as u16)
    }

    /// Returns the 9-bit level 1 page table index.
    #[inline]
    pub const fn p1_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12) as u16)
    }

    /// Returns the 9-bit level 2 page table index.
    #[inline]
    pub const fn p2_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9) as u16)
    }

    /// Returns the 9-bit level 3 page table index.
    #[inline]
    pub const fn p3_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit level 4 page table index.
    #[inline]
    pub const fn p4_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit level page table index.
    // Levels are 1-based: level N's index field starts at bit 12 + (N - 1) * 9.
    #[inline]
    pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
    }

    // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
    // Adapts the exact u64 step count to `Step::steps_between`'s
    // (lower bound, upper bound) shape: if the count overflows usize
    // (possible on 32-bit targets), saturate the lower bound and report no
    // upper bound.
    #[cfg(feature = "step_trait")]
    pub(crate) fn steps_between_impl(start: &Self, end: &Self) -> (usize, Option<usize>) {
        if let Some(steps) = Self::steps_between_u64(start, end) {
            let steps = usize::try_from(steps).ok();
            (steps.unwrap_or(usize::MAX), steps)
        } else {
            (0, None)
        }
    }

    /// An implementation of steps_between that returns u64. Note that this
    /// function always returns the exact bound, so it doesn't need to return a
    /// lower and upper bound like steps_between does.
    #[cfg(any(feature = "instructions", feature = "step_trait"))]
    pub(crate) fn steps_between_u64(start: &Self, end: &Self) -> Option<u64> {
        let mut steps = end.0.checked_sub(start.0)?;

        // Mask away extra bits that appear while jumping the gap.
        steps &= 0xffff_ffff_ffff;

        Some(steps)
    }

    // FIXME: Move this into the `Step` impl, once `Step` is stabilized.
    // Counts that don't fit into u64 can never stay within the 48-bit address
    // space, so they map to `None`.
    #[inline]
    pub(crate) fn forward_checked_impl(start: Self, count: usize) -> Option<Self> {
        Self::forward_checked_u64(start, u64::try_from(count).ok()?)
    }

    /// An implementation of forward_checked that takes u64 instead of usize.
    #[inline]
    pub(crate) fn forward_checked_u64(start: Self, count: u64) -> Option<Self> {
        // A step count larger than the whole 48-bit address space can never
        // yield a canonical address.
        if count > ADDRESS_SPACE_SIZE {
            return None;
        }

        let mut addr = start.0.checked_add(count)?;

        // Inspect bits 47.. to see where the raw sum landed relative to the
        // non-canonical "gap" between the lower and upper halves.
        match addr.get_bits(47..) {
            0x1 => {
                // Bit 47 became 1 while bits 48.. are still 0: the sum
                // stepped out of the lower half into the gap.
                // Jump the gap by sign extending the 47th bit.
                addr.set_bits(47.., 0x1ffff);
            }
            0x2 => {
                // Address overflow
                return None;
            }
            _ => {}
        }

        // SAFETY: the match above restored a proper sign extension of bit 47.
        Some(unsafe { Self::new_unsafe(addr) })
    }

    /// An implementation of backward_checked that takes u64 instead of usize.
    #[cfg(feature = "step_trait")]
    #[inline]
    pub(crate) fn backward_checked_u64(start: Self, count: u64) -> Option<Self> {
        // A step count larger than the whole 48-bit address space can never
        // yield a canonical address.
        if count > ADDRESS_SPACE_SIZE {
            return None;
        }

        let mut addr = start.0.checked_sub(count)?;

        // Inspect bits 47.. to see where the raw difference landed relative
        // to the non-canonical "gap" between the halves.
        match addr.get_bits(47..) {
            0x1fffe => {
                // Bits 48.. are still all ones but bit 47 dropped to 0: the
                // difference stepped out of the upper half into the gap.
                // Jump the gap by sign extending the 47th bit.
                addr.set_bits(47.., 0);
            }
            0x1fffd => {
                // Address underflow
                return None;
            }
            _ => {}
        }

        // SAFETY: the match above restored a proper sign extension of bit 47.
        Some(unsafe { Self::new_unsafe(addr) })
    }
}
327
328impl fmt::Debug for VirtAddr {
329    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
330        f.debug_tuple("VirtAddr")
331            .field(&format_args!("{:#x}", self.0))
332            .finish()
333    }
334}
335
336impl fmt::Binary for VirtAddr {
337    #[inline]
338    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
339        fmt::Binary::fmt(&self.0, f)
340    }
341}
342
343impl fmt::LowerHex for VirtAddr {
344    #[inline]
345    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
346        fmt::LowerHex::fmt(&self.0, f)
347    }
348}
349
350impl fmt::Octal for VirtAddr {
351    #[inline]
352    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
353        fmt::Octal::fmt(&self.0, f)
354    }
355}
356
357impl fmt::UpperHex for VirtAddr {
358    #[inline]
359    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
360        fmt::UpperHex::fmt(&self.0, f)
361    }
362}
363
364impl fmt::Pointer for VirtAddr {
365    #[inline]
366    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
367        fmt::Pointer::fmt(&(self.0 as *const ()), f)
368    }
369}
370
371impl Add<u64> for VirtAddr {
372    type Output = Self;
373
374    #[cfg_attr(not(feature = "step_trait"), allow(rustdoc::broken_intra_doc_links))]
375    /// Add an offset to a virtual address.
376    ///
377    /// This function performs normal arithmetic addition and doesn't jump the
378    /// address gap. If you're looking for a successor operation that jumps the
379    /// address gap, use [`Step::forward`].
380    ///
381    /// # Panics
382    ///
383    /// This function will panic on overflow or if the result is not a
384    /// canonical address.
385    #[inline]
386    fn add(self, rhs: u64) -> Self::Output {
387        VirtAddr::try_new(
388            self.0
389                .checked_add(rhs)
390                .expect("attempt to add with overflow"),
391        )
392        .expect("attempt to add resulted in non-canonical virtual address")
393    }
394}
395
396impl AddAssign<u64> for VirtAddr {
397    #[cfg_attr(not(feature = "step_trait"), allow(rustdoc::broken_intra_doc_links))]
398    /// Add an offset to a virtual address.
399    ///
400    /// This function performs normal arithmetic addition and doesn't jump the
401    /// address gap. If you're looking for a successor operation that jumps the
402    /// address gap, use [`Step::forward`].
403    ///
404    /// # Panics
405    ///
406    /// This function will panic on overflow or if the result is not a
407    /// canonical address.
408    #[inline]
409    fn add_assign(&mut self, rhs: u64) {
410        *self = *self + rhs;
411    }
412}
413
414impl Sub<u64> for VirtAddr {
415    type Output = Self;
416
417    #[cfg_attr(not(feature = "step_trait"), allow(rustdoc::broken_intra_doc_links))]
418    /// Subtract an offset from a virtual address.
419    ///
420    /// This function performs normal arithmetic subtraction and doesn't jump
421    /// the address gap. If you're looking for a predecessor operation that
422    /// jumps the address gap, use [`Step::backward`].
423    ///
424    /// # Panics
425    ///
426    /// This function will panic on overflow or if the result is not a
427    /// canonical address.
428    #[inline]
429    fn sub(self, rhs: u64) -> Self::Output {
430        VirtAddr::try_new(
431            self.0
432                .checked_sub(rhs)
433                .expect("attempt to subtract with overflow"),
434        )
435        .expect("attempt to subtract resulted in non-canonical virtual address")
436    }
437}
438
439impl SubAssign<u64> for VirtAddr {
440    #[cfg_attr(not(feature = "step_trait"), allow(rustdoc::broken_intra_doc_links))]
441    /// Subtract an offset from a virtual address.
442    ///
443    /// This function performs normal arithmetic subtraction and doesn't jump
444    /// the address gap. If you're looking for a predecessor operation that
445    /// jumps the address gap, use [`Step::backward`].
446    ///
447    /// # Panics
448    ///
449    /// This function will panic on overflow or if the result is not a
450    /// canonical address.
451    #[inline]
452    fn sub_assign(&mut self, rhs: u64) {
453        *self = *self - rhs;
454    }
455}
456
457impl Sub<VirtAddr> for VirtAddr {
458    type Output = u64;
459
460    /// Returns the difference between two addresses.
461    ///
462    /// # Panics
463    ///
464    /// This function will panic on overflow.
465    #[inline]
466    fn sub(self, rhs: VirtAddr) -> Self::Output {
467        self.as_u64()
468            .checked_sub(rhs.as_u64())
469            .expect("attempt to subtract with overflow")
470    }
471}
472
#[cfg(feature = "step_trait")]
impl Step for VirtAddr {
    // All three methods delegate to inherent helpers so the gap-jumping logic
    // is also usable without the unstable `Step` trait.
    #[inline]
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        Self::steps_between_impl(start, end)
    }

    #[inline]
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        Self::forward_checked_impl(start, count)
    }

    #[inline]
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        // Counts that don't fit into u64 can never stay inside the 48-bit
        // address space, so they map to `None`.
        Self::backward_checked_u64(start, u64::try_from(count).ok()?)
    }
}
490
/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all null.
///
/// Returned by [`PhysAddr::try_new`]. Contains the invalid address.
pub struct PhysAddrNotValid(pub u64);
497
498impl core::fmt::Debug for PhysAddrNotValid {
499    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
500        f.debug_tuple("PhysAddrNotValid")
501            .field(&format_args!("{:#x}", self.0))
502            .finish()
503    }
504}
505
impl PhysAddr {
    /// Creates a new physical address.
    ///
    /// ## Panics
    ///
    /// This function panics if a bit in the range 52 to 64 is set.
    ///
    /// If the `memory_encryption` feature has been enabled and an encryption bit has been
    /// configured, this also panics if the encryption bit is manually set in the address.
    #[inline]
    #[const_fn(cfg(not(feature = "memory_encryption")))]
    pub const fn new(addr: u64) -> Self {
        // TODO: Replace with .ok().expect(msg) when that works on stable.
        match Self::try_new(addr) {
            Ok(p) => p,
            Err(_) => panic!("physical addresses must not have any bits in the range 52 to 64 set"),
        }
    }

    /// Creates a new physical address, throwing bits 52..64 away.
    #[cfg(not(feature = "memory_encryption"))]
    #[inline]
    pub const fn new_truncate(addr: u64) -> PhysAddr {
        // Remainder modulo 2^52 keeps exactly the low 52 bits.
        PhysAddr(addr % (1 << 52))
    }

    /// Creates a new physical address, throwing bits 52..64 and the encryption bit away.
    #[cfg(feature = "memory_encryption")]
    #[inline]
    pub fn new_truncate(addr: u64) -> PhysAddr {
        // Additionally clears the runtime-configured memory-encryption bit.
        PhysAddr((addr % (1 << 52)) & !ENC_BIT_MASK.load(Ordering::Relaxed))
    }

    /// Creates a new physical address, without any checks.
    ///
    /// ## Safety
    ///
    /// You must make sure bits 52..64 are zero. This is not checked.
    #[inline]
    pub const unsafe fn new_unsafe(addr: u64) -> PhysAddr {
        PhysAddr(addr)
    }

    /// Tries to create a new physical address.
    ///
    /// Fails if any bits in the range 52 to 64 are set.
    /// If the `memory_encryption` feature has been enabled and an encryption bit has been
    /// configured, this also fails if the encryption bit is manually set in the address.
    #[inline]
    #[const_fn(cfg(not(feature = "memory_encryption")))]
    pub const fn try_new(addr: u64) -> Result<Self, PhysAddrNotValid> {
        // Truncation is a no-op exactly for valid addresses, so comparing the
        // truncated value with the input detects invalid inputs.
        let p = Self::new_truncate(addr);
        if p.0 == addr {
            Ok(p)
        } else {
            Err(PhysAddrNotValid(addr))
        }
    }

    /// Creates a physical address that points to `0`.
    #[inline]
    pub const fn zero() -> PhysAddr {
        PhysAddr(0)
    }

    /// Converts the address to an `u64`.
    #[inline]
    pub const fn as_u64(self) -> u64 {
        self.0
    }

    /// Convenience method for checking if a physical address is null.
    #[inline]
    pub const fn is_null(self) -> bool {
        self.0 == 0
    }

    /// Aligns the physical address upwards to the given alignment.
    ///
    /// See the `align_up` function for more information.
    ///
    /// # Panics
    ///
    /// This function panics if the resulting address has a bit in the range 52
    /// to 64 set.
    #[inline]
    pub fn align_up<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        PhysAddr::new(align_up(self.0, align.into()))
    }

    /// Aligns the physical address downwards to the given alignment.
    ///
    /// See the `align_down` function for more information.
    #[inline]
    pub fn align_down<U>(self, align: U) -> Self
    where
        U: Into<u64>,
    {
        self.align_down_u64(align.into())
    }

    /// Aligns the physical address downwards to the given alignment.
    ///
    /// See the `align_down` function for more information.
    // Non-generic monomorphic variant, usable from `const` contexts.
    #[inline]
    pub(crate) const fn align_down_u64(self, align: u64) -> Self {
        PhysAddr(align_down(self.0, align))
    }

    /// Checks whether the physical address has the demanded alignment.
    #[inline]
    pub fn is_aligned<U>(self, align: U) -> bool
    where
        U: Into<u64>,
    {
        self.is_aligned_u64(align.into())
    }

    /// Checks whether the physical address has the demanded alignment.
    // Aligning down is a no-op exactly when the address is already aligned.
    #[inline]
    pub(crate) const fn is_aligned_u64(self, align: u64) -> bool {
        self.align_down_u64(align).as_u64() == self.as_u64()
    }
}
632
633impl fmt::Debug for PhysAddr {
634    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
635        f.debug_tuple("PhysAddr")
636            .field(&format_args!("{:#x}", self.0))
637            .finish()
638    }
639}
640
641impl fmt::Binary for PhysAddr {
642    #[inline]
643    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
644        fmt::Binary::fmt(&self.0, f)
645    }
646}
647
648impl fmt::LowerHex for PhysAddr {
649    #[inline]
650    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
651        fmt::LowerHex::fmt(&self.0, f)
652    }
653}
654
655impl fmt::Octal for PhysAddr {
656    #[inline]
657    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
658        fmt::Octal::fmt(&self.0, f)
659    }
660}
661
662impl fmt::UpperHex for PhysAddr {
663    #[inline]
664    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
665        fmt::UpperHex::fmt(&self.0, f)
666    }
667}
668
669impl fmt::Pointer for PhysAddr {
670    #[inline]
671    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
672        fmt::Pointer::fmt(&(self.0 as *const ()), f)
673    }
674}
675
676impl Add<u64> for PhysAddr {
677    type Output = Self;
678    #[inline]
679    fn add(self, rhs: u64) -> Self::Output {
680        PhysAddr::new(self.0.checked_add(rhs).unwrap())
681    }
682}
683
684impl AddAssign<u64> for PhysAddr {
685    #[inline]
686    fn add_assign(&mut self, rhs: u64) {
687        *self = *self + rhs;
688    }
689}
690
691impl Sub<u64> for PhysAddr {
692    type Output = Self;
693    #[inline]
694    fn sub(self, rhs: u64) -> Self::Output {
695        PhysAddr::new(self.0.checked_sub(rhs).unwrap())
696    }
697}
698
699impl SubAssign<u64> for PhysAddr {
700    #[inline]
701    fn sub_assign(&mut self, rhs: u64) {
702        *self = *self - rhs;
703    }
704}
705
706impl Sub<PhysAddr> for PhysAddr {
707    type Output = u64;
708    #[inline]
709    fn sub(self, rhs: PhysAddr) -> Self::Output {
710        self.as_u64().checked_sub(rhs.as_u64()).unwrap()
711    }
712}
713
/// Align address downwards.
///
/// Returns the greatest `x` with alignment `align` so that `x <= addr`.
///
/// Panics if the alignment is not a power of two.
#[inline]
pub const fn align_down(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    // Clearing the low bits rounds down to the previous multiple of `align`.
    let mask = align - 1;
    addr & !mask
}
724
/// Align address upwards.
///
/// Returns the smallest `x` with alignment `align` so that `x >= addr`.
///
/// Panics if the alignment is not a power of two or if an overflow occurs.
#[inline]
pub const fn align_up(addr: u64, align: u64) -> u64 {
    assert!(align.is_power_of_two(), "`align` must be a power of two");
    let mask = align - 1;
    if addr & mask == 0 {
        // Already aligned; nothing to do.
        addr
    } else {
        // Fill the low bits, then step to the next multiple of `align`.
        // FIXME: Replace with .expect, once `Option::expect` is const.
        match (addr | mask).checked_add(1) {
            Some(aligned) => aligned,
            None => panic!("attempt to add with overflow"),
        }
    }
}
745
#[cfg(test)]
mod tests {
    use super::*;

    // --- Overflow / underflow panics of the arithmetic operators ---

    #[test]
    #[should_panic]
    pub fn add_overflow_virtaddr() {
        let _ = VirtAddr::new(0xffff_ffff_ffff_ffff) + 1;
    }

    #[test]
    #[should_panic]
    pub fn add_overflow_physaddr() {
        let _ = PhysAddr::new(0x000f_ffff_ffff_ffff) + 0xffff_0000_0000_0000;
    }

    #[test]
    #[should_panic]
    pub fn sub_underflow_virtaddr() {
        let _ = VirtAddr::new(0) - 1;
    }

    #[test]
    #[should_panic]
    pub fn sub_overflow_physaddr() {
        let _ = PhysAddr::new(0) - 1;
    }

    // Sign extension of bit 47 when truncating to canonical form.
    #[test]
    pub fn virtaddr_new_truncate() {
        assert_eq!(VirtAddr::new_truncate(0), VirtAddr(0));
        assert_eq!(VirtAddr::new_truncate(1 << 47), VirtAddr(0xfffff << 47));
        assert_eq!(VirtAddr::new_truncate(123), VirtAddr(123));
        assert_eq!(VirtAddr::new_truncate(123 << 47), VirtAddr(0xfffff << 47));
    }

    // `Step::forward` must jump the non-canonical gap between the halves.
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_step_forward() {
        assert_eq!(Step::forward(VirtAddr(0), 0), VirtAddr(0));
        assert_eq!(Step::forward(VirtAddr(0), 1), VirtAddr(1));
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 1),
            VirtAddr(0xffff_8000_0000_0000)
        );
        assert_eq!(
            Step::forward(VirtAddr(0xffff_8000_0000_0000), 1),
            VirtAddr(0xffff_8000_0000_0001)
        );
        assert_eq!(
            Step::forward_checked(VirtAddr(0xffff_ffff_ffff_ffff), 1),
            None
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x1234_5678_9abd),
            VirtAddr(0xffff_9234_5678_9abc)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0000),
            VirtAddr(0xffff_ffff_ffff_ffff)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::forward(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_00ff),
            VirtAddr(0xffff_ffff_ffff_ffff)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::forward_checked(VirtAddr(0x7fff_ffff_ff00), 0x8000_0000_0100),
            None
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::forward_checked(VirtAddr(0x7fff_ffff_ffff), 0x8000_0000_0001),
            None
        );
    }

    // `Step::backward` must jump the non-canonical gap in the other direction.
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_step_backward() {
        assert_eq!(Step::backward(VirtAddr(0), 0), VirtAddr(0));
        assert_eq!(Step::backward_checked(VirtAddr(0), 1), None);
        assert_eq!(Step::backward(VirtAddr(1), 1), VirtAddr(0));
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 1),
            VirtAddr(0x7fff_ffff_ffff)
        );
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0001), 1),
            VirtAddr(0xffff_8000_0000_0000)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::backward(VirtAddr(0xffff_9234_5678_9abc), 0x1234_5678_9abd),
            VirtAddr(0x7fff_ffff_ffff)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0000),
            VirtAddr(0)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::backward(VirtAddr(0xffff_8000_0000_0000), 0x7fff_ffff_ff01),
            VirtAddr(0xff)
        );
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::backward_checked(VirtAddr(0xffff_8000_0000_0000), 0x8000_0000_0001),
            None
        );
    }

    // Step counts must ignore the gap (distance across it is 1).
    #[test]
    #[cfg(feature = "step_trait")]
    fn virtaddr_steps_between() {
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(0)),
            (0, Some(0))
        );
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(1)),
            (1, Some(1))
        );
        assert_eq!(Step::steps_between(&VirtAddr(1), &VirtAddr(0)), (0, None));
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0x7fff_ffff_ffff),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (1, Some(1))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0x7fff_ffff_ffff)
            ),
            (0, None)
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (0, Some(0))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0000),
                &VirtAddr(0xffff_8000_0000_0001)
            ),
            (1, Some(1))
        );
        assert_eq!(
            Step::steps_between(
                &VirtAddr(0xffff_8000_0000_0001),
                &VirtAddr(0xffff_8000_0000_0000)
            ),
            (0, None)
        );
        // Make sure that we handle `steps > u32::MAX` correctly on 32-bit
        // targets. On 64-bit targets, `0x1_0000_0000` fits into `usize`, so we
        // can return exact lower and upper bounds. On 32-bit targets,
        // `0x1_0000_0000` doesn't fit into `usize`, so we only return an lower
        // bound of `usize::MAX` and don't return an upper bound.
        #[cfg(target_pointer_width = "64")]
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
            (0x1_0000_0000, Some(0x1_0000_0000))
        );
        #[cfg(not(target_pointer_width = "64"))]
        assert_eq!(
            Step::steps_between(&VirtAddr(0), &VirtAddr(0x1_0000_0000)),
            (usize::MAX, None)
        );
    }

    #[test]
    pub fn test_align_up() {
        // align 1
        assert_eq!(align_up(0, 1), 0);
        assert_eq!(align_up(1234, 1), 1234);
        assert_eq!(align_up(0xffff_ffff_ffff_ffff, 1), 0xffff_ffff_ffff_ffff);
        // align 2
        assert_eq!(align_up(0, 2), 0);
        assert_eq!(align_up(1233, 2), 1234);
        assert_eq!(align_up(0xffff_ffff_ffff_fffe, 2), 0xffff_ffff_ffff_fffe);
        // address 0
        assert_eq!(align_up(0, 128), 0);
        assert_eq!(align_up(0, 1), 0);
        assert_eq!(align_up(0, 2), 0);
        assert_eq!(align_up(0, 0x8000_0000_0000_0000), 0);
    }

    #[test]
    fn test_virt_addr_align_up() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0x7fff_ffff_ffff).align_up(2u64),
            VirtAddr::new(0xffff_8000_0000_0000)
        );
    }

    #[test]
    fn test_virt_addr_align_down() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0xffff_8000_0000_0000).align_down(1u64 << 48),
            VirtAddr::new(0)
        );
    }

    #[test]
    #[should_panic]
    fn test_virt_addr_align_up_overflow() {
        VirtAddr::new(0xffff_ffff_ffff_ffff).align_up(2u64);
    }

    #[test]
    #[should_panic]
    fn test_phys_addr_align_up_overflow() {
        PhysAddr::new(0x000f_ffff_ffff_ffff).align_up(2u64);
    }

    #[test]
    #[cfg(target_pointer_width = "64")]
    fn test_from_ptr_array() {
        let slice = &[1, 2, 3, 4, 5];
        // Make sure that from_ptr(slice) is the address of the first element
        assert_eq!(
            VirtAddr::from_ptr(slice.as_slice()),
            VirtAddr::from_ptr(&slice[0])
        );
    }
}
984
/// Kani proof harnesses for the `Step` implementation of [`VirtAddr`].
///
/// Unlike the unit tests above, these harnesses are checked for *all*
/// possible inputs by the Kani model checker (only compiled under
/// `#[cfg(kani)]`). The strategy is: prove `forward` correct directly, then
/// prove `backward` and `steps_between` correct by showing they are
/// symmetric to `forward`.
#[cfg(kani)]
mod proofs {
    use super::*;

    // The next two proof harnesses prove the correctness of the `forward`
    // implementation of VirtAddr.

    // This harness proves that our implementation can correctly take 0 or 1
    // step starting from any address.
    #[kani::proof]
    fn forward_base_case() {
        // `kani::any()` makes `start_raw` a symbolic value covering all u64s;
        // non-canonical starting addresses are discarded by the `try_new` guard.
        let start_raw: u64 = kani::any();
        let Ok(start) = VirtAddr::try_new(start_raw) else {
            return;
        };

        // Adding 0 to any address should always yield the same address.
        let same = Step::forward(start, 0);
        assert!(start == same);

        // Manually calculate the expected address after stepping once.
        // The match is exhaustive over the full u64 range, so the compiler
        // rejects this harness if any address range is left unhandled.
        let expected = match start_raw {
            // Adding 1 to addresses in this range don't require gap jumps, so
            // we can just add 1.
            0x0000_0000_0000_0000..=0x0000_7fff_ffff_fffe => Some(start_raw + 1),
            // Adding 1 to this address jumps the gap.
            0x0000_7fff_ffff_ffff => Some(0xffff_8000_0000_0000),
            // The range of non-canonical addresses.
            // Unreachable because `try_new` already rejected these above.
            0x0000_8000_0000_0000..=0xffff_7fff_ffff_ffff => unreachable!(),
            // Adding 1 to addresses in this range don't require gap jumps, so
            // we can just add 1.
            0xffff_8000_0000_0000..=0xffff_ffff_ffff_fffe => Some(start_raw + 1),
            // Adding 1 to this address causes an overflow.
            0xffff_ffff_ffff_ffff => None,
        };
        if let Some(expected) = expected {
            // Verify that `expected` is a valid address.
            assert!(VirtAddr::try_new(expected).is_ok());
        }
        // Verify `forward_checked`.
        let next = Step::forward_checked(start, 1);
        assert!(next.map(VirtAddr::as_u64) == expected);
    }

    // This harness proves that the result of taking two small steps is the
    // same as taking one combined large step.
    #[kani::proof]
    fn forward_induction_step() {
        let start_raw: u64 = kani::any();
        let Ok(start) = VirtAddr::try_new(start_raw) else {
            return;
        };

        // Both step counts are fully symbolic.
        let count1: usize = kani::any();
        let count2: usize = kani::any();
        // If we can take two small steps...
        let Some(next1) = Step::forward_checked(start, count1) else {
            return;
        };
        let Some(next2) = Step::forward_checked(next1, count2) else {
            return;
        };

        // ...then we can also take one combined large step.
        // NOTE(review): this relies on `count1 + count2` not overflowing
        // `usize`; both counts survived `forward_checked`, which bounds them
        // by the canonical address space — confirm this holds on the targets
        // Kani is run for.
        let count_both = count1 + count2;
        let next_both = Step::forward(start, count_both);
        assert!(next2 == next_both);
    }

    // The next two proof harnesses prove the correctness of the `backward`
    // implementation of VirtAddr using the `forward` implementation which
    // we've already proven to be correct.
    // They do this by proving the symmetry between those two functions.

    // This harness proves the correctness of the implementation of `backward`
    // for all inputs for which `forward_checked` succeeds.
    #[kani::proof]
    fn forward_implies_backward() {
        let start_raw: u64 = kani::any();
        let Ok(start) = VirtAddr::try_new(start_raw) else {
            return;
        };
        let count: usize = kani::any();

        // If `forward_checked` succeeds...
        let Some(end) = Step::forward_checked(start, count) else {
            return;
        };

        // ...then `backward` succeeds as well.
        // Stepping backward by the same count must return to the start.
        let start2 = Step::backward(end, count);
        assert!(start == start2);
    }

    // This harness proves that for all inputs for which `backward_checked`
    // succeeds, `forward` succeeds as well.
    #[kani::proof]
    fn backward_implies_forward() {
        let end_raw: u64 = kani::any();
        let Ok(end) = VirtAddr::try_new(end_raw) else {
            return;
        };
        let count: usize = kani::any();

        // If `backward_checked` succeeds...
        let Some(start) = Step::backward_checked(end, count) else {
            return;
        };

        // ...then `forward` succeeds as well.
        // Stepping forward by the same count must return to the end.
        let end2 = Step::forward(start, count);
        assert!(end == end2);
    }

    // The next two proof harnesses prove the correctness of the
    // `steps_between` implementation of VirtAddr using the `forward`
    // implementation which we've already proven to be correct.
    // They do this by proving the symmetry between those two functions.

    // This harness proves the correctness of the implementation of
    // `steps_between` for all inputs for which `forward_checked` succeeds.
    #[kani::proof]
    fn forward_implies_steps_between() {
        let start: u64 = kani::any();
        let Ok(start) = VirtAddr::try_new(start) else {
            return;
        };
        let count: usize = kani::any();

        // If `forward_checked` succeeds...
        let Some(end) = Step::forward_checked(start, count) else {
            return;
        };

        // ...then `steps_between` succeeds as well.
        // Since `count` fits in `usize` here, the bounds must be exact.
        assert!(Step::steps_between(&start, &end) == (count, Some(count)));
    }

    // This harness proves that for all inputs for which `steps_between`
    // succeeds, `forward` succeeds as well.
    #[kani::proof]
    fn steps_between_implies_forward() {
        let start: u64 = kani::any();
        let Ok(start) = VirtAddr::try_new(start) else {
            return;
        };
        let end: u64 = kani::any();
        let Ok(end) = VirtAddr::try_new(end) else {
            return;
        };

        // If `steps_between` succeeds...
        // (`.1` is the upper bound; `Some` means the exact count is known.)
        let Some(count) = Step::steps_between(&start, &end).1 else {
            return;
        };

        // ...then `forward` succeeds as well.
        assert!(Step::forward(start, count) == end);
    }
}