memory_addresses/arch/x86_64.rs

//! Physical and virtual address manipulation

use align_address::Align;
use core::fmt;
#[cfg(feature = "conv-x86")]
use x86::bits64::paging::{PAddr as x86_PAddr, VAddr as x86_VAddr};

use crate::impl_address;

use x86_64::structures::paging::page_table::PageTableLevel;
use x86_64::structures::paging::{PageOffset, PageTableIndex};

/// A canonical 64-bit virtual memory address.
///
/// This is a wrapper type around a `u64`, so it is always 8 bytes, even when compiled
/// on non-64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 48 lower bits of a virtual address can be used. The top 16 bits need
/// to be copies of bit 47, i.e. the most significant bit of the lower 48 bits. Addresses that
/// fulfil this criterion are called “canonical”. This type guarantees that it always represents
/// a canonical address.
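///
/// ## Example
///
/// A minimal doc-test sketch of the canonical-form guarantee; it assumes this type is
/// re-exported at the crate root as `memory_addresses::VirtAddr`:
///
/// ```
/// use memory_addresses::VirtAddr;
///
/// // Bit 47 of the lower 48 bits is 0 here, so bits 48..64 are forced to 0 as well.
/// let addr = VirtAddr::new_truncate(0x1234_5678_9abc_def0);
/// assert_eq!(addr.as_u64(), 0x0000_5678_9abc_def0);
/// ```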
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct VirtAddr(u64);

impl_address!(VirtAddr, u64);

/// A 64-bit physical memory address.
///
/// This is a wrapper type around a `u64`, so it is always 8 bytes, even when compiled
/// on non-64-bit systems. The
/// [`TryFrom`](https://doc.rust-lang.org/std/convert/trait.TryFrom.html) trait can be used for performing conversions
/// between `u64` and `usize`.
///
/// On `x86_64`, only the 52 lower bits of a physical address can be used. The top 12 bits need
/// to be zero. This type guarantees that it always represents a valid physical address.
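///
/// ## Example
///
/// A minimal doc-test sketch of the 52-bit limit; it assumes this type is re-exported at
/// the crate root as `memory_addresses::PhysAddr`:
///
/// ```
/// use memory_addresses::PhysAddr;
///
/// // Bits 52..64 are discarded by the truncating constructor.
/// let addr = PhysAddr::new_truncate(0xfff0_0000_0000_1234);
/// assert_eq!(addr.as_u64(), 0x0000_0000_0000_1234);
/// ```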
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct PhysAddr(u64);

impl_address!(PhysAddr, u64);

/// A passed `u64` was not a valid virtual address.
///
/// This means that bits 48 to 64 are not a correct sign extension of bit 47 (i.e. the address
/// is not canonical), so sign-extending automatically would have overwritten possibly
/// meaningful bits. This likely indicates a bug, for example an invalid address calculation.
///
/// Contains the invalid address.
pub struct VirtAddrNotValid(pub u64);

impl core::fmt::Debug for VirtAddrNotValid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("VirtAddrNotValid")
            .field(&format_args!("{:#x}", self.0))
            .finish()
    }
}

impl VirtAddr {
    /// Creates a new canonical virtual address.
    ///
    /// The provided address should already be canonical. If you want to check
    /// whether an address is canonical, use [`try_new`](Self::try_new).
    ///
    /// ## Panics
    ///
    /// This function panics if the bits in the range 48 to 64 are invalid
    /// (i.e. are not a proper sign extension of bit 47).
    #[inline]
    pub const fn new(addr: u64) -> VirtAddr {
        // TODO: Replace with .ok().expect(msg) when that works on stable.
        match Self::try_new(addr) {
            Ok(v) => v,
            Err(_) => panic!("virtual address must be sign extended in bits 48 to 64"),
        }
    }

    /// Tries to create a new canonical virtual address.
    ///
    /// This function checks whether the given address is canonical
    /// and returns an error otherwise. An address is canonical
    /// if bits 48 to 64 are a correct sign
    /// extension (i.e. copies of bit 47).
    #[inline]
    pub const fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
        let v = Self::new_truncate(addr);
        if v.0 == addr {
            Ok(v)
        } else {
            Err(VirtAddrNotValid(addr))
        }
    }

    /// Creates a new canonical virtual address, throwing out bits 48..64.
    ///
    /// This function performs sign extension of bit 47 to make the address
    /// canonical, overwriting bits 48 to 64. If you want to check whether an
    /// address is canonical, use [`new`](Self::new) or [`try_new`](Self::try_new).
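    ///
    /// ## Example
    ///
    /// A minimal doc-test sketch of the truncating behaviour (crate-root re-export of
    /// `VirtAddr` assumed, as above):
    ///
    /// ```
    /// use memory_addresses::VirtAddr;
    ///
    /// // Bit 47 is set, so bits 48..64 are filled with ones.
    /// let addr = VirtAddr::new_truncate(0x8000_0000_0000);
    /// assert_eq!(addr.as_u64(), 0xffff_8000_0000_0000);
    /// ```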
    #[inline]
    pub const fn new_truncate(addr: u64) -> VirtAddr {
        // By doing the right shift as a signed operation (on an i64), it will
        // sign extend the value, repeating the leftmost bit.
        VirtAddr(((addr << 16) as i64 >> 16) as u64)
    }

    /// Creates a virtual address from the given pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
        Self::new(ptr as *const () as u64)
    }

    /// Converts the address to a raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_ptr<T>(self) -> *const T {
        self.as_u64() as *const T
    }

    /// Converts the address to a mutable raw pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_mut_ptr<T>(self) -> *mut T {
        self.as_ptr::<T>() as *mut T
    }

    /// Converts the address to a `usize`.
    #[cfg(target_pointer_width = "64")]
    // if the target_pointer_width is 64, usize = u64 so we can safely transform.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Checks whether the virtual address has the demanded alignment.
    #[inline]
    pub fn is_aligned(self, align: u64) -> bool {
        self.align_down(align).as_u64() == self.as_u64()
    }

    /// Returns the 12-bit page offset of this virtual address.
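    ///
    /// ## Example
    ///
    /// A small doc-test sketch (it relies on the `PageOffset` type imported above from the
    /// `x86_64` crate and on the assumed crate-root re-export of `VirtAddr`):
    ///
    /// ```
    /// use memory_addresses::VirtAddr;
    /// use x86_64::structures::paging::PageOffset;
    ///
    /// // Only the lowest 12 bits form the page offset.
    /// assert_eq!(VirtAddr::new(0x1abc).page_offset(), PageOffset::new_truncate(0xabc));
    /// ```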
    #[inline]
    pub const fn page_offset(self) -> PageOffset {
        PageOffset::new_truncate(self.0 as u16)
    }

    /// Returns the 9-bit level 1 page table index.
    #[inline]
    pub const fn p1_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12) as u16)
    }

    /// Returns the 9-bit level 2 page table index.
    #[inline]
    pub const fn p2_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9) as u16)
    }

    /// Returns the 9-bit level 3 page table index.
    #[inline]
    pub const fn p3_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit level 4 page table index.
    #[inline]
    pub const fn p4_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
    }

    /// Returns the 9-bit page table index for the given page table level.
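    ///
    /// ## Example
    ///
    /// A small doc-test sketch of the index extraction (it relies on the `PageTableLevel`
    /// and `PageTableIndex` types imported above from the `x86_64` crate):
    ///
    /// ```
    /// use memory_addresses::VirtAddr;
    /// use x86_64::structures::paging::page_table::PageTableLevel;
    /// use x86_64::structures::paging::PageTableIndex;
    ///
    /// // Index 5 in the level-2 slot occupies bits 21..30 of the address.
    /// let addr = VirtAddr::new(5 << 21);
    /// assert_eq!(addr.page_table_index(PageTableLevel::Two), PageTableIndex::new(5));
    /// assert_eq!(addr.p2_index(), PageTableIndex::new(5));
    /// ```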
    #[inline]
    pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
    }
}

impl Align<u64> for VirtAddr {
    #[inline]
    fn align_down(self, align: u64) -> Self {
        Self::new_truncate(self.0.align_down(align))
    }

    #[inline]
    fn align_up(self, align: u64) -> Self {
        Self::new_truncate(self.0.align_up(align))
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely transform.
impl From<usize> for VirtAddr {
    fn from(addr: usize) -> VirtAddr {
        Self::new_truncate(addr as u64)
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely add
impl core::ops::Add<usize> for VirtAddr {
    type Output = Self;
    #[inline]
    fn add(self, rhs: usize) -> Self::Output {
        VirtAddr::new(self.0 + rhs as u64)
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely add
impl core::ops::AddAssign<usize> for VirtAddr {
    #[inline]
    fn add_assign(&mut self, rhs: usize) {
        *self = *self + rhs;
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely sub
impl core::ops::Sub<usize> for VirtAddr {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: usize) -> Self::Output {
        VirtAddr::new(self.0.checked_sub(rhs as u64).unwrap())
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely sub
impl core::ops::SubAssign<usize> for VirtAddr {
    #[inline]
    fn sub_assign(&mut self, rhs: usize) {
        *self = *self - rhs;
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<x86_64::VirtAddr> for VirtAddr {
    fn from(addr: x86_64::VirtAddr) -> Self {
        Self(addr.as_u64())
    }
}
#[cfg(feature = "conv-x86_64")]
impl From<&x86_64::VirtAddr> for VirtAddr {
    fn from(addr: &x86_64::VirtAddr) -> Self {
        Self(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<VirtAddr> for x86_64::VirtAddr {
    fn from(addr: VirtAddr) -> x86_64::VirtAddr {
        x86_64::VirtAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&VirtAddr> for x86_64::VirtAddr {
    fn from(addr: &VirtAddr) -> x86_64::VirtAddr {
        x86_64::VirtAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<x86_VAddr> for VirtAddr {
    fn from(addr: x86_VAddr) -> Self {
        Self(addr.as_u64())
    }
}
#[cfg(feature = "conv-x86")]
impl From<&x86_VAddr> for VirtAddr {
    fn from(addr: &x86_VAddr) -> Self {
        Self(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86")]
impl From<VirtAddr> for x86_VAddr {
    fn from(addr: VirtAddr) -> x86_VAddr {
        x86_VAddr(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<&VirtAddr> for x86_VAddr {
    fn from(addr: &VirtAddr) -> x86_VAddr {
        x86_VAddr(addr.0)
    }
}

/// A passed `u64` was not a valid physical address.
///
/// This means that bits 52 to 64 were not all zero.
///
/// Contains the invalid address.
pub struct PhysAddrNotValid(pub u64);

impl core::fmt::Debug for PhysAddrNotValid {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_tuple("PhysAddrNotValid")
            .field(&format_args!("{:#x}", self.0))
            .finish()
    }
}

impl PhysAddr {
    /// Creates a new physical address.
    ///
    /// ## Panics
    ///
    /// This function panics if a bit in the range 52 to 64 is set.
    #[inline]
    pub const fn new(addr: u64) -> Self {
        // TODO: Replace with .ok().expect(msg) when that works on stable.
        match Self::try_new(addr) {
            Ok(p) => p,
            Err(_) => panic!("physical addresses must not have any bits in the range 52 to 64 set"),
        }
    }

    /// Creates a new physical address, throwing bits 52..64 away.
    #[inline]
    pub const fn new_truncate(addr: u64) -> PhysAddr {
        PhysAddr(addr % (1 << 52))
    }

    /// Tries to create a new physical address.
    ///
    /// Fails if any bits in the range 52 to 64 are set.
    #[inline]
    pub const fn try_new(addr: u64) -> Result<Self, PhysAddrNotValid> {
        let p = Self::new_truncate(addr);
        if p.0 == addr {
            Ok(p)
        } else {
            Err(PhysAddrNotValid(addr))
        }
    }

    /// Converts the address to a `usize`.
    #[cfg(target_pointer_width = "64")]
    // if the target_pointer_width is 64, usize = u64 so we can safely transform.
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }
}

impl Align<u64> for PhysAddr {
    #[inline]
    fn align_down(self, align: u64) -> Self {
        Self::new(self.as_u64().align_down(align))
    }

    #[inline]
    fn align_up(self, align: u64) -> Self {
        Self::new(self.as_u64().align_up(align))
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely transform.
impl From<usize> for PhysAddr {
    fn from(addr: usize) -> PhysAddr {
        Self::new_truncate(addr as u64)
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely add
impl core::ops::Add<usize> for PhysAddr {
    type Output = Self;
    #[inline]
    fn add(self, rhs: usize) -> Self::Output {
        PhysAddr::new(self.0 + rhs as u64)
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely add
impl core::ops::AddAssign<usize> for PhysAddr {
    #[inline]
    fn add_assign(&mut self, rhs: usize) {
        *self = *self + rhs;
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely sub
impl core::ops::Sub<usize> for PhysAddr {
    type Output = Self;
    #[inline]
    fn sub(self, rhs: usize) -> Self::Output {
        PhysAddr::new(self.0.checked_sub(rhs as u64).unwrap())
    }
}

#[cfg(target_pointer_width = "64")]
// if the target_pointer_width is 64, usize = u64 so we can safely sub
impl core::ops::SubAssign<usize> for PhysAddr {
    #[inline]
    fn sub_assign(&mut self, rhs: usize) {
        *self = *self - rhs;
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<x86_64::PhysAddr> for PhysAddr {
    fn from(addr: x86_64::PhysAddr) -> Self {
        Self(addr.as_u64())
    }
}
#[cfg(feature = "conv-x86_64")]
impl From<&x86_64::PhysAddr> for PhysAddr {
    fn from(addr: &x86_64::PhysAddr) -> Self {
        Self(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<PhysAddr> for x86_64::PhysAddr {
    fn from(addr: PhysAddr) -> x86_64::PhysAddr {
        x86_64::PhysAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&PhysAddr> for x86_64::PhysAddr {
    fn from(addr: &PhysAddr) -> x86_64::PhysAddr {
        x86_64::PhysAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<x86_PAddr> for PhysAddr {
    fn from(addr: x86_PAddr) -> Self {
        Self(addr.as_u64())
    }
}
#[cfg(feature = "conv-x86")]
impl From<&x86_PAddr> for PhysAddr {
    fn from(addr: &x86_PAddr) -> Self {
        Self(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86")]
impl From<PhysAddr> for x86_PAddr {
    fn from(addr: PhysAddr) -> x86_PAddr {
        x86_PAddr(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<&PhysAddr> for x86_PAddr {
    fn from(addr: &PhysAddr) -> x86_PAddr {
        x86_PAddr(addr.0)
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    pub fn virtaddr_new_truncate() {
        assert_eq!(VirtAddr::new_truncate(0), VirtAddr(0));
        assert_eq!(VirtAddr::new_truncate(1 << 47), VirtAddr(0xfffff << 47));
        assert_eq!(VirtAddr::new_truncate(123), VirtAddr(123));
        assert_eq!(VirtAddr::new_truncate(123 << 47), VirtAddr(0xfffff << 47));
    }
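
    // A quick check of the `usize` arithmetic defined above. This is a sketch that only
    // exercises impls from this file; Add/Sub are gated to 64-bit targets, where `usize`
    // and `u64` have the same width.
    #[cfg(target_pointer_width = "64")]
    #[test]
    fn virtaddr_usize_arithmetic() {
        let addr = VirtAddr::new(0x1000);
        assert_eq!(addr + 0x234usize, VirtAddr::new(0x1234));
        assert_eq!((addr + 0x234usize) - 0x234usize, addr);
    }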

    #[test]
    fn test_virt_addr_align_up() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0x7fff_ffff_ffff).align_up(2u64),
            VirtAddr::new(0xffff_8000_0000_0000)
        );
    }

    #[test]
    fn test_virt_addr_align_down() {
        // Make sure the 47th bit is extended.
        assert_eq!(
            VirtAddr::new(0xffff_8000_0000_0000).align_down(1u64 << 48),
            VirtAddr::new(0)
        );
    }

    #[test]
    #[should_panic]
    fn test_virt_addr_align_up_overflow() {
        VirtAddr::new(0xffff_ffff_ffff_ffff).align_up(2u64);
    }

    #[test]
    #[should_panic]
    fn test_phys_addr_align_up_overflow() {
        PhysAddr::new(0x000f_ffff_ffff_ffff).align_up(2u64);
    }

    #[test]
    fn test_from_ptr_array() {
        let slice = &[1, 2, 3, 4, 5];
        // Make sure that from_ptr(slice) is the address of the first element.
        assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
    }
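
    // A sketch of a round trip through the optional `x86_64` crate conversions; it only
    // compiles when the `conv-x86_64` feature is enabled and relies solely on the `From`
    // impls defined in this file.
    #[cfg(feature = "conv-x86_64")]
    #[test]
    fn test_virt_addr_x86_64_roundtrip() {
        let ours = VirtAddr::new(0xdead_b000);
        let theirs: x86_64::VirtAddr = ours.into();
        assert_eq!(VirtAddr::from(theirs), ours);
    }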
}