// memory_addresses/arch/x86_64.rs
use align_address::Align;
4use core::fmt;
5#[cfg(feature = "conv-x86")]
6use x86::bits64::paging::{PAddr as x86_PAddr, VAddr as x86_VAddr};
7
8use crate::impl_address;
9
10use x86_64::structures::paging::page_table::PageTableLevel;
11use x86_64::structures::paging::{PageOffset, PageTableIndex};
12
/// A canonical x86_64 virtual address.
///
/// Bits 48..64 are kept as a sign extension of bit 47 (4-level paging);
/// constructors either reject or truncate non-canonical values.
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct VirtAddr(u64);

// Generates the shared address API (e.g. `as_u64`, used throughout this file)
// via the crate-local `impl_address!` macro.
impl_address!(VirtAddr, u64);
28
/// A 64-bit physical address.
///
/// Valid values fit in the low 52 bits; the constructors reject or truncate
/// anything with bits 52..64 set (see `PhysAddr::new` / `new_truncate`).
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[repr(transparent)]
pub struct PhysAddr(u64);

// Generates the shared address API (e.g. `as_u64`, used by the `Align` impl
// below) via the crate-local `impl_address!` macro.
impl_address!(PhysAddr, u64);
43
/// Error type for a virtual address that is not canonical (bits 48..64 are
/// not a sign extension of bit 47). Carries the rejected value so callers
/// can report it.
pub struct VirtAddrNotValid(pub u64);

impl core::fmt::Debug for VirtAddrNotValid {
    /// Formats as `VirtAddrNotValid(0x…)`, showing the offending address in hex.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = formatter.debug_tuple("VirtAddrNotValid");
        tuple.field(&format_args!("{:#x}", self.0));
        tuple.finish()
    }
}
61
impl VirtAddr {
    /// Creates a new canonical virtual address.
    ///
    /// # Panics
    ///
    /// Panics if bits 48..64 are not a correct sign extension of bit 47.
    #[inline]
    pub const fn new(addr: u64) -> VirtAddr {
        match Self::try_new(addr) {
            Ok(v) => v,
            Err(_) => panic!("virtual address must be sign extended in bits 48 to 64"),
        }
    }

    /// Tries to create a new canonical virtual address, returning the
    /// rejected value in `Err(VirtAddrNotValid)` if bits 48..64 are not a
    /// correct sign extension of bit 47.
    #[inline]
    pub const fn try_new(addr: u64) -> Result<VirtAddr, VirtAddrNotValid> {
        let v = Self::new_truncate(addr);
        if v.0 == addr {
            Ok(v)
        } else {
            Err(VirtAddrNotValid(addr))
        }
    }

    /// Creates a canonical virtual address by overwriting bits 48..64 with
    /// a sign extension of bit 47 (any existing upper bits are discarded).
    #[inline]
    pub const fn new_truncate(addr: u64) -> VirtAddr {
        // Shift bit 47 up into the sign bit, then arithmetic-shift back down
        // so the top 16 bits become copies of bit 47.
        VirtAddr(((addr << 16) as i64 >> 16) as u64)
    }

    /// Creates a virtual address from a pointer.
    ///
    /// The `*const ()` cast discards any metadata of `?Sized` pointees, so
    /// only the data address is kept. Panics (via `new`) if the pointer
    /// value is not canonical.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub fn from_ptr<T: ?Sized>(ptr: *const T) -> Self {
        Self::new(ptr as *const () as u64)
    }

    /// Converts the address to a raw `*const T` pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_ptr<T>(self) -> *const T {
        self.as_u64() as *const T
    }

    /// Converts the address to a raw `*mut T` pointer.
    #[cfg(target_pointer_width = "64")]
    #[inline]
    pub const fn as_mut_ptr<T>(self) -> *mut T {
        self.as_ptr::<T>() as *mut T
    }

    /// Returns the address as a `usize` (lossless on 64-bit targets).
    #[cfg(target_pointer_width = "64")]
    pub const fn as_usize(&self) -> usize {
        self.0 as usize
    }

    /// Returns whether the address is aligned to `align`, i.e. whether
    /// aligning it down leaves it unchanged. `align` is expected to follow
    /// the `align_address` crate's contract (a power of two) — confirm there.
    #[inline]
    pub fn is_aligned(self, align: u64) -> bool {
        self.align_down(align).as_u64() == self.as_u64()
    }

    /// Returns the low 12 bits of the address as the offset into the page.
    #[inline]
    pub const fn page_offset(self) -> PageOffset {
        PageOffset::new_truncate(self.0 as u16)
    }

    /// Returns the level-1 page-table index (derived from bits 12..).
    #[inline]
    pub const fn p1_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12) as u16)
    }

    /// Returns the level-2 page-table index (derived from bits 21..).
    #[inline]
    pub const fn p2_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9) as u16)
    }

    /// Returns the level-3 page-table index (derived from bits 30..).
    #[inline]
    pub const fn p3_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9) as u16)
    }

    /// Returns the level-4 page-table index (derived from bits 39..).
    #[inline]
    pub const fn p4_index(self) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> 9 >> 9 >> 9) as u16)
    }

    /// Generalizes `p1_index`..`p4_index`: returns the table index for
    /// `level` by shifting by `12 + (level - 1) * 9` bits.
    #[inline]
    pub const fn page_table_index(self, level: PageTableLevel) -> PageTableIndex {
        PageTableIndex::new_truncate((self.0 >> 12 >> ((level as u8 - 1) * 9)) as u16)
    }
}
178
179impl Align<u64> for VirtAddr {
180 #[inline]
181 fn align_down(self, align: u64) -> Self {
182 Self::new_truncate(self.0.align_down(align))
183 }
184
185 #[inline]
186 fn align_up(self, align: u64) -> Self {
187 Self::new_truncate(self.0.align_up(align))
188 }
189}
190
191#[cfg(target_pointer_width = "64")]
192impl From<usize> for VirtAddr {
194 fn from(addr: usize) -> VirtAddr {
195 Self::new_truncate(addr as u64)
196 }
197}
198
199#[cfg(target_pointer_width = "64")]
200impl core::ops::Add<usize> for VirtAddr {
202 type Output = Self;
203 #[inline]
204 fn add(self, rhs: usize) -> Self::Output {
205 VirtAddr::new(self.0 + rhs as u64)
206 }
207}
208
209#[cfg(target_pointer_width = "64")]
210impl core::ops::AddAssign<usize> for VirtAddr {
212 #[inline]
213 fn add_assign(&mut self, rhs: usize) {
214 *self = *self + rhs;
215 }
216}
217
218#[cfg(target_pointer_width = "64")]
219impl core::ops::Sub<usize> for VirtAddr {
221 type Output = Self;
222 #[inline]
223 fn sub(self, rhs: usize) -> Self::Output {
224 VirtAddr::new(self.0.checked_sub(rhs as u64).unwrap())
225 }
226}
227
228#[cfg(target_pointer_width = "64")]
229impl core::ops::SubAssign<usize> for VirtAddr {
231 #[inline]
232 fn sub_assign(&mut self, rhs: usize) {
233 *self = *self - rhs;
234 }
235}
236
#[cfg(feature = "conv-x86_64")]
impl From<x86_64::VirtAddr> for VirtAddr {
    /// Wraps the raw value of an `x86_64` crate virtual address.
    fn from(addr: x86_64::VirtAddr) -> Self {
        VirtAddr(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&x86_64::VirtAddr> for VirtAddr {
    fn from(addr: &x86_64::VirtAddr) -> Self {
        // `x86_64::VirtAddr` is `Copy`; reuse the by-value conversion.
        VirtAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<VirtAddr> for x86_64::VirtAddr {
    /// Converts into the `x86_64` crate's address type; its `new` re-checks
    /// that the value is canonical.
    fn from(addr: VirtAddr) -> x86_64::VirtAddr {
        x86_64::VirtAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&VirtAddr> for x86_64::VirtAddr {
    fn from(addr: &VirtAddr) -> x86_64::VirtAddr {
        // `VirtAddr` is `Copy`; reuse the by-value conversion.
        x86_64::VirtAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86")]
impl From<x86_VAddr> for VirtAddr {
    /// Wraps the raw value of an `x86` crate virtual address.
    fn from(addr: x86_VAddr) -> Self {
        VirtAddr(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86")]
impl From<&x86_VAddr> for VirtAddr {
    fn from(addr: &x86_VAddr) -> Self {
        // `x86::…::VAddr` is `Copy`; reuse the by-value conversion.
        VirtAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86")]
impl From<VirtAddr> for x86_VAddr {
    /// Converts into the `x86` crate's tuple-struct address type.
    fn from(addr: VirtAddr) -> x86_VAddr {
        x86_VAddr(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<&VirtAddr> for x86_VAddr {
    fn from(addr: &VirtAddr) -> x86_VAddr {
        x86_VAddr::from(*addr)
    }
}
290
/// Error type for a physical address with any of bits 52..64 set.
/// Carries the rejected value so callers can report it.
pub struct PhysAddrNotValid(pub u64);

impl core::fmt::Debug for PhysAddrNotValid {
    /// Formats as `PhysAddrNotValid(0x…)`, showing the offending address in hex.
    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut tuple = formatter.debug_tuple("PhysAddrNotValid");
        tuple.field(&format_args!("{:#x}", self.0));
        tuple.finish()
    }
}
305
306impl PhysAddr {
307 #[inline]
313 pub const fn new(addr: u64) -> Self {
314 match Self::try_new(addr) {
316 Ok(p) => p,
317 Err(_) => panic!("physical addresses must not have any bits in the range 52 to 64 set"),
318 }
319 }
320
321 #[inline]
323 pub const fn new_truncate(addr: u64) -> PhysAddr {
324 PhysAddr(addr % (1 << 52))
325 }
326
327 #[inline]
331 pub const fn try_new(addr: u64) -> Result<Self, PhysAddrNotValid> {
332 let p = Self::new_truncate(addr);
333 if p.0 == addr {
334 Ok(p)
335 } else {
336 Err(PhysAddrNotValid(addr))
337 }
338 }
339
340 #[cfg(target_pointer_width = "64")]
341 pub const fn as_usize(&self) -> usize {
343 self.0 as usize
344 }
345}
346
347impl Align<u64> for PhysAddr {
348 #[inline]
349 fn align_down(self, align: u64) -> Self {
350 Self::new(self.as_u64().align_down(align))
351 }
352
353 #[inline]
354 fn align_up(self, align: u64) -> Self {
355 Self::new(self.as_u64().align_up(align))
356 }
357}
358
359#[cfg(target_pointer_width = "64")]
360impl From<usize> for PhysAddr {
362 fn from(addr: usize) -> PhysAddr {
363 Self::new_truncate(addr as u64)
364 }
365}
366
367#[cfg(target_pointer_width = "64")]
368impl core::ops::Add<usize> for PhysAddr {
370 type Output = Self;
371 #[inline]
372 fn add(self, rhs: usize) -> Self::Output {
373 PhysAddr::new(self.0 + rhs as u64)
374 }
375}
376
377#[cfg(target_pointer_width = "64")]
378impl core::ops::AddAssign<usize> for PhysAddr {
380 #[inline]
381 fn add_assign(&mut self, rhs: usize) {
382 *self = *self + rhs;
383 }
384}
385
386#[cfg(target_pointer_width = "64")]
387impl core::ops::Sub<usize> for PhysAddr {
389 type Output = Self;
390 #[inline]
391 fn sub(self, rhs: usize) -> Self::Output {
392 PhysAddr::new(self.0.checked_sub(rhs as u64).unwrap())
393 }
394}
395
396#[cfg(target_pointer_width = "64")]
397impl core::ops::SubAssign<usize> for PhysAddr {
399 #[inline]
400 fn sub_assign(&mut self, rhs: usize) {
401 *self = *self - rhs;
402 }
403}
404
#[cfg(feature = "conv-x86_64")]
impl From<x86_64::PhysAddr> for PhysAddr {
    /// Wraps the raw value of an `x86_64` crate physical address.
    fn from(addr: x86_64::PhysAddr) -> Self {
        PhysAddr(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&x86_64::PhysAddr> for PhysAddr {
    fn from(addr: &x86_64::PhysAddr) -> Self {
        // `x86_64::PhysAddr` is `Copy`; reuse the by-value conversion.
        PhysAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<PhysAddr> for x86_64::PhysAddr {
    /// Converts into the `x86_64` crate's address type; its `new` re-checks
    /// the value range.
    fn from(addr: PhysAddr) -> x86_64::PhysAddr {
        x86_64::PhysAddr::new(addr.0)
    }
}

#[cfg(feature = "conv-x86_64")]
impl From<&PhysAddr> for x86_64::PhysAddr {
    fn from(addr: &PhysAddr) -> x86_64::PhysAddr {
        // `PhysAddr` is `Copy`; reuse the by-value conversion.
        x86_64::PhysAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86")]
impl From<x86_PAddr> for PhysAddr {
    /// Wraps the raw value of an `x86` crate physical address.
    fn from(addr: x86_PAddr) -> Self {
        PhysAddr(addr.as_u64())
    }
}

#[cfg(feature = "conv-x86")]
impl From<&x86_PAddr> for PhysAddr {
    fn from(addr: &x86_PAddr) -> Self {
        // `x86::…::PAddr` is `Copy`; reuse the by-value conversion.
        PhysAddr::from(*addr)
    }
}

#[cfg(feature = "conv-x86")]
impl From<PhysAddr> for x86_PAddr {
    /// Converts into the `x86` crate's tuple-struct address type.
    fn from(addr: PhysAddr) -> x86_PAddr {
        x86_PAddr(addr.0)
    }
}

#[cfg(feature = "conv-x86")]
impl From<&PhysAddr> for x86_PAddr {
    fn from(addr: &PhysAddr) -> x86_PAddr {
        x86_PAddr::from(*addr)
    }
}
458
459#[cfg(test)]
460mod tests {
461 use super::*;
462
    #[test]
    pub fn virtaddr_new_truncate() {
        // `new_truncate` must copy bit 47 into bits 48..64 and leave
        // already-canonical values untouched.
        assert_eq!(VirtAddr::new_truncate(0), VirtAddr(0));
        assert_eq!(VirtAddr::new_truncate(1 << 47), VirtAddr(0xfffff << 47));
        assert_eq!(VirtAddr::new_truncate(123), VirtAddr(123));
        assert_eq!(VirtAddr::new_truncate(123 << 47), VirtAddr(0xfffff << 47));
    }
470
    #[test]
    fn test_virt_addr_align_up() {
        // Aligning up across bit 47 must sign-extend into the upper half
        // (re-canonicalize) rather than produce a non-canonical address.
        assert_eq!(
            VirtAddr::new(0x7fff_ffff_ffff).align_up(2u64),
            VirtAddr::new(0xffff_8000_0000_0000)
        );
    }
479
    #[test]
    fn test_virt_addr_align_down() {
        // Aligning down by 2^48 clears the entire 48-bit address.
        assert_eq!(
            VirtAddr::new(0xffff_8000_0000_0000).align_down(1u64 << 48),
            VirtAddr::new(0)
        );
    }
488
    #[test]
    #[should_panic]
    fn test_virt_addr_align_up_overflow() {
        // u64 overflow while aligning up must panic instead of wrapping.
        VirtAddr::new(0xffff_ffff_ffff_ffff).align_up(2u64);
    }
494
    #[test]
    #[should_panic]
    fn test_phys_addr_align_up_overflow() {
        // Aligning the largest 52-bit address up sets bit 52, which
        // `PhysAddr::new` rejects with a panic.
        PhysAddr::new(0x000f_ffff_ffff_ffff).align_up(2u64);
    }
500
    #[test]
    fn test_from_ptr_array() {
        // A reference to an array and a reference to its first element
        // must convert to the same virtual address.
        let slice = &[1, 2, 3, 4, 5];
        assert_eq!(VirtAddr::from_ptr(slice), VirtAddr::from_ptr(&slice[0]));
    }
507}