// hermit/arch/x86_64/mm/physicalmem.rs
use core::sync::atomic::{AtomicUsize, Ordering};

use free_list::{AllocError, FreeList, PageLayout, PageRange};
use hermit_sync::InterruptTicketMutex;
use memory_addresses::{PhysAddr, VirtAddr};
use x86_64::structures::paging::frame::PhysFrameRangeInclusive;
use x86_64::structures::paging::mapper::MapToError;
use x86_64::structures::paging::{Mapper, PageTableFlags, PhysFrame, Size2MiB};

use crate::arch::mm::paging::identity_mapped_page_table;
use crate::arch::x86_64::mm::paging::{BasePageSize, PageSize};
use crate::{env, mm};
13
/// Free list of physical page frames; the interrupt-safe ticket mutex also
/// guards against reentry from interrupt context.
pub static PHYSICAL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
	InterruptTicketMutex::new(FreeList::new());
/// Total physical memory handed to the free list so far, in bytes
/// (sum of all ranges passed through `init_frame_range`).
static TOTAL_MEMORY: AtomicUsize = AtomicUsize::new(0);
17
/// Makes a physical page range usable by the kernel: inserts it into
/// [`PHYSICAL_FREE_LIST`] and identity-maps it with 2 MiB pages, then
/// accounts it in [`TOTAL_MEMORY`].
///
/// # Safety
///
/// `frame_range` must describe real, otherwise unused physical memory;
/// handing an in-use range to the free list corrupts the allocator.
unsafe fn init_frame_range(frame_range: PageRange) {
	// Convert the byte-addressed range into an inclusive range of 2 MiB frames.
	let frames = {
		use x86_64::PhysAddr;

		let start = u64::try_from(frame_range.start()).unwrap();
		let end = u64::try_from(frame_range.end()).unwrap();

		// NOTE(review): `frame_range.end()` looks exclusive, so taking the frame
		// *containing* it can include one extra 2 MiB frame when `end` is
		// frame-aligned — presumably harmless for identity mapping; confirm.
		let start = PhysFrame::containing_address(PhysAddr::new(start));
		let end = PhysFrame::containing_address(PhysAddr::new(end));

		PhysFrameRangeInclusive::<Size2MiB> { start, end }
	};

	let mut physical_free_list = PHYSICAL_FREE_LIST.lock();

	// SAFETY: the caller guarantees this range is unused physical memory.
	unsafe {
		physical_free_list.deallocate(frame_range).unwrap();
	}

	// Identity-map every frame; the free list itself serves as the frame
	// allocator for any page tables the mapper has to create.
	let flags = PageTableFlags::PRESENT | PageTableFlags::WRITABLE;
	for frame in frames {
		let mapper_result = unsafe {
			identity_mapped_page_table().identity_map(frame, flags, &mut *physical_free_list)
		};

		match mapper_result {
			Ok(mapper_flush) => mapper_flush.flush(),
			// An existing mapping is fine as long as it already identity-maps
			// this frame; anything else would alias memory.
			Err(MapToError::PageAlreadyMapped(current_frame)) => assert_eq!(current_frame, frame),
			Err(err) => panic!("could not identity-map {frame:?}: {err:?}"),
		}
	}

	TOTAL_MEMORY.fetch_add(frame_range.len().get(), Ordering::Relaxed);
}
52
/// Detects usable RAM from the flattened device tree (FDT) and hands each
/// usable region to [`init_frame_range`].
///
/// Returns `Err(())` when no FDT is available or no usable region was found.
fn detect_from_fdt() -> Result<(), ()> {
	let fdt = env::fdt().ok_or(())?;

	// NOTE(review): only the *first* `reg` entry of each `/memory` node is
	// considered; further entries are ignored — confirm this matches the boot
	// protocols this path serves.
	let all_regions = fdt
		.find_all_nodes("/memory")
		.map(|m| m.reg().unwrap().next().unwrap());

	let mut found_ram = false;

	if env::is_uefi() {
		// Under UEFI, only the single largest region is used.
		let biggest_region = all_regions.max_by_key(|m| m.size.unwrap()).unwrap();
		found_ram = true;

		let range = PageRange::from_start_len(
			biggest_region.starting_address.addr(),
			biggest_region.size.unwrap(),
		)
		.unwrap();

		// SAFETY: the region comes from the firmware-provided memory map.
		unsafe {
			init_frame_range(range);
		}
	} else {
		for m in all_regions {
			let start_address = m.starting_address as u64;
			let size = m.size.unwrap() as u64;
			let end_address = start_address + size;

			// Skip regions that lie entirely below the end of the kernel image.
			if end_address <= mm::kernel_end_address().as_u64() {
				continue;
			}

			found_ram = true;

			// Clip the region so the kernel image itself is never handed to
			// the physical allocator.
			let start_address = if start_address <= mm::kernel_start_address().as_u64() {
				mm::kernel_end_address()
			} else {
				VirtAddr::new(start_address)
			};

			let range = PageRange::new(start_address.as_usize(), end_address as usize).unwrap();
			// SAFETY: the range lies in an FDT-reported RAM region above the kernel.
			unsafe {
				init_frame_range(range);
			}
		}
	}

	if found_ram { Ok(()) } else { Err(()) }
}
102
103pub fn init() {
104 detect_from_fdt().unwrap();
105}
106
/// Returns the total amount of physical memory registered with the free
/// list so far, in bytes.
pub fn total_memory_size() -> usize {
	TOTAL_MEMORY.load(Ordering::Relaxed)
}
110
111pub fn allocate(size: usize) -> Result<PhysAddr, AllocError> {
112 assert!(size > 0);
113 assert_eq!(
114 size % BasePageSize::SIZE as usize,
115 0,
116 "Size {:#X} is not a multiple of {:#X}",
117 size,
118 BasePageSize::SIZE
119 );
120
121 let layout = PageLayout::from_size(size).unwrap();
122
123 Ok(PhysAddr::new(
124 PHYSICAL_FREE_LIST
125 .lock()
126 .allocate(layout)?
127 .start()
128 .try_into()
129 .unwrap(),
130 ))
131}
132
133pub fn allocate_aligned(size: usize, align: usize) -> Result<PhysAddr, AllocError> {
134 assert!(size > 0);
135 assert!(align > 0);
136 assert_eq!(
137 size % align,
138 0,
139 "Size {size:#X} is not a multiple of the given alignment {align:#X}"
140 );
141 assert_eq!(
142 align % BasePageSize::SIZE as usize,
143 0,
144 "Alignment {:#X} is not a multiple of {:#X}",
145 align,
146 BasePageSize::SIZE
147 );
148
149 let layout = PageLayout::from_size_align(size, align).unwrap();
150
151 Ok(PhysAddr::new(
152 PHYSICAL_FREE_LIST
153 .lock()
154 .allocate(layout)?
155 .start()
156 .try_into()
157 .unwrap(),
158 ))
159}
160
161pub fn deallocate(physical_address: PhysAddr, size: usize) {
164 assert!(size > 0);
165 assert_eq!(
166 size % BasePageSize::SIZE as usize,
167 0,
168 "Size {:#X} is not a multiple of {:#X}",
169 size,
170 BasePageSize::SIZE
171 );
172
173 let range = PageRange::from_start_len(physical_address.as_u64() as usize, size).unwrap();
174 if let Err(_err) = unsafe { PHYSICAL_FREE_LIST.lock().deallocate(range) } {
175 error!("Unable to deallocate {range:?}");
176 }
177}
178
179#[allow(dead_code)]
180#[cfg(not(feature = "pci"))]
181pub fn reserve(physical_address: PhysAddr, size: usize) {
182 use align_address::Align;
183 assert!(
184 physical_address.is_aligned_to(BasePageSize::SIZE),
185 "Physical address {:p} is not a multiple of {:#X}",
186 physical_address,
187 BasePageSize::SIZE
188 );
189 assert!(size > 0);
190 assert_eq!(
191 size % BasePageSize::SIZE as usize,
192 0,
193 "Size {:#X} is not a multiple of {:#X}",
194 size,
195 BasePageSize::SIZE
196 );
197
198 let range = PageRange::from_start_len(physical_address.as_usize(), size).unwrap();
199
200 PHYSICAL_FREE_LIST.lock().allocate_at(range).ok();
202}
203
204pub fn print_information() {
205 let free_list = PHYSICAL_FREE_LIST.lock();
206 info!("Physical memory free list:\n{free_list}");
207}