1use core::alloc::AllocError;
2use core::fmt;
3
4use free_list::{FreeList, PageLayout, PageRange};
5use hermit_sync::InterruptTicketMutex;
6use memory_addresses::VirtAddr;
7
8use crate::mm::{PageRangeAllocator, PageRangeBox};
9
/// Global free list of kernel virtual-page ranges backing [`PageAlloc`].
///
/// Guarded by an `InterruptTicketMutex` (presumably disables interrupts while
/// held — see `hermit_sync`), so it is also locked from the `Display` impl and
/// every allocation path below.
static KERNEL_FREE_LIST: InterruptTicketMutex<FreeList<16>> =
    InterruptTicketMutex::new(FreeList::new());
12
13pub struct PageAlloc;
14
15impl PageRangeAllocator for PageAlloc {
16 unsafe fn init() {
17 unsafe {
18 init();
19 }
20 }
21
22 fn allocate(layout: PageLayout) -> Result<PageRange, AllocError> {
23 KERNEL_FREE_LIST
24 .lock()
25 .allocate(layout)
26 .map_err(|_| AllocError)
27 }
28
29 fn allocate_at(range: PageRange) -> Result<(), AllocError> {
30 KERNEL_FREE_LIST
31 .lock()
32 .allocate_at(range)
33 .map_err(|_| AllocError)
34 }
35
36 unsafe fn deallocate(range: PageRange) {
37 unsafe {
38 KERNEL_FREE_LIST.lock().deallocate(range).unwrap();
39 }
40 }
41}
42
43impl fmt::Display for PageAlloc {
44 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
45 let free_list = KERNEL_FREE_LIST.lock();
46 write!(f, "PageAlloc free list:\n{free_list}")
47 }
48}
49
50pub type PageBox = PageRangeBox<PageAlloc>;
51
52unsafe fn init() {
53 let range = PageRange::new(
54 kernel_heap_end().as_usize().div_ceil(2),
55 kernel_heap_end().as_usize() + 1,
56 )
57 .unwrap();
58
59 unsafe {
60 PageAlloc::deallocate(range);
61 }
62}
63
/// Returns the last (inclusive) virtual address available to the kernel heap
/// for the current target architecture.
#[inline]
pub fn kernel_heap_end() -> VirtAddr {
    cfg_if::cfg_if! {
        if #[cfg(target_arch = "aarch64")] {
            // 2^48 - 1: last byte of a 48-bit virtual address space —
            // presumably matches the aarch64 paging setup; confirm.
            VirtAddr::new(0xFFFF_FFFF_FFFF)
        } else if #[cfg(target_arch = "riscv64")] {
            // 0x40_0000_0000 = 256 GiB; subtract 1 for the inclusive end.
            VirtAddr::new(0x0040_0000_0000 - 1)
        } else if #[cfg(target_arch = "x86_64")] {
            use x86_64::structures::paging::PageTableIndex;

            // The kernel heap ends just below the first P4 (PML4) entry that
            // is *not* kernel heap: entry 1 under "common-os", entry 256
            // (start of the higher half) otherwise.
            let p4_index = if cfg!(feature = "common-os") {
                PageTableIndex::new(1)
            } else {
                PageTableIndex::new(256)
            };

            // Each P4 entry covers 2^39 bytes, so this is the first address
            // governed by `p4_index`.
            let addr = u64::from(p4_index) << 39;
            // Sanity check: sign-extension/truncation must not move the
            // address into a different P4 entry.
            assert_eq!(VirtAddr::new_truncate(addr).p4_index(), p4_index);

            // Last byte *below* that entry, i.e. the inclusive heap end.
            VirtAddr::new_truncate(addr - 1)
        }
    }
}