talc/oom_handler.rs
use core::alloc::Layout;

use crate::{Span, Talc};

pub trait OomHandler: Sized {
    /// Given the allocator and the `layout` of the allocation that caused the
    /// OOM, extend or claim more memory and return `Ok(())`, or fail by
    /// returning `Err(())`.
    ///
    /// This function is called repeatedly while the allocator remains out of
    /// memory, so repeatedly returning `Ok(())` without extending or claiming
    /// new memory will result in an infinite loop.
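    ///
    /// # Example
    ///
    /// A minimal sketch of a custom handler (illustrative only;
    /// `request_memory_from_system` is a hypothetical platform call returning
    /// a `Result<Span, ()>`, not part of this crate):
    ///
    /// ```ignore
    /// use core::alloc::Layout;
    /// use talc::{OomHandler, Span, Talc};
    ///
    /// struct SystemClaimer;
    ///
    /// impl OomHandler for SystemClaimer {
    ///     fn handle_oom(talc: &mut Talc<Self>, layout: Layout) -> Result<(), ()> {
    ///         // ask the platform for at least enough memory to satisfy `layout`
    ///         let span: Span = request_memory_from_system(layout.size())?;
    ///
    ///         // claim the new memory; returning Ok(()) without claiming or
    ///         // extending would cause handle_oom to be called in a loop
    ///         unsafe { talc.claim(span)?; }
    ///         Ok(())
    ///     }
    /// }
    /// ```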
    fn handle_oom(talc: &mut Talc<Self>, layout: Layout) -> Result<(), ()>;
}

/// Does not handle out-of-memory conditions; allocation failure occurs immediately.
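///
/// # Example
///
/// A minimal sketch (assumes the crate's `Talc::new` constructor):
///
/// ```ignore
/// use talc::{ErrOnOom, Talc};
///
/// // allocations simply fail once the allocator's claimed memory is exhausted
/// let talc = Talc::new(ErrOnOom);
/// ```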
pub struct ErrOnOom;

impl OomHandler for ErrOnOom {
    fn handle_oom(_: &mut Talc<Self>, _: Layout) -> Result<(), ()> {
        Err(())
    }
}

/// An out-of-memory handler that attempts to claim the
/// memory within the given [`Span`] upon OOM.
///
/// The contained span is then overwritten with an empty span.
///
/// If the span is empty or `claim` fails, allocation failure occurs.
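///
/// # Example
///
/// A minimal sketch with a static arena (illustrative; assumes the crate's
/// `Talc::new` constructor, and the arena size is arbitrary):
///
/// ```ignore
/// use talc::{ClaimOnOom, Span, Talc};
///
/// static mut ARENA: [u8; 10000] = [0; 10000];
///
/// // the span covers the whole arena; nothing is claimed until the first OOM
/// let arena = unsafe {
///     let base = core::ptr::addr_of_mut!(ARENA).cast::<u8>();
///     Span::new(base, base.add(10000))
/// };
///
/// let talc = Talc::new(unsafe { ClaimOnOom::new(arena) });
/// ```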
pub struct ClaimOnOom(Span);

impl ClaimOnOom {
    /// # Safety
    /// The memory within the given [`Span`] must conform to
    /// the requirements laid out by [`claim`](Talc::claim).
    pub const unsafe fn new(span: Span) -> Self {
        ClaimOnOom(span)
    }
}

impl OomHandler for ClaimOnOom {
    fn handle_oom(talc: &mut Talc<Self>, _: Layout) -> Result<(), ()> {
        if !talc.oom_handler.0.is_empty() {
            unsafe {
                talc.claim(talc.oom_handler.0)?;
            }

            talc.oom_handler.0 = Span::empty();

            Ok(())
        } else {
            Err(())
        }
    }
}

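/// An out-of-memory handler for WebAssembly targets that tries to grow the
/// WASM heap via `memory.grow` whenever the allocator runs out of memory.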
#[cfg(all(target_family = "wasm", feature = "lock_api"))]
pub struct WasmHandler {
    prev_heap: Span,
}

#[cfg(all(target_family = "wasm", feature = "lock_api"))]
unsafe impl Send for WasmHandler {}

#[cfg(all(target_family = "wasm", feature = "lock_api"))]
impl WasmHandler {
    /// Create a new WASM handler.
    /// # Safety
    /// [`WasmHandler`] expects to have full control over WASM memory
    /// and be running in a single-threaded environment.
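    ///
    /// # Example
    ///
    /// A minimal sketch of a wasm global allocator (assumes the `Talck` locking
    /// wrapper and `lock` method provided by the `lock_api` feature; the
    /// `spin::Mutex` lock type is illustrative):
    ///
    /// ```ignore
    /// use talc::{Talc, Talck, WasmHandler};
    ///
    /// #[global_allocator]
    /// static ALLOCATOR: Talck<spin::Mutex<()>, WasmHandler> =
    ///     Talc::new(unsafe { WasmHandler::new() }).lock();
    /// ```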
    pub const unsafe fn new() -> Self {
        Self { prev_heap: Span::empty() }
    }
}

#[cfg(all(target_family = "wasm", feature = "lock_api"))]
impl OomHandler for WasmHandler {
    fn handle_oom(talc: &mut Talc<Self>, layout: Layout) -> Result<(), ()> {
        /// WASM page size is 64 KiB.
        const PAGE_SIZE: usize = 1024 * 64;

        // growth strategy: just try to grow enough to avoid OOM again on this allocation
        let required = (layout.size() + 8).max(layout.align() * 2);
        let mut delta_pages = (required + (PAGE_SIZE - 1)) / PAGE_SIZE;
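        // e.g. layout.size() == 100_000 with 8-byte alignment gives
        // required == 100_008, hence delta_pages == 2 (128 KiB requested)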

        let prev = 'prev: {
            // This performs a scan, trying smaller growths if the previous
            // attempt was unsuccessful, and returns any successfully
            // allocated memory. If it isn't quite enough, talc will invoke
            // handle_oom again.

            // if we're about to fail due to allocation failure anyway,
            // we may as well try as hard as we can to probe what's permissible,
            // which can be done with a log2(n)-ish algorithm
            // (factoring in repeated calls to handle_oom)
            while delta_pages != 0 {
                // use `core::arch::wasm` instead once it doesn't
                // require the unstable feature wasm_simd64?
                let result = core::arch::wasm32::memory_grow::<0>(delta_pages);

                if result != usize::MAX {
                    break 'prev result;
                } else {
                    delta_pages >>= 1;
                    continue;
                }
            }

            return Err(());
        };

        let prev_heap_acme = (prev * PAGE_SIZE) as *mut u8;
        let new_heap_acme = prev_heap_acme.wrapping_add(delta_pages * PAGE_SIZE);

        // try to get the base & acme, which will fail if prev_heap is empty;
        // otherwise the allocator has been initialized previously
        if let Some((prev_base, prev_acme)) = talc.oom_handler.prev_heap.get_base_acme() {
            if prev_acme == prev_heap_acme {
                talc.oom_handler.prev_heap = unsafe {
                    talc.extend(talc.oom_handler.prev_heap, Span::new(prev_base, new_heap_acme))
                };

                return Ok(());
            }
        }

        talc.oom_handler.prev_heap = unsafe {
            // delta_pages is always greater than zero,
            // thus one page is enough space for metadata,
            // therefore we can unwrap the result
            talc.claim(Span::new(prev_heap_acme, new_heap_acme)).unwrap()
        };

        Ok(())
    }
}