mod llist;
mod tag;

#[cfg(feature = "counters")]
pub mod counters;

use crate::{ptr_utils::*, OomHandler, Span};
use core::{
    alloc::Layout,
    ptr::{null_mut, NonNull},
};
use llist::LlistNode;
use tag::Tag;

const NODE_SIZE: usize = core::mem::size_of::<LlistNode>();
const TAG_SIZE: usize = core::mem::size_of::<Tag>();

const MIN_TAG_OFFSET: usize = NODE_SIZE;
const MIN_CHUNK_SIZE: usize = MIN_TAG_OFFSET + TAG_SIZE;
const MIN_HEAP_SIZE: usize = MIN_CHUNK_SIZE + TAG_SIZE;

const BIN_COUNT: usize = usize::BITS as usize * 2;

type Bin = Option<NonNull<LlistNode>>;
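
// Free chunk ("gap") layout: a linked-list node sits at the gap's base, the
// gap's size is stored directly after the node, and the size is repeated in
// the word just below the acme (the gap's upper bound), so a gap can be
// located from either of its neighbours.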
const GAP_NODE_OFFSET: usize = 0;
const GAP_LOW_SIZE_OFFSET: usize = NODE_SIZE;
const GAP_HIGH_SIZE_OFFSET: usize = WORD_SIZE;

#[inline]
unsafe fn gap_base_to_node(base: *mut u8) -> *mut LlistNode {
    base.add(GAP_NODE_OFFSET).cast()
}
#[inline]
unsafe fn gap_base_to_size(base: *mut u8) -> *mut usize {
    base.add(GAP_LOW_SIZE_OFFSET).cast()
}
#[inline]
unsafe fn gap_base_to_acme(base: *mut u8) -> *mut u8 {
    gap_base_to_acme_size(base).0
}
#[inline]
unsafe fn gap_base_to_acme_size(base: *mut u8) -> (*mut u8, usize) {
    let size = gap_base_to_size(base).read();
    (base.add(size), size)
}
#[inline]
unsafe fn gap_acme_to_size(acme: *mut u8) -> *mut usize {
    acme.sub(GAP_HIGH_SIZE_OFFSET).cast()
}
#[inline]
unsafe fn gap_acme_to_base(acme: *mut u8) -> *mut u8 {
    gap_acme_to_base_size(acme).0
}
#[inline]
unsafe fn gap_acme_to_base_size(acme: *mut u8) -> (*mut u8, usize) {
    let size = gap_acme_to_size(acme).read();
    (acme.sub(size), size)
}
#[inline]
unsafe fn gap_node_to_base(node: NonNull<LlistNode>) -> *mut u8 {
    node.as_ptr().cast::<u8>().sub(GAP_NODE_OFFSET).cast()
}
#[inline]
unsafe fn gap_node_to_size(node: NonNull<LlistNode>) -> *mut usize {
    node.as_ptr().cast::<u8>().sub(GAP_NODE_OFFSET).add(GAP_LOW_SIZE_OFFSET).cast()
}
#[inline]
unsafe fn is_gap_below(acme: *mut u8) -> bool {
    gap_acme_to_size(acme).read() & Tag::ALLOCATED_FLAG == 0
}
#[inline]
unsafe fn is_gap_above_heap_base(heap_base: *mut u8) -> bool {
    heap_base.cast::<Tag>().read().is_above_free()
}
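
/// Locate the [`Tag`] of an allocation, given the allocation's pointer and size.
///
/// The word directly after the allocation either is the tag itself or holds a
/// pointer to a tag further up the chunk, as written by `malloc`.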
90#[inline]
92unsafe fn tag_from_alloc_ptr(ptr: *mut u8, size: usize) -> (*mut u8, Tag) {
93 let post_alloc_ptr = align_up(ptr.add(size));
94 let tag_or_tag_ptr = post_alloc_ptr.cast::<*mut u8>().read();
96
97 if tag_or_tag_ptr > post_alloc_ptr {
101 (tag_or_tag_ptr, tag_or_tag_ptr.cast::<Tag>().read())
102 } else {
103 (post_alloc_ptr, Tag(tag_or_tag_ptr))
104 }
105}
106
107#[inline]
109fn is_chunk_size(base: *mut u8, acme: *mut u8) -> bool {
110 debug_assert!(acme >= base, "!(acme {:p} >= base {:p})", acme, base);
111 acme as usize - base as usize >= MIN_CHUNK_SIZE
112}
113
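
/// Determine the bin index for a chunk of the given size.
///
/// Bins are word-spaced up to `WORD_BIN_LIMIT`, double-word-spaced up to
/// `DOUBLE_BIN_LIMIT`, and pseudo-logarithmically spaced beyond that,
/// saturating at `BIN_COUNT - 1`.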
114#[inline]
116unsafe fn bin_of_size(size: usize) -> usize {
117 const ERRMSG: &str = "Unsupported system word size, open an issue/create a PR!";
129
130 const WORD_BIN_LIMIT: usize = match WORD_SIZE {
132 8 => 256,
133 4 => 64,
134 _ => panic!("{}", ERRMSG),
135 };
136 const DOUBLE_BIN_LIMIT: usize = match WORD_SIZE {
138 8 => 512,
139 4 => 128,
140 _ => panic!("{}", ERRMSG),
141 };
142 const DIVS_PER_POW2: usize = match WORD_SIZE {
144 8 => 4,
145 4 => 2,
146 _ => panic!("{}", ERRMSG),
147 };
148 const DIV_BITS: usize = DIVS_PER_POW2.ilog2() as usize;
150
151 const DBL_BUCKET: usize = (WORD_BIN_LIMIT - MIN_CHUNK_SIZE) / WORD_SIZE;
153 const EXP_BUCKET: usize = DBL_BUCKET + (DOUBLE_BIN_LIMIT - WORD_BIN_LIMIT) / WORD_SIZE / 2;
155 const MIN_EXP_BITS_LESS_ONE: usize = DOUBLE_BIN_LIMIT.ilog2() as usize;
157
158 debug_assert!(size >= MIN_CHUNK_SIZE);
159
160 if size < WORD_BIN_LIMIT {
161 (size - MIN_CHUNK_SIZE) / WORD_SIZE
164 } else if size < DOUBLE_BIN_LIMIT {
165 size / (2 * WORD_SIZE) - WORD_BIN_LIMIT / (2 * WORD_SIZE) + DBL_BUCKET
170 } else {
171 let bits_less_one = size.ilog2() as usize;
179
180 let magnitude = bits_less_one - MIN_EXP_BITS_LESS_ONE;
183 let division = (size >> (bits_less_one - DIV_BITS)) - DIVS_PER_POW2;
186 let bucket_offset = magnitude * DIVS_PER_POW2 + division;
188
189 (bucket_offset + EXP_BUCKET).min(BIN_COUNT - 1)
191 }
192}
193
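
/// The Talc allocator.
///
/// `availability_low` and `availability_high` together form a bitmap of
/// non-empty bins, and `bins` points to the in-heap array of `BIN_COUNT`
/// free-list heads (null until a heap has been established).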
pub struct Talc<O: OomHandler> {
    availability_low: usize,
    availability_high: usize,
    bins: *mut Bin,

    pub oom_handler: O,

    #[cfg(feature = "counters")]
    counters: counters::Counters,
}

unsafe impl<O: Send + OomHandler> Send for Talc<O> {}

impl<O: OomHandler> core::fmt::Debug for Talc<O> {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        f.debug_struct("Talc")
            .field("availability_low", &format_args!("{:x}", self.availability_low))
            .field("availability_high", &format_args!("{:x}", self.availability_high))
            .field("metadata_ptr", &self.bins)
            .finish()
    }
}

impl<O: OomHandler> Talc<O> {
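
    /// Returns the minimum chunk size able to hold `size` bytes plus the
    /// trailing tag, rounded up to `ALIGN`.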
    #[inline]
    const fn required_chunk_size(size: usize) -> usize {
        if size <= MIN_CHUNK_SIZE - TAG_SIZE {
            MIN_CHUNK_SIZE
        } else {
            (size + TAG_SIZE + (ALIGN - 1)) & !(ALIGN - 1)
        }
    }

    #[inline]
    unsafe fn get_bin_ptr(&self, bin: usize) -> *mut Bin {
        debug_assert!(bin < BIN_COUNT);

        self.bins.add(bin)
    }
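
    /// Mark bin `b` as non-empty in the availability bitmap;
    /// `clear_avails` below performs the inverse once the bin empties.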
    #[inline]
    fn set_avails(&mut self, b: usize) {
        debug_assert!(b < BIN_COUNT);

        if b < WORD_BITS {
            debug_assert!(self.availability_low & 1 << b == 0);
            self.availability_low ^= 1 << b;
        } else {
            debug_assert!(self.availability_high & 1 << (b - WORD_BITS) == 0);
            self.availability_high ^= 1 << (b - WORD_BITS);
        }
    }
    #[inline]
    fn clear_avails(&mut self, b: usize) {
        debug_assert!(b < BIN_COUNT);

        if b < WORD_BITS {
            self.availability_low ^= 1 << b;
            debug_assert!(self.availability_low & 1 << b == 0);
        } else {
            self.availability_high ^= 1 << (b - WORD_BITS);
            debug_assert!(self.availability_high & 1 << (b - WORD_BITS) == 0);
        }
    }
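
    /// Insert the gap spanning `base..acme` into the appropriate bin's free
    /// list and record its size at both ends; `deregister_gap` undoes this.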
    #[inline]
    unsafe fn register_gap(&mut self, base: *mut u8, acme: *mut u8) {
        debug_assert!(is_chunk_size(base, acme));

        let size = acme as usize - base as usize;
        let bin = bin_of_size(size);

        let bin_ptr = self.get_bin_ptr(bin);

        if (*bin_ptr).is_none() {
            self.set_avails(bin);
        }

        LlistNode::insert(gap_base_to_node(base), bin_ptr, *bin_ptr);

        debug_assert!((*bin_ptr).is_some());

        gap_base_to_size(base).write(size);
        gap_acme_to_size(acme).write(size);

        #[cfg(feature = "counters")]
        self.counters.account_register_gap(size);
    }

    #[inline]
    unsafe fn deregister_gap(&mut self, base: *mut u8, bin: usize) {
        debug_assert!((*self.get_bin_ptr(bin)).is_some());
        #[cfg(feature = "counters")]
        self.counters.account_deregister_gap(gap_base_to_size(base).read());

        LlistNode::remove(gap_base_to_node(base));

        if (*self.get_bin_ptr(bin)).is_none() {
            self.clear_avails(bin);
        }
    }
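
    /// Allocate a contiguous region of memory according to `layout`, if possible.
    ///
    /// # Safety
    /// `layout.size()` must be nonzero.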
    pub unsafe fn malloc(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
        debug_assert!(layout.size() != 0);
        self.scan_for_errors();

        let (mut free_base, free_acme, alloc_base) = loop {
            match self.get_sufficient_chunk(layout) {
                Some(payload) => break payload,
                None => _ = O::handle_oom(self, layout)?,
            }
        };

        let chunk_base_ceil = alloc_base.min(free_acme.sub(MIN_CHUNK_SIZE));
        if is_chunk_size(free_base, chunk_base_ceil) {
            self.register_gap(free_base, chunk_base_ceil);
            free_base = chunk_base_ceil;
        } else {
            Tag::clear_above_free(free_base.sub(TAG_SIZE).cast());
        }

        let post_alloc_ptr = align_up(alloc_base.add(layout.size()));
        let mut tag_ptr = free_base.add(MIN_TAG_OFFSET).max(post_alloc_ptr);
        let min_alloc_chunk_acme = tag_ptr.add(TAG_SIZE);

        if is_chunk_size(min_alloc_chunk_acme, free_acme) {
            self.register_gap(min_alloc_chunk_acme, free_acme);
            Tag::write(tag_ptr.cast(), free_base, true);
        } else {
            tag_ptr = free_acme.sub(TAG_SIZE);
            Tag::write(tag_ptr.cast(), free_base, false);
        }

        if tag_ptr != post_alloc_ptr {
            post_alloc_ptr.cast::<*mut u8>().write(tag_ptr);
        }

        #[cfg(feature = "counters")]
        self.counters.account_alloc(layout.size());

        Ok(NonNull::new_unchecked(alloc_base))
    }
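
    /// Returns `(chunk_base, chunk_acme, alloc_base)` for a free chunk large
    /// enough to satisfy `layout`, removing it from its bin, or `None` if no
    /// such chunk exists.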
    unsafe fn get_sufficient_chunk(
        &mut self,
        layout: Layout,
    ) -> Option<(*mut u8, *mut u8, *mut u8)> {
        let required_chunk_size = Self::required_chunk_size(layout.size());

        let mut bin = self.next_available_bin(bin_of_size(required_chunk_size))?;

        if layout.align() <= ALIGN {
            loop {
                for node_ptr in LlistNode::iter_mut(*self.get_bin_ptr(bin)) {
                    let size = gap_node_to_size(node_ptr).read();

                    if size >= required_chunk_size {
                        let base = gap_node_to_base(node_ptr);
                        self.deregister_gap(base, bin);
                        return Some((base, base.add(size), base));
                    }
                }

                bin = self.next_available_bin(bin + 1)?;
            }
        } else {
            let align_mask = layout.align() - 1;
            let required_size = layout.size() + TAG_SIZE;

            loop {
                for node_ptr in LlistNode::iter_mut(*self.get_bin_ptr(bin)) {
                    let size = gap_node_to_size(node_ptr).read();

                    if size >= required_chunk_size {
                        let base = gap_node_to_base(node_ptr);
                        let acme = base.add(size);
                        let aligned_ptr = align_up_by(base, align_mask);

                        if aligned_ptr.add(required_size) <= acme {
                            self.deregister_gap(base, bin);
                            return Some((base, acme, aligned_ptr));
                        }
                    }
                }

                bin = self.next_available_bin(bin + 1)?;
            }
        }
    }

    #[inline(always)]
    fn next_available_bin(&self, next_bin: usize) -> Option<usize> {
        if next_bin < usize::BITS as usize {
            let shifted_avails = self.availability_low >> next_bin;

            if shifted_avails != 0 {
                Some(next_bin + shifted_avails.trailing_zeros() as usize)
            } else if self.availability_high != 0 {
                Some(self.availability_high.trailing_zeros() as usize + WORD_BITS)
            } else {
                None
            }
        } else if next_bin < BIN_COUNT {
            let shifted_avails = self.availability_high >> (next_bin - WORD_BITS);

            if shifted_avails != 0 {
                Some(next_bin + shifted_avails.trailing_zeros() as usize)
            } else {
                return None;
            }
        } else {
            None
        }
    }
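
    /// Free previously allocated memory, coalescing with free neighbours.
    ///
    /// # Safety
    /// `ptr` must denote a live allocation made by this allocator with `layout`.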
    pub unsafe fn free(&mut self, ptr: NonNull<u8>, layout: Layout) {
        self.scan_for_errors();
        #[cfg(feature = "counters")]
        self.counters.account_dealloc(layout.size());

        let (tag_ptr, tag) = tag_from_alloc_ptr(ptr.as_ptr(), layout.size());
        let mut chunk_base = tag.chunk_base();
        let mut chunk_acme = tag_ptr.add(TAG_SIZE);

        debug_assert!(tag.is_allocated());
        debug_assert!(is_chunk_size(chunk_base, chunk_acme));

        if is_gap_below(chunk_base) {
            let (below_base, below_size) = gap_acme_to_base_size(chunk_base);
            self.deregister_gap(below_base, bin_of_size(below_size));

            chunk_base = below_base;
        } else {
            Tag::set_above_free(chunk_base.sub(TAG_SIZE).cast());
        }

        if tag.is_above_free() {
            let above_size = gap_base_to_size(chunk_acme).read();
            self.deregister_gap(chunk_acme, bin_of_size(above_size));

            chunk_acme = chunk_acme.add(above_size);
        }

        self.register_gap(chunk_base, chunk_acme);
    }
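
    /// Grow an allocation to `new_size` bytes, in place when possible,
    /// otherwise by reallocating and copying.
    ///
    /// # Safety
    /// `ptr` must denote a live allocation made with `old_layout`, and
    /// `new_size` must be no smaller than `old_layout.size()`.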
    pub unsafe fn grow(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_size: usize,
    ) -> Result<NonNull<u8>, ()> {
        match self.grow_in_place(ptr, old_layout, new_size) {
            Err(_) => {
                let new_layout = Layout::from_size_align_unchecked(new_size, old_layout.align());
                let allocation = self.malloc(new_layout)?;
                allocation.as_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
                self.free(ptr, old_layout);

                Ok(allocation)
            }
            res => res,
        }
    }
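
    /// Attempt to grow the allocation in place, returning `Err(())` without
    /// modifying the allocation if there is insufficient free space above it.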
    pub unsafe fn grow_in_place(
        &mut self,
        ptr: NonNull<u8>,
        old_layout: Layout,
        new_size: usize,
    ) -> Result<NonNull<u8>, ()> {
        debug_assert!(new_size >= old_layout.size());
        self.scan_for_errors();

        let old_post_alloc_ptr = align_up(ptr.as_ptr().add(old_layout.size()));
        let new_post_alloc_ptr = align_up(ptr.as_ptr().add(new_size));

        if old_post_alloc_ptr == new_post_alloc_ptr {
            #[cfg(feature = "counters")]
            self.counters.account_grow_in_place(old_layout.size(), new_size);

            return Ok(ptr);
        }

        let (tag_ptr, tag) = tag_from_alloc_ptr(ptr.as_ptr(), old_layout.size());

        if new_post_alloc_ptr <= tag_ptr {
            if new_post_alloc_ptr < tag_ptr {
                new_post_alloc_ptr.cast::<*mut u8>().write(tag_ptr);
            }

            #[cfg(feature = "counters")]
            self.counters.account_grow_in_place(old_layout.size(), new_size);

            return Ok(ptr);
        }

        let new_tag_ptr = new_post_alloc_ptr;

        let base = tag.chunk_base();
        let acme = tag_ptr.add(TAG_SIZE);

        debug_assert!(tag.is_allocated());
        debug_assert!(is_chunk_size(base, acme));

        if tag.is_above_free() {
            let above_size = gap_base_to_size(acme).read();
            let above_tag_ptr = tag_ptr.add(above_size);

            if new_tag_ptr <= above_tag_ptr {
                self.deregister_gap(acme, bin_of_size(above_size));

                if is_chunk_size(new_tag_ptr, above_tag_ptr) {
                    self.register_gap(new_tag_ptr.add(TAG_SIZE), above_tag_ptr.add(TAG_SIZE));
                    Tag::write(new_tag_ptr.cast(), base, true);
                } else {
                    Tag::write(above_tag_ptr.cast(), base, false);

                    if new_post_alloc_ptr != above_tag_ptr {
                        new_post_alloc_ptr.cast::<*mut u8>().write(above_tag_ptr);
                    }
                }

                #[cfg(feature = "counters")]
                self.counters.account_grow_in_place(old_layout.size(), new_size);

                return Ok(ptr);
            }
        }

        Err(())
    }
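
    /// Shrink the allocation to `new_size` bytes in place, returning the
    /// excess memory to the allocator where it forms a valid chunk.
    ///
    /// # Safety
    /// `ptr` must denote a live allocation made with `layout`, and `new_size`
    /// must be nonzero and no greater than `layout.size()`.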
    pub unsafe fn shrink(&mut self, ptr: NonNull<u8>, layout: Layout, new_size: usize) {
        debug_assert!(new_size != 0);
        debug_assert!(new_size <= layout.size());
        self.scan_for_errors();

        let (tag_ptr, tag) = tag_from_alloc_ptr(ptr.as_ptr(), layout.size());
        let chunk_base = tag.chunk_base();

        debug_assert!(tag.is_allocated());
        debug_assert!(is_chunk_size(chunk_base, tag_ptr.add(TAG_SIZE)));

        let new_post_alloc_ptr = align_up(ptr.as_ptr().add(new_size));
        let mut new_tag_ptr = chunk_base.add(MIN_TAG_OFFSET).max(new_post_alloc_ptr);

        if is_chunk_size(new_tag_ptr, tag_ptr) {
            let mut acme = tag_ptr.add(TAG_SIZE);
            let new_acme = new_tag_ptr.add(TAG_SIZE);

            if tag.is_above_free() {
                let above_size = gap_base_to_size(acme).read();
                self.deregister_gap(acme, bin_of_size(above_size));

                acme = acme.add(above_size);
            }

            self.register_gap(new_acme, acme);
            Tag::write(new_tag_ptr.cast(), chunk_base, true);
        } else {
            new_tag_ptr = tag_ptr;
        }

        if new_tag_ptr != new_post_alloc_ptr {
            new_post_alloc_ptr.cast::<*mut u8>().write(new_tag_ptr);
        }

        #[cfg(feature = "counters")]
        self.counters.account_shrink_in_place(layout.size(), new_size);
    }
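
    /// Create a new, empty allocator. Memory must be provided via `claim`
    /// before allocations can succeed.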
    pub const fn new(oom_handler: O) -> Self {
        Self {
            oom_handler,
            availability_low: 0,
            availability_high: 0,
            bins: null_mut(),

            #[cfg(feature = "counters")]
            counters: counters::Counters::new(),
        }
    }
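
    /// Returns the portion of `heap` currently occupied by allocations and
    /// metadata, i.e. the span that `truncate` must not remove.
    ///
    /// # Safety
    /// `heap` must be a heap currently managed by this allocator.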
    pub unsafe fn get_allocated_span(&self, heap: Span) -> Span {
        assert!(heap.size() >= MIN_HEAP_SIZE);

        let (mut base, mut acme) = heap.get_base_acme().unwrap();

        if is_gap_below(acme) {
            acme = gap_acme_to_base(acme);
        }

        if is_gap_above_heap_base(base) {
            base = gap_base_to_acme(base.add(TAG_SIZE)).sub(TAG_SIZE);
        }

        Span::new(base, acme)
    }
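
    /// Claim `memory` as a new heap for this allocator, returning the span
    /// actually used, or `Err(())` if the memory is too small.
    ///
    /// The first successful claim also carves the bin array out of the heap,
    /// so it must be large enough to hold that metadata; subsequently claimed
    /// heaps only need `MIN_HEAP_SIZE`.
    ///
    /// # Safety
    /// `memory` must be valid for reads and writes, and must not overlap any
    /// other heap or memory otherwise in use.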
    pub unsafe fn claim(&mut self, memory: Span) -> Result<Span, ()> {
        self.scan_for_errors();

        const BIN_ARRAY_SIZE: usize = core::mem::size_of::<Bin>() * BIN_COUNT;

        assert!(!memory.contains(null_mut()), "heap covers the null address!");

        let aligned_heap = memory.word_align_inward();

        if let Some((base, acme)) = aligned_heap.get_base_acme() {
            if !self.bins.is_null() {
                if acme as usize - base as usize >= MIN_HEAP_SIZE {
                    Tag::write(base.cast(), null_mut(), true);

                    let chunk_base = base.wrapping_add(TAG_SIZE);
                    self.register_gap(chunk_base, acme);

                    self.scan_for_errors();

                    #[cfg(feature = "counters")]
                    self.counters.account_claim(aligned_heap.size());

                    return Ok(aligned_heap);
                }
            } else {
                if acme as usize - base as usize >= TAG_SIZE + BIN_ARRAY_SIZE + TAG_SIZE {
                    Tag::write(base.cast(), null_mut(), false);

                    let metadata_ptr = base.add(TAG_SIZE);
                    let post_metadata_ptr = metadata_ptr.add(BIN_ARRAY_SIZE);

                    for i in 0..BIN_COUNT {
                        let bin_ptr = metadata_ptr.cast::<Bin>().add(i);
                        bin_ptr.write(None);
                    }

                    self.bins = metadata_ptr.cast::<Bin>();

                    let metadata_chunk_acme = post_metadata_ptr.add(TAG_SIZE);
                    if is_chunk_size(metadata_chunk_acme, acme) {
                        self.register_gap(metadata_chunk_acme, acme);
                        Tag::write(post_metadata_ptr.cast(), base, true);
                    } else {
                        let tag_ptr = acme.sub(TAG_SIZE).cast::<Tag>();

                        if tag_ptr != post_metadata_ptr.cast() {
                            post_metadata_ptr.cast::<*mut Tag>().write(tag_ptr);
                        }
                        Tag::write(tag_ptr, base, false);
                    }

                    self.scan_for_errors();

                    #[cfg(feature = "counters")]
                    self.counters.account_claim(aligned_heap.size());

                    return Ok(aligned_heap);
                }
            }
        }

        Err(())
    }
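
    /// Extend an established heap from `old_heap` to `req_heap`, which must
    /// contain it. Returns the resulting heap span, which may be smaller than
    /// requested due to alignment and minimum chunk size constraints.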
    pub unsafe fn extend(&mut self, old_heap: Span, req_heap: Span) -> Span {
        assert!(!self.bins.is_null());
        assert!(old_heap.size() >= MIN_HEAP_SIZE);
        assert!(req_heap.contains_span(old_heap), "new_heap must contain old_heap");
        assert!(!req_heap.contains(null_mut()), "new_heap covers the null address!");

        self.scan_for_errors();

        let (old_base, old_acme) = old_heap.word_align_inward().get_base_acme().unwrap();
        let (new_base, new_acme) = req_heap.word_align_inward().get_base_acme().unwrap();
        let new_chunk_base = new_base.add(TAG_SIZE);
        let mut ret_base = new_base;
        let mut ret_acme = new_acme;

        if is_gap_below(old_acme) {
            let (top_base, top_size) = gap_acme_to_base_size(old_acme);
            self.deregister_gap(top_base, bin_of_size(top_size));
            self.register_gap(top_base, new_acme);
        } else if is_chunk_size(old_acme, new_acme) {
            self.register_gap(old_acme, new_acme);
            Tag::set_above_free(old_acme.sub(TAG_SIZE).cast());
        } else {
            ret_acme = old_acme;
        }

        if is_gap_above_heap_base(old_base) {
            let bottom_base = old_base.add(TAG_SIZE);
            let bottom_size = gap_base_to_size(bottom_base).read();
            self.deregister_gap(bottom_base, bin_of_size(bottom_size));
            self.register_gap(new_chunk_base, bottom_base.add(bottom_size));
            Tag::write(new_base.cast(), null_mut(), true);
        } else if is_chunk_size(new_base, old_base) {
            self.register_gap(new_base.add(TAG_SIZE), old_base.add(TAG_SIZE));
            Tag::write(new_base.cast(), null_mut(), true);
        } else {
            ret_base = old_base;
        }

        let ret_heap = Span::new(ret_base, ret_acme);

        #[cfg(feature = "counters")]
        self.counters.account_extend(old_heap.size(), ret_heap.size());

        ret_heap
    }
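
    /// Truncate an established heap to `req_heap`, which must contain all of
    /// the heap's allocated memory (see `get_allocated_span`). Returns the
    /// resulting heap span, which is empty if the heap is released entirely.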
    pub unsafe fn truncate(&mut self, old_heap: Span, req_heap: Span) -> Span {
        assert!(!self.bins.is_null(), "no heaps have been successfully established!");

        self.scan_for_errors();

        let new_heap = req_heap.word_align_inward();

        assert!(old_heap.contains_span(new_heap), "the old_heap must contain new_heap!");
        assert!(
            new_heap.contains_span(unsafe { self.get_allocated_span(old_heap) }),
            "new_heap must contain all the heap's allocated memory! see `get_allocated_span`"
        );

        let (old_base, old_acme) = old_heap.get_base_acme().unwrap();
        let old_chunk_base = old_base.add(TAG_SIZE);

        if new_heap.size() < MIN_HEAP_SIZE {
            self.deregister_gap(
                old_chunk_base,
                bin_of_size(old_acme as usize - old_chunk_base as usize),
            );

            #[cfg(feature = "counters")]
            self.counters.account_truncate(old_heap.size(), 0);

            return Span::empty();
        }

        let (new_base, new_acme) = new_heap.get_base_acme().unwrap();
        let new_chunk_base = new_base.add(TAG_SIZE);
        let mut ret_base = new_base;
        let mut ret_acme = new_acme;

        if new_acme < old_acme {
            let (top_base, top_size) = gap_acme_to_base_size(old_acme);
            self.deregister_gap(top_base, bin_of_size(top_size));

            if is_chunk_size(top_base, new_acme) {
                self.register_gap(top_base, new_acme);
            } else {
                ret_acme = top_base;
                Tag::clear_above_free(top_base.sub(TAG_SIZE).cast());
            }
        }

        if old_base < new_base {
            debug_assert!(is_gap_above_heap_base(old_base));

            let (bottom_acme, bottom_size) = gap_base_to_acme_size(old_chunk_base);
            self.deregister_gap(old_chunk_base, bin_of_size(bottom_size));

            if is_chunk_size(new_chunk_base, bottom_acme) {
                self.register_gap(new_chunk_base, bottom_acme);
                Tag::write(new_base.cast(), null_mut(), true);
            } else {
                ret_base = bottom_acme.sub(TAG_SIZE);
                Tag::write(ret_base.cast(), null_mut(), false);
            }
        }

        let ret_heap = Span::new(ret_base, ret_acme);

        #[cfg(feature = "counters")]
        self.counters.account_truncate(old_heap.size(), ret_heap.size());

        ret_heap
    }
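
    /// Debug-only consistency check: verifies that the availability bitmaps
    /// match the bin lists and that recorded gap sizes and tags are coherent.
    /// Compiles to a no-op in release builds.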
    #[cfg(not(debug_assertions))]
    fn scan_for_errors(&self) {}

    #[cfg(debug_assertions)]
    fn scan_for_errors(&self) {
        #[cfg(any(test, feature = "fuzzing"))]
        let mut vec = std::vec::Vec::<Span>::new();

        if !self.bins.is_null() {
            for b in 0..BIN_COUNT {
                let mut any = false;
                unsafe {
                    for node in LlistNode::iter_mut(*self.get_bin_ptr(b)) {
                        any = true;
                        if b < WORD_BITS {
                            assert!(self.availability_low & 1 << b != 0);
                        } else {
                            assert!(self.availability_high & 1 << (b - WORD_BITS) != 0);
                        }

                        let base = gap_node_to_base(node);
                        let (acme, size) = gap_base_to_acme_size(base);
                        let low_size = gap_acme_to_size(acme).read();
                        assert!(low_size == size);

                        let lower_tag = base.sub(TAG_SIZE).cast::<Tag>().read();
                        assert!(lower_tag.is_allocated());
                        assert!(lower_tag.is_above_free());

                        #[cfg(any(test, feature = "fuzzing"))]
                        {
                            let span = Span::new(base, acme);
                            for other in &vec {
                                assert!(!span.overlaps(*other), "{} intersects {}", span, other);
                            }
                            vec.push(span);
                        }
                    }
                }

                if !any {
                    if b < WORD_BITS {
                        assert!(self.availability_low & 1 << b == 0);
                    } else {
                        assert!(self.availability_high & 1 << (b - WORD_BITS) == 0);
                    }
                }
            }
        } else {
            assert!(self.availability_low == 0);
            assert!(self.availability_high == 0);
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn alignment_assumptions_hold() {
        assert!(ALIGN == std::mem::align_of::<Bin>() && ALIGN == std::mem::size_of::<Bin>());
    }

    #[test]
    fn alloc_dealloc_test() {
        const ARENA_SIZE: usize = 10000000;

        let arena = Box::leak(vec![0u8; ARENA_SIZE].into_boxed_slice()) as *mut [_];

        let mut talc = Talc::new(crate::ErrOnOom);

        unsafe {
            talc.claim(arena.as_mut().unwrap().into()).unwrap();
        }

        let layout = Layout::from_size_align(1243, 8).unwrap();

        let a = unsafe { talc.malloc(layout) };
        assert!(a.is_ok());
        unsafe {
            a.unwrap().as_ptr().write_bytes(255, layout.size());
        }

        let mut x = vec![NonNull::dangling(); 100];

        for _ in 0..1 {
            for i in 0..100 {
                let allocation = unsafe { talc.malloc(layout) };
                assert!(allocation.is_ok());
                unsafe {
                    allocation.unwrap().as_ptr().write_bytes(0xab, layout.size());
                }
                x[i] = allocation.unwrap();
            }

            for i in 0..50 {
                unsafe {
                    talc.free(x[i], layout);
                }
            }
            for i in (50..100).rev() {
                unsafe {
                    talc.free(x[i], layout);
                }
            }
        }

        unsafe {
            talc.free(a.unwrap(), layout);
        }

        unsafe {
            drop(Box::from_raw(arena));
        }
    }

    #[test]
    fn claim_truncate_extend_test() {
        let mut tiny_heap = [0u8; BIN_COUNT * WORD_SIZE / 2];
        let tiny_heap_span: Span = Span::from(&mut tiny_heap);

        let big_heap = Box::leak(vec![0u8; BIN_COUNT * WORD_SIZE + 100000].into_boxed_slice());
        let big_heap_span = Span::from(big_heap.as_mut());

        let mut talc = Talc::new(crate::ErrOnOom);

        unsafe {
            talc.claim(tiny_heap_span).unwrap_err();
        }

        assert!(talc.bins.is_null());
        assert!(talc.availability_low == 0 && talc.availability_high == 0);

        let alloc_big_heap = unsafe { talc.claim(big_heap_span).unwrap() };

        assert!(!talc.bins.is_null());

        let alloc_big_heap = unsafe {
            talc.truncate(
                alloc_big_heap,
                alloc_big_heap.truncate(500, 500).fit_over(talc.get_allocated_span(alloc_big_heap)),
            )
        };

        let _alloc_tiny_heap = unsafe { talc.claim(tiny_heap_span).unwrap() };

        let allocation = unsafe {
            let allocation = talc.malloc(Layout::new::<u128>()).unwrap();
            allocation.as_ptr().write_bytes(0, Layout::new::<u128>().size());
            allocation
        };

        let alloc_big_heap = unsafe {
            talc.truncate(
                alloc_big_heap,
                alloc_big_heap
                    .truncate(100000, 100000)
                    .fit_over(talc.get_allocated_span(alloc_big_heap)),
            )
        };

        unsafe {
            talc.extend(
                alloc_big_heap,
                alloc_big_heap.extend(10000, 10000).fit_within(big_heap_span),
            );
        }

        unsafe {
            talc.free(allocation, Layout::new::<u128>());
        }

        unsafe {
            drop(Box::from_raw(big_heap));
        }
    }
}