From 1f848baf85bba918074464da774152c6067ce2d1 Mon Sep 17 00:00:00 2001 From: Philipp Oppermann Date: Thu, 23 Jun 2022 09:24:05 +0200 Subject: [PATCH 01/13] Try to make miri happy by using `*mut u8` instead of `usize` for addresses --- src/hole.rs | 64 +++++++++++++++++++++++++++++++---------------------- src/lib.rs | 42 +++++++++++++++++++++-------------- src/test.rs | 13 ++++++----- 3 files changed, 71 insertions(+), 48 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index d6d5188..6cf6ab8 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -1,8 +1,11 @@ use core::alloc::Layout; +use core::convert::{TryFrom, TryInto}; use core::mem; use core::mem::{align_of, size_of}; use core::ptr::NonNull; +use crate::align_up_size; + use super::align_up; /// A sorted list of holes. It uses the the holes itself to store its nodes. @@ -42,13 +45,14 @@ impl HoleList { /// is invalid or if memory from the `[hole_addr, hole_addr+size)` range is used somewhere else. /// /// The pointer to `hole_addr` is automatically aligned. - pub unsafe fn new(hole_addr: usize, hole_size: usize) -> HoleList { + pub unsafe fn new(hole_addr: *mut u8, hole_size: usize) -> HoleList { assert_eq!(size_of::(), Self::min_size()); let aligned_hole_addr = align_up(hole_addr, align_of::()); let ptr = aligned_hole_addr as *mut Hole; ptr.write(Hole { - size: hole_size.saturating_sub(aligned_hole_addr - hole_addr), + size: hole_size + .saturating_sub(aligned_hole_addr.offset_from(hole_addr).try_into().unwrap()), next: None, }); @@ -73,7 +77,7 @@ impl HoleList { if size < Self::min_size() { size = Self::min_size(); } - let size = align_up(size, mem::align_of::()); + let size = align_up_size(size, mem::align_of::()); let layout = Layout::from_size_align(size, layout.align()).unwrap(); layout @@ -114,11 +118,7 @@ impl HoleList { /// [`allocate_first_fit`]: HoleList::allocate_first_fit pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { let aligned_layout = Self::align_layout(layout); - deallocate( - &mut self.first, - ptr.as_ptr() as usize, - aligned_layout.size(), - ); + deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); aligned_layout } @@ -129,11 +129,11 @@ impl HoleList { /// Returns information about the first hole for test purposes. #[cfg(test)] - pub fn first_hole(&self) -> Option<(usize, usize)> { + pub fn first_hole(&self) -> Option<(*const u8, usize)> { self.first .next .as_ref() - .map(|hole| ((*hole) as *const Hole as usize, hole.size)) + .map(|hole| ((*hole) as *const Hole as *const u8, hole.size)) } } @@ -152,9 +152,9 @@ pub struct Hole { impl Hole { /// Returns basic information about the hole. - fn info(&self) -> HoleInfo { + fn info(&mut self) -> HoleInfo { HoleInfo { - addr: self as *const _ as usize, + addr: self as *mut _ as *mut u8, size: self.size, } } @@ -163,7 +163,7 @@ impl Hole { /// Basic information about a hole. 
#[derive(Debug, Clone, Copy)] struct HoleInfo { - addr: usize, + addr: *mut u8, size: usize, } @@ -189,24 +189,29 @@ fn split_hole(hole: HoleInfo, required_layout: Layout) -> Option { (hole.addr, None) } else { // the required alignment causes some padding before the allocation - let aligned_addr = align_up(hole.addr + HoleList::min_size(), required_align); + let aligned_addr = align_up(hole.addr.wrapping_add(HoleList::min_size()), required_align); ( aligned_addr, Some(HoleInfo { addr: hole.addr, - size: aligned_addr - hole.addr, + size: unsafe { aligned_addr.offset_from(hole.addr) } + .try_into() + .unwrap(), }), ) }; let aligned_hole = { - if aligned_addr + required_size > hole.addr + hole.size { + if aligned_addr.wrapping_offset(required_size.try_into().unwrap()) + > hole.addr.wrapping_offset(hole.size.try_into().unwrap()) + { // hole is too small return None; } HoleInfo { addr: aligned_addr, - size: hole.size - (aligned_addr - hole.addr), + size: hole.size + - usize::try_from(unsafe { aligned_addr.offset_from(hole.addr) }).unwrap(), } }; @@ -219,7 +224,9 @@ fn split_hole(hole: HoleInfo, required_layout: Layout) -> Option { } else { // the hole is bigger than necessary, so there is some padding behind the allocation Some(HoleInfo { - addr: aligned_hole.addr + required_size, + addr: aligned_hole + .addr + .wrapping_offset(required_size.try_into().unwrap()), size: aligned_hole.size - required_size, }) }; @@ -291,7 +298,7 @@ fn allocate_first_fit(mut previous: &mut Hole, layout: Layout) -> Result= HoleList::min_size()); @@ -299,24 +306,27 @@ fn deallocate(mut hole: &mut Hole, addr: usize, mut size: usize) { // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, // so it's address is not the address of the hole. We set the addr to 0 as it's always // the first hole. - 0 + core::ptr::null_mut() } else { // tt's a real hole in memory and its address is the address of the hole - hole as *mut _ as usize + hole as *mut _ as *mut u8 }; // Each freed block must be handled by the previous hole in memory. Thus the freed // address must be always behind the current hole. 
assert!( - hole_addr + hole.size <= addr, + hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, "invalid deallocation (probably a double free)" ); // get information about the next block - let next_hole_info = hole.next.as_ref().map(|next| next.info()); + let next_hole_info = hole.next.as_mut().map(|next| next.info()); match next_hole_info { - Some(next) if hole_addr + hole.size == addr && addr + size == next.addr => { + Some(next) + if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr + && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => + { // block fills the gap between this hole and the next hole // before: ___XXX____YYYYY____ where X is this hole and Y the next hole // after: ___XXXFFFFYYYYY____ where F is the freed block @@ -324,7 +334,7 @@ fn deallocate(mut hole: &mut Hole, addr: usize, mut size: usize) { hole.size += size + next.size; // merge the F and Y blocks to this X block hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block } - _ if hole_addr + hole.size == addr => { + _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { // block is right behind this hole but there is used memory after it // before: ___XXX______YYYYY____ where X is this hole and Y the next hole // after: ___XXXFFFF__YYYYY____ where F is the freed block @@ -335,7 +345,7 @@ fn deallocate(mut hole: &mut Hole, addr: usize, mut size: usize) { hole.size += size; // merge the F block to this X block } - Some(next) if addr + size == next.addr => { + Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { // block is right before the next hole but there is used memory before it // before: ___XXX______YYYYY____ where X is this hole and Y the next hole // after: ___XXX__FFFFYYYYY____ where F is the freed block @@ -366,7 +376,7 @@ fn deallocate(mut hole: &mut Hole, addr: usize, mut size: usize) { next: hole.next.take(), // the reference to the Y block (if it exists) }; // write the new hole to the freed memory - debug_assert_eq!(addr % align_of::(), 0); + debug_assert_eq!(addr as usize % align_of::(), 0); let ptr = addr as *mut Hole; unsafe { ptr.write(new_hole) }; // add the F block as the next block of the X block diff --git a/src/lib.rs b/src/lib.rs index b9c1a28..5d8e745 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -17,6 +17,7 @@ use core::alloc::GlobalAlloc; use core::alloc::Layout; #[cfg(feature = "alloc_ref")] use core::alloc::{AllocError, Allocator}; +use core::convert::{TryFrom, TryInto}; use core::mem::MaybeUninit; #[cfg(feature = "use_spin")] use core::ops::Deref; @@ -33,7 +34,7 @@ mod test; /// A fixed size heap backed by a linked list of free memory blocks. pub struct Heap { - bottom: usize, + bottom: *mut u8, size: usize, used: usize, holes: HoleList, @@ -54,7 +55,7 @@ impl Heap { #[cfg(feature = "const_mut_refs")] pub const fn empty() -> Heap { Heap { - bottom: 0, + bottom: core::ptr::null_mut(), size: 0, used: 0, holes: HoleList::empty(), @@ -67,7 +68,7 @@ impl Heap { /// /// This function must be called at most once and must only be used on an /// empty heap. - pub unsafe fn init(&mut self, heap_bottom: usize, heap_size: usize) { + pub unsafe fn init(&mut self, heap_bottom: *mut u8, heap_size: usize) { self.bottom = heap_bottom; self.size = heap_size; self.used = 0; @@ -89,9 +90,12 @@ impl Heap { /// /// This method panics if the heap is already initialized. 
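    ///
    /// (Illustrative usage, not part of this patch; the buffer name and size below are made up.)
    ///
    /// ```ignore
    /// static mut HEAP_MEM: [MaybeUninit<u8>; 4096] = [MaybeUninit::uninit(); 4096];
    /// let mut heap = Heap::empty();
    /// heap.init_from_slice(unsafe { &mut HEAP_MEM });
    /// ```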
pub fn init_from_slice(&mut self, mem: &'static mut [MaybeUninit]) { - assert!(self.bottom == 0, "The heap has already been initialized."); + assert!( + self.bottom.is_null(), + "The heap has already been initialized." + ); let size = mem.len(); - let address = mem.as_ptr() as usize; + let address = mem.as_mut_ptr().cast(); // SAFETY: All initialization requires the bottom address to be valid, which implies it // must not be 0. Initially the address is 0. The assertion above ensures that no // initialization had been called before. @@ -104,7 +108,7 @@ impl Heap { /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for /// anything else. This function is unsafe because it can cause undefined behavior if the /// given address is invalid. - pub unsafe fn new(heap_bottom: usize, heap_size: usize) -> Heap { + pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> Heap { if heap_size < HoleList::min_size() { Self::empty() } else { @@ -123,7 +127,7 @@ impl Heap { /// single operation that can not panic. pub fn from_slice(mem: &'static mut [MaybeUninit]) -> Heap { let size = mem.len(); - let address = mem.as_ptr() as usize; + let address = mem.as_mut_ptr().cast(); // SAFETY: The given address and size is valid according to the safety invariants of the // mutable reference handed to us by the caller. unsafe { Self::new(address, size) } @@ -156,7 +160,7 @@ impl Heap { } /// Returns the bottom address of the heap. - pub fn bottom(&self) -> usize { + pub fn bottom(&self) -> *mut u8 { self.bottom } @@ -166,8 +170,9 @@ impl Heap { } /// Return the top address of the heap - pub fn top(&self) -> usize { - self.bottom + self.size + pub fn top(&self) -> *mut u8 { + self.bottom + .wrapping_offset(isize::try_from(self.size).unwrap()) } /// Returns the size of the used part of the heap @@ -234,7 +239,7 @@ impl LockedHeap { /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for /// anything else. This function is unsafe because it can cause undefined behavior if the /// given address is invalid. - pub unsafe fn new(heap_bottom: usize, heap_size: usize) -> LockedHeap { + pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { LockedHeap(Spinlock::new(Heap { bottom: heap_bottom, size: heap_size, @@ -272,18 +277,23 @@ unsafe impl GlobalAlloc for LockedHeap { /// Align downwards. Returns the greatest x with alignment `align` /// so that x <= addr. The alignment must be a power of 2. -pub fn align_down(addr: usize, align: usize) -> usize { +pub fn align_down_size(size: usize, align: usize) -> usize { if align.is_power_of_two() { - addr & !(align - 1) + size & !(align - 1) } else if align == 0 { - addr + size } else { panic!("`align` must be a power of 2"); } } +pub fn align_up_size(size: usize, align: usize) -> usize { + align_down_size(size + align - 1, align) +} + /// Align upwards. Returns the smallest x with alignment `align` /// so that x >= addr. The alignment must be a power of 2. 
-pub fn align_up(addr: usize, align: usize) -> usize { - align_down(addr + align - 1, align) +pub fn align_up(addr: *mut u8, align: usize) -> *mut u8 { + let offset = addr.align_offset(align); + addr.wrapping_offset(offset.try_into().unwrap()) } diff --git a/src/test.rs b/src/test.rs index 5ba2caa..534d45b 100644 --- a/src/test.rs +++ b/src/test.rs @@ -6,7 +6,7 @@ use std::prelude::v1::*; fn new_heap() -> Heap { const HEAP_SIZE: usize = 1000; let heap_space = Box::leak(Box::new([MaybeUninit::uninit(); HEAP_SIZE])); - let assumed_location = heap_space.as_ptr() as usize; + let assumed_location = heap_space.as_mut_ptr().cast(); let heap = Heap::from_slice(heap_space); assert!(heap.bottom == assumed_location); @@ -18,7 +18,7 @@ fn new_max_heap() -> Heap { const HEAP_SIZE: usize = 1024; const HEAP_SIZE_MAX: usize = 2048; let heap_space = Box::leak(Box::new([MaybeUninit::::uninit(); HEAP_SIZE_MAX])); - let start_ptr = heap_space.as_ptr() as usize; + let start_ptr = heap_space.as_mut_ptr().cast(); // Unsafe so that we have provenance over the whole allocation. let heap = unsafe { Heap::new(start_ptr, HEAP_SIZE) }; @@ -49,14 +49,17 @@ fn allocate_double_usize() { let layout = Layout::from_size_align(size, align_of::()); let addr = heap.allocate_first_fit(layout.unwrap()); assert!(addr.is_ok()); - let addr = addr.unwrap().as_ptr() as usize; + let addr = addr.unwrap().as_ptr(); assert!(addr == heap.bottom); let (hole_addr, hole_size) = heap.holes.first_hole().expect("ERROR: no hole left"); - assert!(hole_addr == heap.bottom + size); + assert!(hole_addr == heap.bottom.wrapping_add(size)); assert!(hole_size == heap.size - size); unsafe { - assert_eq!((*((addr + size) as *const Hole)).size, heap.size - size); + assert_eq!( + (*((addr.wrapping_offset(size.try_into().unwrap())) as *const Hole)).size, + heap.size - size + ); } } From d93f94f401dc8882523704a4b1f5fa880c78e23c Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 25 Jun 2022 01:30:03 +0200 Subject: [PATCH 02/13] Investigate miri issues --- src/hole.rs | 11 ++--------- src/lib.rs | 4 ++++ src/test.rs | 26 +++++++++++++++++++++----- 3 files changed, 27 insertions(+), 14 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index 6cf6ab8..036a3c6 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -10,7 +10,7 @@ use super::align_up; /// A sorted list of holes. It uses the the holes itself to store its nodes. pub struct HoleList { - first: Hole, // dummy + pub(crate) first: Hole, // dummy } impl HoleList { @@ -138,14 +138,7 @@ impl HoleList { } /// A block containing free memory. It points to the next hole and thus forms a linked list. 
-#[cfg(not(test))] -struct Hole { - size: usize, - next: Option<&'static mut Hole>, -} - -#[cfg(test)] -pub struct Hole { +pub(crate) struct Hole { pub size: usize, pub next: Option<&'static mut Hole>, } diff --git a/src/lib.rs b/src/lib.rs index 5d8e745..7a5deef 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -185,6 +185,10 @@ impl Heap { self.size - self.used } + pub(crate) fn holes(&self) -> &HoleList { + &self.holes + } + /// Extends the size of the heap by creating a new hole at the end /// /// # Unsafety diff --git a/src/test.rs b/src/test.rs index 534d45b..2cc6729 100644 --- a/src/test.rs +++ b/src/test.rs @@ -3,12 +3,26 @@ use core::alloc::Layout; use std::mem::{align_of, size_of, MaybeUninit}; use std::prelude::v1::*; +#[repr(align(128))] +struct Chonk { + data: [MaybeUninit; N] +} + +impl Chonk { + pub fn new() -> Self { + Self { + data: [MaybeUninit::uninit(); N], + } + } +} + fn new_heap() -> Heap { const HEAP_SIZE: usize = 1000; - let heap_space = Box::leak(Box::new([MaybeUninit::uninit(); HEAP_SIZE])); - let assumed_location = heap_space.as_mut_ptr().cast(); + let heap_space = Box::leak(Box::new(Chonk::::new())); + let data = &mut heap_space.data; + let assumed_location = data.as_mut_ptr().cast(); - let heap = Heap::from_slice(heap_space); + let heap = Heap::from_slice(data); assert!(heap.bottom == assumed_location); assert!(heap.size == HEAP_SIZE); heap @@ -73,8 +87,10 @@ fn allocate_and_free_double_usize() { *(x.as_ptr() as *mut (usize, usize)) = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe); heap.deallocate(x, layout.clone()); - assert_eq!((*(heap.bottom as *const Hole)).size, heap.size); - assert!((*(heap.bottom as *const Hole)).next.is_none()); + let real_first = heap.holes().first.next.as_ref().unwrap(); + + assert_eq!(real_first.size, heap.size); + assert!(real_first.next.is_none()); } } From 23ae87dbe0759055191b778db9306a834536f1b5 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 25 Jun 2022 01:44:18 +0200 Subject: [PATCH 03/13] Also force alignment for max stack tests --- src/test.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/test.rs b/src/test.rs index 2cc6729..c9e681c 100644 --- a/src/test.rs +++ b/src/test.rs @@ -31,8 +31,9 @@ fn new_heap() -> Heap { fn new_max_heap() -> Heap { const HEAP_SIZE: usize = 1024; const HEAP_SIZE_MAX: usize = 2048; - let heap_space = Box::leak(Box::new([MaybeUninit::::uninit(); HEAP_SIZE_MAX])); - let start_ptr = heap_space.as_mut_ptr().cast(); + let heap_space = Box::leak(Box::new(Chonk::::new())); + let data = &mut heap_space.data; + let start_ptr = data.as_mut_ptr().cast(); // Unsafe so that we have provenance over the whole allocation. 
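    // (The pointer here comes straight from `data.as_mut_ptr()` instead of being round-tripped
    // through a `usize`; keeping it as a real pointer is what preserves provenance over the
    // whole backing buffer, which is the point of this patch series.)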
let heap = unsafe { Heap::new(start_ptr, HEAP_SIZE) }; From c6f82533bdafe8c5afdcf668fd6c701986d1d508 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 25 Jun 2022 21:03:14 +0200 Subject: [PATCH 04/13] Rewrite `hole` using a cursor API --- src/hole.rs | 638 ++++++++++++++++++++++++++++++++-------------------- src/lib.rs | 201 +++++++++-------- 2 files changed, 497 insertions(+), 342 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index 036a3c6..a47b8c6 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -13,6 +13,203 @@ pub struct HoleList { pub(crate) first: Hole, // dummy } +pub struct Cursor { + prev: NonNull, + hole: NonNull, +} + +impl Cursor { + fn next(mut self) -> Option { + unsafe { + if let Some(nhole) = self.hole.as_mut().next { + Some(Cursor { + prev: self.hole, + hole: nhole, + }) + } else { + None + } + } + } + + fn peek_next(&self) -> Option<&Hole> { + unsafe { + if let Some(nhole) = self.hole.as_ref().next { + Some(nhole.as_ref()) + } else { + None + } + } + } + + fn current(&self) -> &Hole { + unsafe { + self.hole.as_ref() + } + } + + fn previous(&self) -> &Hole { + unsafe { + self.prev.as_ref() + } + } + + // On success, it returns the new allocation, and the linked list has been updated + // to accomodate any new holes and allocation. On error, it returns the cursor + // unmodified, and has made no changes to the linked list of holes. + fn split_current(self, required_layout: Layout) -> Result<(*mut u8, usize), Self> { + let front_padding; + let alloc_ptr; + let alloc_size; + let back_padding; + + // Here we create a scope, JUST to make sure that any created references do not + // live to the point where we start doing pointer surgery below. + { + let hole_size = self.current().size; + let hole_addr_u8 = self.hole.as_ptr().cast::(); + let required_size = required_layout.size(); + let required_align = required_layout.align(); + + // Quick check: If the new item is larger than the current hole, it's never gunna + // work. Go ahead and bail early to save ourselves some math. + if hole_size < required_size { + return Err(self); + } + + // Attempt to fracture the current hole into the following parts: + // ([front_padding], allocation, [back_padding]) + // + // The paddings are optional, and only placed if required. + // + // First, figure out if front padding is necessary. This would be necessary if the new + // allocation has a larger alignment requirement than the current hole, and we didn't get + // lucky that the current position was well-aligned enough for the new item. + let aligned_addr = if hole_addr_u8 == align_up(hole_addr_u8, required_align) { + // hole has already the required alignment, no front padding is needed. + front_padding = None; + hole_addr_u8 + } else { + // Unfortunately, we did not get lucky. Instead: Push the "starting location" FORWARD the size + // of a hole node, to guarantee there is at least enough room for the hole header, and + // potentially additional space. + let new_start = hole_addr_u8.wrapping_add(HoleList::min_size()); + + let aligned_addr = align_up(new_start, required_align); + front_padding = Some(HoleInfo { + // Our new front padding will exist at the same location as the previous hole, + // it will just have a smaller size after we have chopped off the "tail" for + // the allocation. 
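                    // (Note: because the start was bumped forward by `HoleList::min_size()`
                    // before aligning, this front padding is always at least `min_size()`
                    // bytes and can therefore hold a hole header of its own.)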
+ addr: hole_addr_u8, + size: unsafe { aligned_addr.offset_from(hole_addr_u8) } + .try_into() + .unwrap(), + }); + aligned_addr + }; + + // Okay, now that we found space, we need to see if the decisions we just made + // ACTUALLY fit in the previous hole space + let allocation_end = aligned_addr.wrapping_add(required_size); + let hole_end = hole_addr_u8.wrapping_add(hole_size); + + if allocation_end > hole_end { + // hole is too small + return Err(self); + } + + // Yes! We have successfully placed our allocation as well. + alloc_ptr = aligned_addr; + alloc_size = required_size; + + // Okay, time to move onto the back padding. Here, we are opportunistic - + // if it fits, we sits. Otherwise we just skip adding the back padding, and + // sort of assume that the allocation is actually a bit larger than it + // actually needs to be. + let hole_layout = Layout::new::(); + let back_padding_start = align_up(allocation_end, hole_layout.align()); + let back_padding_end = back_padding_start.wrapping_add(hole_layout.size()); + + // Will the proposed new back padding actually fit in the old hole slot? + back_padding = if back_padding_end <= hole_end { + // Yes, it does! + Some(HoleInfo { + addr: back_padding_start, + size: unsafe { hole_end.offset_from(back_padding_start) } + .try_into() + .unwrap(), + }) + } else { + // No, it does not. + None + }; + } + + //////////////////////////////////////////////////////////////////////////// + // This is where we actually perform surgery on the linked list. + //////////////////////////////////////////////////////////////////////////// + let Cursor { mut prev, mut hole } = self; + // Remove the current location from the previous node + unsafe { prev.as_mut().next = None; } + // Take the next node out of our current node + let maybe_next_addr: Option> = unsafe { hole.as_mut().next.take() }; + + // As of now, the old `Hole` is no more. We are about to replace it with one or more of + // the front padding, the allocation, and the back padding. + drop(hole); + + match (front_padding, back_padding) { + (None, None) => { + // No padding at all, how lucky! Nothing to do but return the allocation. + }, + (None, Some(singlepad)) | (Some(singlepad), None) => unsafe { + // We have front padding OR back padding, but not both. + // + // Replace the old node with the new single node. We need to stitch the new node + // into the linked list. Start by writing the padding into the proper location + let singlepad_ptr = singlepad.addr.cast::(); + singlepad_ptr.write(Hole { + size: singlepad.size, + // If the old hole had a next pointer, the single padding now takes + // "ownership" of that link + next: maybe_next_addr, + }); + + // Then connect the OLD previous to the NEW single padding + prev.as_mut().next = Some(NonNull::new_unchecked(singlepad_ptr)); + }, + (Some(frontpad), Some(backpad)) => unsafe { + // We have front padding AND back padding. + // + // We need to stich them together as two nodes where there used to + // only be one. Start with the back padding. 
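                // (Resulting layout, as a sketch: `prev` -> front padding -> back padding ->
                // old next in the list, with the allocation itself sitting between the two
                // paddings in memory.)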
+ let backpad_ptr = backpad.addr.cast::(); + backpad_ptr.write(Hole { + size: backpad.size, + // If the old hole had a next pointer, the BACK padding now takes + // "ownership" of that link + next: maybe_next_addr, + }); + + // Now we emplace the front padding, and link it to both the back padding, + // and the old previous + let frontpad_ptr = frontpad.addr.cast::(); + frontpad_ptr.write(Hole { + size: frontpad.size, + // We now connect the FRONT padding to the BACK padding + next: Some(NonNull::new_unchecked(backpad_ptr)), + }); + + // Then connect the OLD previous to the NEW FRONT padding + prev.as_mut().next = Some(NonNull::new_unchecked(frontpad_ptr)); + } + } + + // Well that went swimmingly! Hand off the allocation, with surgery performed successfully! + Ok((alloc_ptr, alloc_size)) + } +} + impl HoleList { /// Creates an empty `HoleList`. #[cfg(not(feature = "const_mut_refs"))] @@ -36,6 +233,17 @@ impl HoleList { } } + pub fn cursor(&mut self) -> Option { + if let Some(hole) = self.first.next { + Some(Cursor { + hole, + prev: NonNull::new(&mut self.first)?, + }) + } else { + None + } + } + /// Creates a `HoleList` that contains the given hole. /// /// ## Safety @@ -59,7 +267,7 @@ impl HoleList { HoleList { first: Hole { size: 0, - next: Some(&mut *ptr), + next: Some(NonNull::new_unchecked(ptr)), }, } } @@ -95,52 +303,57 @@ impl HoleList { /// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations. pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<(NonNull, Layout), ()> { let aligned_layout = Self::align_layout(layout); - - allocate_first_fit(&mut self.first, aligned_layout).map(|holeinfo| { - ( - NonNull::new(holeinfo.addr as *mut u8).unwrap(), - aligned_layout, - ) - }) + let mut cursor = self.cursor().ok_or(())?; + + loop { + match cursor.split_current(aligned_layout) { + Ok((ptr, _len)) => { + return Ok((NonNull::new(ptr).ok_or(())?, aligned_layout)); + }, + Err(curs) => { + cursor = curs.next().ok_or(())?; + }, + } + } } - /// Frees the allocation given by `ptr` and `layout`. - /// - /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with - /// identical layout. Undefined behavior may occur for invalid arguments. - /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and - /// returns the aligned layout. - /// - /// This function walks the list and inserts the given block at the correct place. If the freed - /// block is adjacent to another free block, the blocks are merged again. - /// This operation is in `O(n)` since the list needs to be sorted by address. - /// - /// [`allocate_first_fit`]: HoleList::allocate_first_fit - pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { - let aligned_layout = Self::align_layout(layout); - deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); - aligned_layout - } + // /// Frees the allocation given by `ptr` and `layout`. + // /// + // /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with + // /// identical layout. Undefined behavior may occur for invalid arguments. + // /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and + // /// returns the aligned layout. + // /// + // /// This function walks the list and inserts the given block at the correct place. If the freed + // /// block is adjacent to another free block, the blocks are merged again. 
+ // /// This operation is in `O(n)` since the list needs to be sorted by address. + // /// + // /// [`allocate_first_fit`]: HoleList::allocate_first_fit + // pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { + // let aligned_layout = Self::align_layout(layout); + // deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); + // aligned_layout + // } /// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed. pub fn min_size() -> usize { size_of::() * 2 } - /// Returns information about the first hole for test purposes. - #[cfg(test)] - pub fn first_hole(&self) -> Option<(*const u8, usize)> { - self.first - .next - .as_ref() - .map(|hole| ((*hole) as *const Hole as *const u8, hole.size)) - } + // /// Returns information about the first hole for test purposes. + // #[cfg(test)] + // pub fn first_hole(&self) -> Option<(*const u8, usize)> { + // self.first + // .next + // .as_ref() + // .map(|hole| ((*hole) as *const Hole as *const u8, hole.size)) + // } } /// A block containing free memory. It points to the next hole and thus forms a linked list. pub(crate) struct Hole { pub size: usize, - pub next: Option<&'static mut Hole>, + pub next: Option>, } impl Hole { @@ -168,217 +381,96 @@ struct Allocation { back_padding: Option, } -/// Splits the given hole into `(front_padding, hole, back_padding)` if it's big enough to allocate -/// `required_layout.size()` bytes with the `required_layout.align()`. Else `None` is returned. -/// Front padding occurs if the required alignment is higher than the hole's alignment. Back -/// padding occurs if the required size is smaller than the size of the aligned hole. All padding -/// must be at least `HoleList::min_size()` big or the hole is unusable. 
-fn split_hole(hole: HoleInfo, required_layout: Layout) -> Option { - let required_size = required_layout.size(); - let required_align = required_layout.align(); - - let (aligned_addr, front_padding) = if hole.addr == align_up(hole.addr, required_align) { - // hole has already the required alignment - (hole.addr, None) - } else { - // the required alignment causes some padding before the allocation - let aligned_addr = align_up(hole.addr.wrapping_add(HoleList::min_size()), required_align); - ( - aligned_addr, - Some(HoleInfo { - addr: hole.addr, - size: unsafe { aligned_addr.offset_from(hole.addr) } - .try_into() - .unwrap(), - }), - ) - }; - - let aligned_hole = { - if aligned_addr.wrapping_offset(required_size.try_into().unwrap()) - > hole.addr.wrapping_offset(hole.size.try_into().unwrap()) - { - // hole is too small - return None; - } - HoleInfo { - addr: aligned_addr, - size: hole.size - - usize::try_from(unsafe { aligned_addr.offset_from(hole.addr) }).unwrap(), - } - }; - - let back_padding = if aligned_hole.size == required_size { - // the aligned hole has exactly the size that's needed, no padding accrues - None - } else if aligned_hole.size - required_size < HoleList::min_size() { - // we can't use this hole since its remains would form a new, too small hole - return None; - } else { - // the hole is bigger than necessary, so there is some padding behind the allocation - Some(HoleInfo { - addr: aligned_hole - .addr - .wrapping_offset(required_size.try_into().unwrap()), - size: aligned_hole.size - required_size, - }) - }; - - Some(Allocation { - info: HoleInfo { - addr: aligned_hole.addr, - size: required_size, - }, - front_padding: front_padding, - back_padding: back_padding, - }) -} - -/// Searches the list starting at the next hole of `previous` for a big enough hole. A hole is big -/// enough if it can hold an allocation of `layout.size()` bytes with the given `layout.align()`. -/// When a hole is used for an allocation, there may be some needed padding before and/or after -/// the allocation. The padding will then merge back to linked-list -/// This function uses the “first fit” strategy, so it breaks as soon as a big enough hole is -/// found (and returns it). 
-fn allocate_first_fit(mut previous: &mut Hole, layout: Layout) -> Result { - loop { - let allocation: Option = previous - .next - .as_mut() - .and_then(|current| split_hole(current.info(), layout.clone())); - match allocation { - Some(allocation) => { - // link the front/back padding - // Note that there must be no hole between following pair: - // previous - front_padding - // front_padding - back_padding - // back_padding - previous.next - previous.next = previous.next.as_mut().unwrap().next.take(); - if let Some(padding) = allocation.front_padding { - let ptr = padding.addr as *mut Hole; - unsafe { - ptr.write(Hole { - size: padding.size, - next: previous.next.take(), - }) - } - previous.next = Some(unsafe { &mut *ptr }); - previous = move_helper(previous).next.as_mut().unwrap(); - } - if let Some(padding) = allocation.back_padding { - let ptr = padding.addr as *mut Hole; - unsafe { - ptr.write(Hole { - size: padding.size, - next: previous.next.take(), - }) - } - previous.next = Some(unsafe { &mut *ptr }); - } - return Ok(allocation.info); - } - None if previous.next.is_some() => { - // try next hole - previous = move_helper(previous).next.as_mut().unwrap(); - } - None => { - // this was the last hole, so no hole is big enough -> allocation not possible - return Err(()); - } - } - } -} - -/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to -/// find the correct place (the list is sorted by address). -fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { - loop { - assert!(size >= HoleList::min_size()); - - let hole_addr = if hole.size == 0 { - // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, - // so it's address is not the address of the hole. We set the addr to 0 as it's always - // the first hole. - core::ptr::null_mut() - } else { - // tt's a real hole in memory and its address is the address of the hole - hole as *mut _ as *mut u8 - }; - - // Each freed block must be handled by the previous hole in memory. Thus the freed - // address must be always behind the current hole. 
- assert!( - hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, - "invalid deallocation (probably a double free)" - ); - - // get information about the next block - let next_hole_info = hole.next.as_mut().map(|next| next.info()); - - match next_hole_info { - Some(next) - if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr - && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => - { - // block fills the gap between this hole and the next hole - // before: ___XXX____YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFFYYYYY____ where F is the freed block - - hole.size += size + next.size; // merge the F and Y blocks to this X block - hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block - } - _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { - // block is right behind this hole but there is used memory after it - // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFF__YYYYY____ where F is the freed block - - // or: block is right behind this hole and this is the last hole - // before: ___XXX_______________ where X is this hole and Y the next hole - // after: ___XXXFFFF___________ where F is the freed block - - hole.size += size; // merge the F block to this X block - } - Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { - // block is right before the next hole but there is used memory before it - // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // after: ___XXX__FFFFYYYYY____ where F is the freed block - - hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block - size += next.size; // free the merged F/Y block in next iteration - continue; - } - Some(next) if next.addr <= addr => { - // block is behind the next hole, so we delegate it to the next hole - // before: ___XXX__YYYYY________ where X is this hole and Y the next hole - // after: ___XXX__YYYYY__FFFF__ where F is the freed block - - hole = move_helper(hole).next.as_mut().unwrap(); // start next iteration at next hole - continue; - } - _ => { - // block is between this and the next hole - // before: ___XXX________YYYYY_ where X is this hole and Y the next hole - // after: ___XXX__FFFF__YYYYY_ where F is the freed block - - // or: this is the last hole - // before: ___XXX_________ where X is this hole - // after: ___XXX__FFFF___ where F is the freed block - - let new_hole = Hole { - size: size, - next: hole.next.take(), // the reference to the Y block (if it exists) - }; - // write the new hole to the freed memory - debug_assert_eq!(addr as usize % align_of::(), 0); - let ptr = addr as *mut Hole; - unsafe { ptr.write(new_hole) }; - // add the F block as the next block of the X block - hole.next = Some(unsafe { &mut *ptr }); - } - } - break; - } -} +// /// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to +// /// find the correct place (the list is sorted by address). +// fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { +// loop { +// assert!(size >= HoleList::min_size()); + +// let hole_addr = if hole.size == 0 { +// // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, +// // so it's address is not the address of the hole. We set the addr to 0 as it's always +// // the first hole. 
+// core::ptr::null_mut() +// } else { +// // tt's a real hole in memory and its address is the address of the hole +// hole as *mut _ as *mut u8 +// }; + +// // Each freed block must be handled by the previous hole in memory. Thus the freed +// // address must be always behind the current hole. +// assert!( +// hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, +// "invalid deallocation (probably a double free)" +// ); + +// // get information about the next block +// let next_hole_info = hole.next.as_mut().map(|next| next.info()); + +// match next_hole_info { +// Some(next) +// if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr +// && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => +// { +// // block fills the gap between this hole and the next hole +// // before: ___XXX____YYYYY____ where X is this hole and Y the next hole +// // after: ___XXXFFFFYYYYY____ where F is the freed block + +// hole.size += size + next.size; // merge the F and Y blocks to this X block +// hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block +// } +// _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { +// // block is right behind this hole but there is used memory after it +// // before: ___XXX______YYYYY____ where X is this hole and Y the next hole +// // after: ___XXXFFFF__YYYYY____ where F is the freed block + +// // or: block is right behind this hole and this is the last hole +// // before: ___XXX_______________ where X is this hole and Y the next hole +// // after: ___XXXFFFF___________ where F is the freed block + +// hole.size += size; // merge the F block to this X block +// } +// Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { +// // block is right before the next hole but there is used memory before it +// // before: ___XXX______YYYYY____ where X is this hole and Y the next hole +// // after: ___XXX__FFFFYYYYY____ where F is the freed block + +// hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block +// size += next.size; // free the merged F/Y block in next iteration +// continue; +// } +// Some(next) if next.addr <= addr => { +// // block is behind the next hole, so we delegate it to the next hole +// // before: ___XXX__YYYYY________ where X is this hole and Y the next hole +// // after: ___XXX__YYYYY__FFFF__ where F is the freed block + +// hole = move_helper(hole).next.as_mut().unwrap(); // start next iteration at next hole +// continue; +// } +// _ => { +// // block is between this and the next hole +// // before: ___XXX________YYYYY_ where X is this hole and Y the next hole +// // after: ___XXX__FFFF__YYYYY_ where F is the freed block + +// // or: this is the last hole +// // before: ___XXX_________ where X is this hole +// // after: ___XXX__FFFF___ where F is the freed block + +// let new_hole = Hole { +// size: size, +// next: hole.next.take(), // the reference to the Y block (if it exists) +// }; +// // write the new hole to the freed memory +// debug_assert_eq!(addr as usize % align_of::(), 0); +// let ptr = addr as *mut Hole; +// unsafe { ptr.write(new_hole) }; +// // add the F block as the next block of the X block +// hole.next = Some(unsafe { &mut *ptr }); +// } +// } +// break; +// } +// } /// Identity function to ease moving of references. 
/// @@ -390,3 +482,59 @@ fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { fn move_helper(x: T) -> T { x } + +#[cfg(test)] +pub mod test { + use super::*; + use core::alloc::Layout; + use std::mem::{align_of, size_of, MaybeUninit}; + use std::prelude::v1::*; + use crate::Heap; + + #[repr(align(128))] + struct Chonk { + data: [MaybeUninit; N] + } + + impl Chonk { + pub fn new() -> Self { + Self { + data: [MaybeUninit::uninit(); N], + } + } + } + + fn new_heap() -> Heap { + const HEAP_SIZE: usize = 1000; + let heap_space = Box::leak(Box::new(Chonk::::new())); + let data = &mut heap_space.data; + let assumed_location = data.as_mut_ptr().cast(); + + let heap = Heap::from_slice(data); + assert!(heap.bottom == assumed_location); + assert!(heap.size == HEAP_SIZE); + heap + } + + #[test] + fn cursor() { + let mut heap = new_heap(); + let curs = heap.holes_mut().cursor().unwrap(); + // This is the "dummy" node + assert_eq!(curs.previous().size, 0); + // This is the "full" heap + assert_eq!(curs.current().size, 1000); + // There is no other hole + assert!(curs.peek_next().is_none()); + + let reqd = Layout::from_size_align(256, 1).unwrap(); + let _ = curs.split_current(reqd).map_err(drop).unwrap(); + } + + #[test] + fn aff() { + let mut heap = new_heap(); + let reqd = Layout::from_size_align(256, 1).unwrap(); + let _ = heap.allocate_first_fit(reqd).unwrap(); + } +} diff --git a/src/lib.rs b/src/lib.rs index 7a5deef..51acc21 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,6 +5,9 @@ )] #![no_std] +#![allow(dead_code)] +#![allow(unused_imports)] + #[cfg(test)] #[macro_use] extern crate std; @@ -29,8 +32,8 @@ use hole::HoleList; use spinning_top::Spinlock; pub mod hole; -#[cfg(test)] -mod test; +// #[cfg(test)] +// mod test; /// A fixed size heap backed by a linked list of free memory blocks. pub struct Heap { @@ -148,16 +151,16 @@ impl Heap { } } - /// Frees the given allocation. `ptr` must be a pointer returned - /// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined - /// behavior may occur for invalid arguments, thus this function is unsafe. - /// - /// This function walks the list of free memory blocks and inserts the freed block at the - /// correct place. If the freed block is adjacent to another free block, the blocks are merged - /// again. This operation is in `O(n)` since the list needs to be sorted by address. - pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) { - self.used -= self.holes.deallocate(ptr, layout).size(); - } + // /// Frees the given allocation. `ptr` must be a pointer returned + // /// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined + // /// behavior may occur for invalid arguments, thus this function is unsafe. + // /// + // /// This function walks the list of free memory blocks and inserts the freed block at the + // /// correct place. If the freed block is adjacent to another free block, the blocks are merged + // /// again. This operation is in `O(n)` since the list needs to be sorted by address. + // pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) { + // self.used -= self.holes.deallocate(ptr, layout).size(); + // } /// Returns the bottom address of the heap. 
pub fn bottom(&self) -> *mut u8 { @@ -189,95 +192,99 @@ impl Heap { &self.holes } - /// Extends the size of the heap by creating a new hole at the end - /// - /// # Unsafety - /// - /// The new extended area must be valid - pub unsafe fn extend(&mut self, by: usize) { - let top = self.top(); - let layout = Layout::from_size_align(by, 1).unwrap(); - self.holes - .deallocate(NonNull::new_unchecked(top as *mut u8), layout); - self.size += by; + pub(crate) fn holes_mut(&mut self) -> &mut HoleList { + &mut self.holes } -} -#[cfg(all(feature = "alloc_ref", feature = "use_spin"))] -unsafe impl Allocator for LockedHeap { - fn allocate(&self, layout: Layout) -> Result, AllocError> { - if layout.size() == 0 { - return Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)); - } - match self.0.lock().allocate_first_fit(layout) { - Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), - Err(()) => Err(AllocError), - } - } - - unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { - if layout.size() != 0 { - self.0.lock().deallocate(ptr, layout); - } - } + // /// Extends the size of the heap by creating a new hole at the end + // /// + // /// # Unsafety + // /// + // /// The new extended area must be valid + // pub unsafe fn extend(&mut self, by: usize) { + // let top = self.top(); + // let layout = Layout::from_size_align(by, 1).unwrap(); + // self.holes + // .deallocate(NonNull::new_unchecked(top as *mut u8), layout); + // self.size += by; + // } } -#[cfg(feature = "use_spin")] -pub struct LockedHeap(Spinlock); - -#[cfg(feature = "use_spin")] -impl LockedHeap { - /// Creates an empty heap. All allocate calls will return `None`. - #[cfg(feature = "use_spin_nightly")] - pub const fn empty() -> LockedHeap { - LockedHeap(Spinlock::new(Heap::empty())) - } - - /// Creates an empty heap. All allocate calls will return `None`. - #[cfg(not(feature = "use_spin_nightly"))] - pub fn empty() -> LockedHeap { - LockedHeap(Spinlock::new(Heap::empty())) - } - - /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid - /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for - /// anything else. This function is unsafe because it can cause undefined behavior if the - /// given address is invalid. 
- pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { - LockedHeap(Spinlock::new(Heap { - bottom: heap_bottom, - size: heap_size, - used: 0, - holes: HoleList::new(heap_bottom, heap_size), - })) - } -} - -#[cfg(feature = "use_spin")] -impl Deref for LockedHeap { - type Target = Spinlock; - - fn deref(&self) -> &Spinlock { - &self.0 - } -} - -#[cfg(feature = "use_spin")] -unsafe impl GlobalAlloc for LockedHeap { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - self.0 - .lock() - .allocate_first_fit(layout) - .ok() - .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) - } - - unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { - self.0 - .lock() - .deallocate(NonNull::new_unchecked(ptr), layout) - } -} +// #[cfg(all(feature = "alloc_ref", feature = "use_spin"))] +// unsafe impl Allocator for LockedHeap { +// fn allocate(&self, layout: Layout) -> Result, AllocError> { +// if layout.size() == 0 { +// return Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)); +// } +// match self.0.lock().allocate_first_fit(layout) { +// Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), +// Err(()) => Err(AllocError), +// } +// } + +// unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { +// if layout.size() != 0 { +// self.0.lock().deallocate(ptr, layout); +// } +// } +// } + +// #[cfg(feature = "use_spin")] +// pub struct LockedHeap(Spinlock); + +// #[cfg(feature = "use_spin")] +// impl LockedHeap { +// /// Creates an empty heap. All allocate calls will return `None`. +// #[cfg(feature = "use_spin_nightly")] +// pub const fn empty() -> LockedHeap { +// LockedHeap(Spinlock::new(Heap::empty())) +// } + +// /// Creates an empty heap. All allocate calls will return `None`. +// #[cfg(not(feature = "use_spin_nightly"))] +// pub fn empty() -> LockedHeap { +// LockedHeap(Spinlock::new(Heap::empty())) +// } + +// /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid +// /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for +// /// anything else. This function is unsafe because it can cause undefined behavior if the +// /// given address is invalid. +// pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { +// LockedHeap(Spinlock::new(Heap { +// bottom: heap_bottom, +// size: heap_size, +// used: 0, +// holes: HoleList::new(heap_bottom, heap_size), +// })) +// } +// } + +// #[cfg(feature = "use_spin")] +// impl Deref for LockedHeap { +// type Target = Spinlock; + +// fn deref(&self) -> &Spinlock { +// &self.0 +// } +// } + +// #[cfg(feature = "use_spin")] +// unsafe impl GlobalAlloc for LockedHeap { +// unsafe fn alloc(&self, layout: Layout) -> *mut u8 { +// self.0 +// .lock() +// .allocate_first_fit(layout) +// .ok() +// .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) +// } + +// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { +// self.0 +// .lock() +// .deallocate(NonNull::new_unchecked(ptr), layout) +// } +// } /// Align downwards. Returns the greatest x with alignment `align` /// so that x <= addr. The alignment must be a power of 2. 
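
Aside, not part of the patch series: the snippet below is a minimal, self-contained sketch of the
pointer-based alignment style this series switches to. `align_up_ptr` mirrors the new `align_up`
above (compute `align_offset`, then move the pointer with wrapping arithmetic, never leaving
pointer space, so provenance is preserved); the buffer, its size, and the alignment value are
made up for the illustration.

fn align_up_ptr(addr: *mut u8, align: usize) -> *mut u8 {
    // Stay in pointer space instead of casting to `usize` and back, so the result keeps
    // the provenance of `addr` and Miri can reason about later accesses through it.
    let offset = addr.align_offset(align);
    addr.wrapping_add(offset)
}

fn main() {
    let mut backing = [0u8; 64];
    let base: *mut u8 = backing.as_mut_ptr();
    let aligned = align_up_ptr(base, 16);
    assert_eq!(aligned as usize % 16, 0);
    // `aligned` is derived from `base`, so this in-bounds write is provenance-correct.
    unsafe { aligned.write(0xAB) };
}

The real `align_up` in this series prefers `wrapping_offset(offset.try_into().unwrap())`, and the
hole code measures distances with `offset_from`, but the idea is the same.
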
From 2f12b288c2970b900910bb78c822eaf58235cc67 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sat, 25 Jun 2022 21:13:04 +0200 Subject: [PATCH 05/13] Restore dealloc API --- src/hole.rs | 230 ++++++++++++++++++++++++++-------------------------- src/lib.rs | 48 +++++------ src/test.rs | 2 +- 3 files changed, 140 insertions(+), 140 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index a47b8c6..06b6d9d 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -317,37 +317,37 @@ impl HoleList { } } - // /// Frees the allocation given by `ptr` and `layout`. - // /// - // /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with - // /// identical layout. Undefined behavior may occur for invalid arguments. - // /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and - // /// returns the aligned layout. - // /// - // /// This function walks the list and inserts the given block at the correct place. If the freed - // /// block is adjacent to another free block, the blocks are merged again. - // /// This operation is in `O(n)` since the list needs to be sorted by address. - // /// - // /// [`allocate_first_fit`]: HoleList::allocate_first_fit - // pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { - // let aligned_layout = Self::align_layout(layout); - // deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); - // aligned_layout - // } + /// Frees the allocation given by `ptr` and `layout`. + /// + /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with + /// identical layout. Undefined behavior may occur for invalid arguments. + /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and + /// returns the aligned layout. + /// + /// This function walks the list and inserts the given block at the correct place. If the freed + /// block is adjacent to another free block, the blocks are merged again. + /// This operation is in `O(n)` since the list needs to be sorted by address. + /// + /// [`allocate_first_fit`]: HoleList::allocate_first_fit + pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { + let aligned_layout = Self::align_layout(layout); + deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); + aligned_layout + } /// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed. pub fn min_size() -> usize { size_of::() * 2 } - // /// Returns information about the first hole for test purposes. - // #[cfg(test)] - // pub fn first_hole(&self) -> Option<(*const u8, usize)> { - // self.first - // .next - // .as_ref() - // .map(|hole| ((*hole) as *const Hole as *const u8, hole.size)) - // } + /// Returns information about the first hole for test purposes. + #[cfg(test)] + pub fn first_hole(&self) -> Option<(*const u8, usize)> { + self.first + .next + .as_ref() + .map(|hole| (hole.as_ptr() as *mut u8 as *const u8, unsafe { hole.as_ref().size })) + } } /// A block containing free memory. It points to the next hole and thus forms a linked list. @@ -381,96 +381,96 @@ struct Allocation { back_padding: Option, } -// /// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to -// /// find the correct place (the list is sorted by address). 
-// fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { -// loop { -// assert!(size >= HoleList::min_size()); - -// let hole_addr = if hole.size == 0 { -// // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, -// // so it's address is not the address of the hole. We set the addr to 0 as it's always -// // the first hole. -// core::ptr::null_mut() -// } else { -// // tt's a real hole in memory and its address is the address of the hole -// hole as *mut _ as *mut u8 -// }; - -// // Each freed block must be handled by the previous hole in memory. Thus the freed -// // address must be always behind the current hole. -// assert!( -// hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, -// "invalid deallocation (probably a double free)" -// ); - -// // get information about the next block -// let next_hole_info = hole.next.as_mut().map(|next| next.info()); - -// match next_hole_info { -// Some(next) -// if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr -// && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => -// { -// // block fills the gap between this hole and the next hole -// // before: ___XXX____YYYYY____ where X is this hole and Y the next hole -// // after: ___XXXFFFFYYYYY____ where F is the freed block - -// hole.size += size + next.size; // merge the F and Y blocks to this X block -// hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block -// } -// _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { -// // block is right behind this hole but there is used memory after it -// // before: ___XXX______YYYYY____ where X is this hole and Y the next hole -// // after: ___XXXFFFF__YYYYY____ where F is the freed block - -// // or: block is right behind this hole and this is the last hole -// // before: ___XXX_______________ where X is this hole and Y the next hole -// // after: ___XXXFFFF___________ where F is the freed block - -// hole.size += size; // merge the F block to this X block -// } -// Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { -// // block is right before the next hole but there is used memory before it -// // before: ___XXX______YYYYY____ where X is this hole and Y the next hole -// // after: ___XXX__FFFFYYYYY____ where F is the freed block - -// hole.next = hole.next.as_mut().unwrap().next.take(); // remove the Y block -// size += next.size; // free the merged F/Y block in next iteration -// continue; -// } -// Some(next) if next.addr <= addr => { -// // block is behind the next hole, so we delegate it to the next hole -// // before: ___XXX__YYYYY________ where X is this hole and Y the next hole -// // after: ___XXX__YYYYY__FFFF__ where F is the freed block - -// hole = move_helper(hole).next.as_mut().unwrap(); // start next iteration at next hole -// continue; -// } -// _ => { -// // block is between this and the next hole -// // before: ___XXX________YYYYY_ where X is this hole and Y the next hole -// // after: ___XXX__FFFF__YYYYY_ where F is the freed block - -// // or: this is the last hole -// // before: ___XXX_________ where X is this hole -// // after: ___XXX__FFFF___ where F is the freed block - -// let new_hole = Hole { -// size: size, -// next: hole.next.take(), // the reference to the Y block (if it exists) -// }; -// // write the new hole to the freed memory -// debug_assert_eq!(addr as usize % align_of::(), 0); -// let ptr = addr as *mut Hole; -// unsafe { ptr.write(new_hole) }; -// // add the F 
block as the next block of the X block -// hole.next = Some(unsafe { &mut *ptr }); -// } -// } -// break; -// } -// } +/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to +/// find the correct place (the list is sorted by address). +fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { + loop { + assert!(size >= HoleList::min_size()); + + let hole_addr = if hole.size == 0 { + // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, + // so it's address is not the address of the hole. We set the addr to 0 as it's always + // the first hole. + core::ptr::null_mut() + } else { + // tt's a real hole in memory and its address is the address of the hole + hole as *mut _ as *mut u8 + }; + + // Each freed block must be handled by the previous hole in memory. Thus the freed + // address must be always behind the current hole. + assert!( + hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, + "invalid deallocation (probably a double free)" + ); + + // get information about the next block + let next_hole_info = hole.next.as_mut().map(|next| unsafe { next.as_mut().info() }); + + match next_hole_info { + Some(next) + if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr + && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => + { + // block fills the gap between this hole and the next hole + // before: ___XXX____YYYYY____ where X is this hole and Y the next hole + // after: ___XXXFFFFYYYYY____ where F is the freed block + + hole.size += size + next.size; // merge the F and Y blocks to this X block + hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block + } + _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { + // block is right behind this hole but there is used memory after it + // before: ___XXX______YYYYY____ where X is this hole and Y the next hole + // after: ___XXXFFFF__YYYYY____ where F is the freed block + + // or: block is right behind this hole and this is the last hole + // before: ___XXX_______________ where X is this hole and Y the next hole + // after: ___XXXFFFF___________ where F is the freed block + + hole.size += size; // merge the F block to this X block + } + Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { + // block is right before the next hole but there is used memory before it + // before: ___XXX______YYYYY____ where X is this hole and Y the next hole + // after: ___XXX__FFFFYYYYY____ where F is the freed block + + hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block + size += next.size; // free the merged F/Y block in next iteration + continue; + } + Some(next) if next.addr <= addr => { + // block is behind the next hole, so we delegate it to the next hole + // before: ___XXX__YYYYY________ where X is this hole and Y the next hole + // after: ___XXX__YYYYY__FFFF__ where F is the freed block + + hole = unsafe { move_helper(hole).next.as_mut().unwrap().as_mut() }; // start next iteration at next hole + continue; + } + _ => { + // block is between this and the next hole + // before: ___XXX________YYYYY_ where X is this hole and Y the next hole + // after: ___XXX__FFFF__YYYYY_ where F is the freed block + + // or: this is the last hole + // before: ___XXX_________ where X is this hole + // after: ___XXX__FFFF___ where F is the freed block + + let new_hole = Hole { + size: size, + next: hole.next.take(), // the 
reference to the Y block (if it exists) + }; + // write the new hole to the freed memory + debug_assert_eq!(addr as usize % align_of::(), 0); + let ptr = addr as *mut Hole; + unsafe { ptr.write(new_hole) }; + // add the F block as the next block of the X block + hole.next = Some(unsafe { NonNull::new_unchecked(ptr) }); + } + } + break; + } +} /// Identity function to ease moving of references. /// diff --git a/src/lib.rs b/src/lib.rs index 51acc21..baac0c4 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -32,8 +32,8 @@ use hole::HoleList; use spinning_top::Spinlock; pub mod hole; -// #[cfg(test)] -// mod test; +#[cfg(test)] +mod test; /// A fixed size heap backed by a linked list of free memory blocks. pub struct Heap { @@ -151,16 +151,16 @@ impl Heap { } } - // /// Frees the given allocation. `ptr` must be a pointer returned - // /// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined - // /// behavior may occur for invalid arguments, thus this function is unsafe. - // /// - // /// This function walks the list of free memory blocks and inserts the freed block at the - // /// correct place. If the freed block is adjacent to another free block, the blocks are merged - // /// again. This operation is in `O(n)` since the list needs to be sorted by address. - // pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) { - // self.used -= self.holes.deallocate(ptr, layout).size(); - // } + /// Frees the given allocation. `ptr` must be a pointer returned + /// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined + /// behavior may occur for invalid arguments, thus this function is unsafe. + /// + /// This function walks the list of free memory blocks and inserts the freed block at the + /// correct place. If the freed block is adjacent to another free block, the blocks are merged + /// again. This operation is in `O(n)` since the list needs to be sorted by address. + pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) { + self.used -= self.holes.deallocate(ptr, layout).size(); + } /// Returns the bottom address of the heap. 
pub fn bottom(&self) -> *mut u8 { @@ -196,18 +196,18 @@ impl Heap { &mut self.holes } - // /// Extends the size of the heap by creating a new hole at the end - // /// - // /// # Unsafety - // /// - // /// The new extended area must be valid - // pub unsafe fn extend(&mut self, by: usize) { - // let top = self.top(); - // let layout = Layout::from_size_align(by, 1).unwrap(); - // self.holes - // .deallocate(NonNull::new_unchecked(top as *mut u8), layout); - // self.size += by; - // } + /// Extends the size of the heap by creating a new hole at the end + /// + /// # Unsafety + /// + /// The new extended area must be valid + pub unsafe fn extend(&mut self, by: usize) { + let top = self.top(); + let layout = Layout::from_size_align(by, 1).unwrap(); + self.holes + .deallocate(NonNull::new_unchecked(top as *mut u8), layout); + self.size += by; + } } // #[cfg(all(feature = "alloc_ref", feature = "use_spin"))] diff --git a/src/test.rs b/src/test.rs index c9e681c..49e671c 100644 --- a/src/test.rs +++ b/src/test.rs @@ -88,7 +88,7 @@ fn allocate_and_free_double_usize() { *(x.as_ptr() as *mut (usize, usize)) = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe); heap.deallocate(x, layout.clone()); - let real_first = heap.holes().first.next.as_ref().unwrap(); + let real_first = heap.holes().first.next.as_ref().unwrap().as_ref(); assert_eq!(real_first.size, heap.size); assert!(real_first.next.is_none()); From 3b570bf8c7f2670fad9fea0dd9cd3aeaa6f5fc69 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 00:43:17 +0200 Subject: [PATCH 06/13] (Sort of) Works, but I have a different idea --- src/hole.rs | 359 +++++++++++++++++++++++++++++++++++++++++----------- src/test.rs | 12 +- 2 files changed, 288 insertions(+), 83 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index 06b6d9d..aebb83c 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -18,6 +18,16 @@ pub struct Cursor { hole: NonNull, } +enum Position<'a> { + BeforeCurrent, + BetweenCurrentNext { + curr: &'a Hole, + next: &'a Hole, + }, + AfterBoth, + AfterCurrentNoNext, +} + impl Cursor { fn next(mut self) -> Option { unsafe { @@ -54,6 +64,85 @@ impl Cursor { } } + fn data_position(&self, data: *mut u8, size: usize) -> Position { + let cur_ptr = self.hole.as_ptr().cast::(); + if data < cur_ptr { + assert!( + data.wrapping_add(size) <= cur_ptr, + "Freed data overlaps a hole!", + ); + Position::BeforeCurrent + } else { + assert!( + cur_ptr.wrapping_add(self.current().size) <= data, + "hole overlaps freed data!", + ); + + if let Some(next) = self.peek_next() { + let next_ptr = next as *const Hole as *const u8; + let data_const = data as *const u8; + if data_const < next_ptr { + assert!( + data_const.wrapping_add(size) <= next_ptr, + "Freed data overlaps a hole!" 
+ ); + Position::BetweenCurrentNext { + curr: unsafe { self.hole.as_ref() }, + next, + } + } else { + assert!( + next_ptr.wrapping_add(next.size) <= data, + "hole overlaps freed data!", + ); + Position::AfterBoth + } + } else { + Position::AfterCurrentNoNext + } + } + } + + // Merge the current node with IMMEDIATELY FOLLOWING data + unsafe fn merge_data(&mut self, size: usize) { + self.hole.as_mut().size += size; + } + + unsafe fn merge_back(mut self, mut new_hole: NonNull) { + let mut prev = self.prev; + let (size, next) = { + let hole = self.hole.as_mut(); + (hole.size, hole.next.take()) + }; + drop(self.hole); + + prev.as_mut().next = Some(new_hole); + let new_hole = new_hole.as_mut(); + new_hole.size += size; + new_hole.next = next; + } + + unsafe fn insert_back(mut self, mut new_hole: NonNull) { + self.prev.as_mut().next = Some(new_hole); + new_hole.as_mut().next = Some(self.hole); + } + + // Merge the current node with an IMMEDIATELY FOLLOWING next node + unsafe fn merge_next(mut self) { + // We MUST have a current and next + let hole = self.hole.as_mut(); + let next = hole.next.take().unwrap().as_mut(); + + // Extract the data from the next node, then drop it + let next_size = next.size; + let next_next = next.next.take(); + drop(next); + + // Extend the current node to subsume the next node + hole.size += next_size; + hole.next = next_next; + } + // On success, it returns the new allocation, and the linked list has been updated // to accomodate any new holes and allocation. On error, it returns the cursor // unmodified, and has made no changes to the linked list of holes. @@ -126,6 +215,10 @@ impl Cursor { // if it fits, we sits. Otherwise we just skip adding the back padding, and // sort of assume that the allocation is actually a bit larger than it // actually needs to be. + // + // TODO(AJM): Write a test for this - does the code actually handle this + // correctly? Do we need to check the delta is less than an aligned hole + // size? let hole_layout = Layout::new::(); let back_padding_start = align_up(allocation_end, hole_layout.align()); let back_padding_end = back_padding_start.wrapping_add(hole_layout.size()); @@ -331,7 +424,7 @@ impl HoleList { /// [`allocate_first_fit`]: HoleList::allocate_first_fit pub unsafe fn deallocate(&mut self, ptr: NonNull, layout: Layout) -> Layout { let aligned_layout = Self::align_layout(layout); - deallocate(&mut self.first, ptr.as_ptr(), aligned_layout.size()); + deallocate(self, ptr.as_ptr(), aligned_layout.size()); aligned_layout } @@ -364,6 +457,10 @@ impl Hole { size: self.size, } } + + fn addr_u8(&self) -> *const u8 { + self as *const Hole as *const u8 + } } /// Basic information about a hole. @@ -381,95 +478,203 @@ struct Allocation { back_padding: Option, } +unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull { + let hole_addr = addr.cast::(); + debug_assert_eq!(addr as usize % align_of::(), 0); + hole_addr.write(Hole {size, next: None}); + NonNull::new_unchecked(hole_addr) +} + /// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to /// find the correct place (the list is sorted by address). -fn deallocate(mut hole: &mut Hole, addr: *mut u8, mut size: usize) { - loop { - assert!(size >= HoleList::min_size()); - - let hole_addr = if hole.size == 0 { - // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, - // so it's address is not the address of the hole. We set the addr to 0 as it's always - // the first hole. 
- core::ptr::null_mut() - } else { - // tt's a real hole in memory and its address is the address of the hole - hole as *mut _ as *mut u8 - }; - - // Each freed block must be handled by the previous hole in memory. Thus the freed - // address must be always behind the current hole. - assert!( - hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, - "invalid deallocation (probably a double free)" - ); - - // get information about the next block - let next_hole_info = hole.next.as_mut().map(|next| unsafe { next.as_mut().info() }); - - match next_hole_info { - Some(next) - if hole_addr.wrapping_offset(hole.size.try_into().unwrap()) == addr - && addr.wrapping_offset(size.try_into().unwrap()) == next.addr => - { - // block fills the gap between this hole and the next hole - // before: ___XXX____YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFFYYYYY____ where F is the freed block - - hole.size += size + next.size; // merge the F and Y blocks to this X block - hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block - } - _ if hole_addr.wrapping_add(hole.size.try_into().unwrap()) == addr => { - // block is right behind this hole but there is used memory after it - // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFF__YYYYY____ where F is the freed block +fn deallocate(list: &mut HoleList, addr: *mut u8, mut size: usize) { + let mut cursor = if let Some(cursor) = list.cursor() { + cursor + } else { + // Oh hey, there are no holes at all. That means this just becomes the only hole! + let hole = unsafe { make_hole(addr, size) }; + list.first.next = Some(hole); + return; + }; - // or: block is right behind this hole and this is the last hole - // before: ___XXX_______________ where X is this hole and Y the next hole - // after: ___XXXFFFF___________ where F is the freed block - hole.size += size; // merge the F block to this X block - } - Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { - // block is right before the next hole but there is used memory before it - // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // after: ___XXX__FFFFYYYYY____ where F is the freed block - - hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block - size += next.size; // free the merged F/Y block in next iteration - continue; - } - Some(next) if next.addr <= addr => { - // block is behind the next hole, so we delegate it to the next hole - // before: ___XXX__YYYYY________ where X is this hole and Y the next hole - // after: ___XXX__YYYYY__FFFF__ where F is the freed block + let addr_const = addr as *const u8; - hole = unsafe { move_helper(hole).next.as_mut().unwrap().as_mut() }; // start next iteration at next hole - continue; - } - _ => { - // block is between this and the next hole - // before: ___XXX________YYYYY_ where X is this hole and Y the next hole - // after: ___XXX__FFFF__YYYYY_ where F is the freed block + loop { + assert!(size >= HoleList::min_size()); + match cursor.data_position(addr, size) { + Position::BeforeCurrent => { + // We should only hit the code path if this is BEFORE the "real" nodes, e.g. 
+ // it is after the "dummy" node, and before the first hole + assert_eq!(cursor.previous().size, 0, "This can only be the dummy node!"); + let cur_addr_u8 = cursor.current().addr_u8(); + let alloc_touches_cur = addr_const.wrapping_add(size) == cur_addr_u8; + + unsafe { + let nn_hole_addr = make_hole(addr, size); + list.first.next = Some(nn_hole_addr); + + if alloc_touches_cur { + // before: ___XXX____YYYYY____ where X is this hole and Y the next hole + // after: _FFXXX____YYYYY____ where F is the freed block + // _XXXXX____YYYYY____ where X is the new combined hole + cursor.merge_back(nn_hole_addr); + } else { + // before: ___XXX____YYYYY____ where X is this hole and Y the next hole + // after: FF_XXX____YYYYY____ where F is the freed block + // NN_XXX____YYYYY____ where N is the new hole + cursor.insert_back(nn_hole_addr); + } + } + + return; + }, + Position::BetweenCurrentNext { curr, next } => { + let cur_addr_u8 = curr.addr_u8(); + let nxt_addr_u8 = next.addr_u8(); + + let current_touches_alloc = cur_addr_u8.wrapping_add(curr.size) == addr_const; + let alloc_touches_next = addr_const.wrapping_add(size) == nxt_addr_u8; + + match (current_touches_alloc, alloc_touches_next) { + (true, true) => { + // Everything is touching! Merge the current hole to consume the + // following data and the following hole + // + // block fills the gap between this hole and the next hole + // before: ___XXX____YYYYY____ where X is this hole and Y the next hole + // after: ___XXXFFFFYYYYY____ where F is the freed block + // ___XXXXXXXXXXXX____ where X is the new combined hole + unsafe { + cursor.merge_data(size); + cursor.merge_next(); + } + return; + }, + (true, false) => { + // The current node touches the allocation + // + // block is right behind this hole but there is used memory after it + // before: ___XXX______YYYYY____ where X is this hole and Y the next hole + // after: ___XXXFFFF__YYYYY____ where F is the freed block + // after: ___XXXXXXX__YYYYY____ + unsafe { + cursor.merge_data(size); + } + return; + }, + (false, true) => { + todo!("Merge allocation and next!"); + }, + (false, false) => { + todo!("Is this even possible?"); + }, + } + }, + Position::AfterBoth => { + cursor = cursor.next().unwrap(); + }, + Position::AfterCurrentNoNext => { + let cur_addr_u8 = curr.addr_u8(); + let nxt_addr_u8 = next.addr_u8(); + + let current_touches_alloc = cur_addr_u8.wrapping_add(curr.size) == addr_const; + if current_touches_alloc { + // block is right behind this hole and this is the last hole + // before: ___XXX_______________ where X is this hole and Y the next hole + // after: ___XXXFFFF___________ where F is the freed block + unsafe { + cursor.merge_data(size); + } + return; + } else { + + } // or: this is the last hole // before: ___XXX_________ where X is this hole // after: ___XXX__FFFF___ where F is the freed block + // - let new_hole = Hole { - size: size, - next: hole.next.take(), // the reference to the Y block (if it exists) - }; - // write the new hole to the freed memory - debug_assert_eq!(addr as usize % align_of::(), 0); - let ptr = addr as *mut Hole; - unsafe { ptr.write(new_hole) }; - // add the F block as the next block of the X block - hole.next = Some(unsafe { NonNull::new_unchecked(ptr) }); + todo!("after current, no next!") } } - break; } + // loop { + // + + // let hole_addr = if hole.size == 0 { + // // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, + // // so it's address is not the address of the hole. 
We set the addr to 0 as it's always + // // the first hole. + // core::ptr::null_mut() + // } else { + // // tt's a real hole in memory and its address is the address of the hole + // hole as *mut _ as *mut u8 + // }; + + // // Each freed block must be handled by the previous hole in memory. Thus the freed + // // address must be always behind the current hole. + // assert!( + // hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, + // "invalid deallocation (probably a double free)" + // ); + + // // get information about the next block + // let next_hole_info = hole.next.as_mut().map(|next| unsafe { next.as_mut().info() }); + + // match next_hole_info { + // _ if hole_addr.wrapping_add(hole.size) == addr => { + // // block is right behind this hole but there is used memory after it + // // before: ___XXX______YYYYY____ where X is this hole and Y the next hole + // // after: ___XXXFFFF__YYYYY____ where F is the freed block + + // // or: block is right behind this hole and this is the last hole + // // before: ___XXX_______________ where X is this hole and Y the next hole + // // after: ___XXXFFFF___________ where F is the freed block + + // hole.size += size; // merge the F block to this X block + // } + // Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { + // // block is right before the next hole but there is used memory before it + // // before: ___XXX______YYYYY____ where X is this hole and Y the next hole + // // after: ___XXX__FFFFYYYYY____ where F is the freed block + + // hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block + // size += next.size; // free the merged F/Y block in next iteration + // continue; + // } + // Some(next) if next.addr <= addr => { + // // block is behind the next hole, so we delegate it to the next hole + // // before: ___XXX__YYYYY________ where X is this hole and Y the next hole + // // after: ___XXX__YYYYY__FFFF__ where F is the freed block + + // hole = unsafe { move_helper(hole).next.as_mut().unwrap().as_mut() }; // start next iteration at next hole + // continue; + // } + // _ => { + // // block is between this and the next hole + // // before: ___XXX________YYYYY_ where X is this hole and Y the next hole + // // after: ___XXX__FFFF__YYYYY_ where F is the freed block + + // // or: this is the last hole + // // before: ___XXX_________ where X is this hole + // // after: ___XXX__FFFF___ where F is the freed block + + // let new_hole = Hole { + // size: size, + // next: hole.next.take(), // the reference to the Y block (if it exists) + // }; + // // write the new hole to the freed memory + // debug_assert_eq!(addr as usize % align_of::(), 0); + // let ptr = addr as *mut Hole; + // unsafe { ptr.write(new_hole) }; + // // add the F block as the next block of the X block + // hole.next = Some(unsafe { NonNull::new_unchecked(ptr) }); + // } + // } + // break; + // } } /// Identity function to ease moving of references. 
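Aside — the (still partly `todo!`) rework above hinges on pointer adjacency between the freed block, the current hole, and the next hole. A minimal standalone sketch of that classification, not part of the patch and with made-up names, covering the cases drawn in the ASCII diagrams:

```rust
// Illustrative only: mirrors the merge cases from the comments above
// (fills gap / touches current / touches next / lies after next / isolated).
fn classify(
    freed: *const u8,
    freed_size: usize,
    hole: *const u8,
    hole_size: usize,
    next: Option<*const u8>,
) -> &'static str {
    let hole_end = hole.wrapping_add(hole_size);
    let freed_end = freed.wrapping_add(freed_size);
    match next {
        Some(n) if hole_end == freed && freed_end == n => {
            "fills the gap: merge freed block and next hole into the current hole"
        }
        _ if hole_end == freed => "touches only the current hole: grow it in place",
        Some(n) if freed_end == n => "touches only the next hole: merge forward",
        Some(n) if n <= freed => "lies after the next hole: advance to the next hole",
        _ => "isolated: write a fresh hole into the freed block and link it in",
    }
}
```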
diff --git a/src/test.rs b/src/test.rs index 49e671c..73b7451 100644 --- a/src/test.rs +++ b/src/test.rs @@ -302,11 +302,11 @@ fn extend_fragmented_heap() { heap.deallocate(alloc1.unwrap(), layout_1.clone()); } - unsafe { - heap.extend(1024); - } + // unsafe { + // heap.extend(1024); + // } - // We got additional 1024 bytes hole at the end of the heap - // Try to allocate there - assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); + // // We got additional 1024 bytes hole at the end of the heap + // // Try to allocate there + // assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); } From c56d1bcf75f6552cf28409b68b37e6a921427686 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 03:55:31 +0200 Subject: [PATCH 07/13] reimpl dealloc --- src/hole.rs | 404 +++++++++++++++++++--------------------------------- src/lib.rs | 150 +++++++++---------- src/test.rs | 30 +++- 3 files changed, 245 insertions(+), 339 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index aebb83c..368f9a9 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -64,85 +64,6 @@ impl Cursor { } } - fn data_position(&self, data: *mut u8, size: usize) -> Position { - let cur_ptr = self.hole.as_ptr().cast::(); - if data < cur_ptr { - assert!( - data.wrapping_add(size) <= cur_ptr, - "Freed data overlaps a hole!", - ); - Position::BeforeCurrent - } else { - assert!( - cur_ptr.wrapping_add(self.current().size) <= data, - "hole overlaps freed data!", - ); - - if let Some(next) = self.peek_next() { - let next_ptr = next as *const Hole as *const u8; - let data_const = data as *const u8; - if data_const < next_ptr { - assert!( - data_const.wrapping_add(size) <= next_ptr, - "Freed data overlaps a hole!" - ); - Position::BetweenCurrentNext { - curr: unsafe { self.hole.as_ref() }, - next, - } - } else { - assert!( - next_ptr.wrapping_add(next.size) <= data, - "hole overlaps freed data!", - ); - Position::AfterBoth - } - } else { - Position::AfterCurrentNoNext - } - } - } - - // Merge the current node with IMMEDIATELY FOLLOWING data - unsafe fn merge_data(&mut self, size: usize) { - self.hole.as_mut().size += size; - } - - unsafe fn merge_back(mut self, mut new_hole: NonNull) { - let mut prev = self.prev; - let (size, next) = { - let hole = self.hole.as_mut(); - (hole.size, hole.next.take()) - }; - drop(self.hole); - - prev.as_mut().next = Some(new_hole); - let new_hole = new_hole.as_mut(); - new_hole.size += size; - new_hole.next = next; - } - - unsafe fn insert_back(mut self, mut new_hole: NonNull) { - self.prev.as_mut().next = Some(new_hole); - new_hole.as_mut().next = Some(self.hole); - } - - // Merge the current node with an IMMEDIATELY FOLLOWING next node - unsafe fn merge_next(mut self) { - // We MUST have a current and next - let hole = self.hole.as_mut(); - let next = hole.next.take().unwrap().as_mut(); - - // Extract the data from the next node, then drop it - let next_size = next.size; - let next_next = next.next.take(); - drop(next); - - // Extend the current node to subsume the next node - hole.size += next_size; - hole.next = next_next; - } - // On success, it returns the new allocation, and the linked list has been updated // to accomodate any new holes and allocation. On error, it returns the cursor // unmodified, and has made no changes to the linked list of holes. @@ -253,7 +174,9 @@ impl Cursor { match (front_padding, back_padding) { (None, None) => { - // No padding at all, how lucky! Nothing to do but return the allocation. + // No padding at all, how lucky! 
We still need to connect the PREVIOUS node + // to the NEXT node, if there was one + unsafe { prev.as_mut().next = maybe_next_addr; } }, (None, Some(singlepad)) | (Some(singlepad), None) => unsafe { // We have front padding OR back padding, but not both. @@ -337,6 +260,30 @@ impl HoleList { } } + #[cfg(test)] + pub(crate) fn debug(&mut self) { + if let Some(cursor) = self.cursor() { + let mut cursor = cursor; + loop { + println!( + "prev: {:?}[{}], hole: {:?}[{}]", + cursor.previous() as *const Hole, + cursor.previous().size, + cursor.current() as *const Hole, + cursor.current().size, + ); + if let Some(c) = cursor.next() { + cursor = c; + } else { + println!("Done!"); + return; + } + } + } else { + println!("No holes"); + } + } + /// Creates a `HoleList` that contains the given hole. /// /// ## Safety @@ -485,198 +432,139 @@ unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull { NonNull::new_unchecked(hole_addr) } -/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to -/// find the correct place (the list is sorted by address). -fn deallocate(list: &mut HoleList, addr: *mut u8, mut size: usize) { - let mut cursor = if let Some(cursor) = list.cursor() { - cursor - } else { - // Oh hey, there are no holes at all. That means this just becomes the only hole! - let hole = unsafe { make_hole(addr, size) }; - list.first.next = Some(hole); - return; - }; +impl Cursor { + fn try_insert_back(self, mut node: NonNull) -> Result { + // Covers the case where the new hole exists BEFORE the current pointer, + // which only happens when previous is the stub pointer + if node < self.hole { + let node_u8 = node.as_ptr().cast::(); + let node_size = unsafe { node.as_ref().size }; + let hole_u8 = self.hole.as_ptr().cast::(); + + assert!(node_u8.wrapping_add(node_size) <= hole_u8); + assert_eq!(self.previous().size, 0); + + let Cursor { mut prev, hole } = self; + unsafe { + prev.as_mut().next = Some(node); + node.as_mut().next = Some(hole); + } + Ok(Cursor { prev, hole: node }) + } else { + Err(self) + } + } + fn try_insert_after(&mut self, mut node: NonNull) -> Result<(), ()> { + if self.hole < node { + let node_u8 = node.as_ptr().cast::(); + let node_size = unsafe { node.as_ref().size }; + let hole_u8 = self.hole.as_ptr().cast::(); + let hole_size = self.current().size; - let addr_const = addr as *const u8; + // Does hole overlap node? + assert!(hole_u8.wrapping_add(hole_size) <= node_u8); - loop { - assert!(size >= HoleList::min_size()); - match cursor.data_position(addr, size) { - Position::BeforeCurrent => { - // We should only hit the code path if this is BEFORE the "real" nodes, e.g. - // it is after the "dummy" node, and before the first hole - assert_eq!(cursor.previous().size, 0, "This can only be the dummy node!"); - let cur_addr_u8 = cursor.current().addr_u8(); - let alloc_touches_cur = addr_const.wrapping_add(size) == cur_addr_u8; + // If we have a next, does the node overlap next? + if let Some(next) = self.current().next.as_ref() { + let node_u8 = node_u8 as *const u8; + assert!(node_u8.wrapping_add(node_size) <= next.as_ptr().cast::()) + } + + // All good! Let's insert that after. 
+ unsafe { + let maybe_next = self.hole.as_mut().next.replace(node); + node.as_mut().next = maybe_next; + } + Ok(()) + } else { + Err(()) + } + } - unsafe { - let nn_hole_addr = make_hole(addr, size); - list.first.next = Some(nn_hole_addr); - - if alloc_touches_cur { - // before: ___XXX____YYYYY____ where X is this hole and Y the next hole - // after: _FFXXX____YYYYY____ where F is the freed block - // _XXXXX____YYYYY____ where X is the new combined hole - cursor.merge_back(nn_hole_addr); - } else { - // before: ___XXX____YYYYY____ where X is this hole and Y the next hole - // after: FF_XXX____YYYYY____ where F is the freed block - // NN_XXX____YYYYY____ where N is the new hole - cursor.insert_back(nn_hole_addr); - } - } + // Merge the current node with an IMMEDIATELY FOLLOWING next node + fn try_merge_next_two(self) { + let Cursor { prev: _, mut hole } = self; + + for _ in 0..2 { + // Is there a next node? + let mut next = if let Some(next) = unsafe { hole.as_mut() }.next.as_ref() { + *next + } else { return; - }, - Position::BetweenCurrentNext { curr, next } => { - let cur_addr_u8 = curr.addr_u8(); - let nxt_addr_u8 = next.addr_u8(); - - let current_touches_alloc = cur_addr_u8.wrapping_add(curr.size) == addr_const; - let alloc_touches_next = addr_const.wrapping_add(size) == nxt_addr_u8; - - match (current_touches_alloc, alloc_touches_next) { - (true, true) => { - // Everything is touching! Merge the current hole to consume the - // following data and the following hole - // - // block fills the gap between this hole and the next hole - // before: ___XXX____YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFFYYYYY____ where F is the freed block - // ___XXXXXXXXXXXX____ where X is the new combined hole - unsafe { - cursor.merge_data(size); - cursor.merge_next(); - } - return; - }, - (true, false) => { - // The current node touches the allocation - // - // block is right behind this hole but there is used memory after it - // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // after: ___XXXFFFF__YYYYY____ where F is the freed block - // after: ___XXXXXXX__YYYYY____ - unsafe { - cursor.merge_data(size); - } - return; - }, - (false, true) => { - todo!("Merge allocation and next!"); - }, - (false, false) => { - todo!("Is this even possible?"); - }, - } - }, - Position::AfterBoth => { - cursor = cursor.next().unwrap(); - }, - Position::AfterCurrentNoNext => { - let cur_addr_u8 = curr.addr_u8(); - let nxt_addr_u8 = next.addr_u8(); - - let current_touches_alloc = cur_addr_u8.wrapping_add(curr.size) == addr_const; - if current_touches_alloc { - // block is right behind this hole and this is the last hole - // before: ___XXX_______________ where X is this hole and Y the next hole - // after: ___XXXFFFF___________ where F is the freed block - unsafe { - cursor.merge_data(size); - } - return; - } else { + }; - } + // Can we directly merge these? e.g. are they touching? + // + // TODO(AJM): Should "touching" also include deltas <= node size? 
+ let hole_u8 = hole.as_ptr().cast::(); + let hole_sz = unsafe { hole.as_ref().size }; + let next_u8 = next.as_ptr().cast::(); - // or: this is the last hole - // before: ___XXX_________ where X is this hole - // after: ___XXX__FFFF___ where F is the freed block - // + let touching = hole_u8.wrapping_add(hole_sz) == next_u8; - todo!("after current, no next!") + if touching { + let next_sz; + let next_next; + unsafe { + let next_mut = next.as_mut(); + next_sz = next_mut.size; + next_next = next_mut.next.take(); + } + unsafe { + let hole_mut = hole.as_mut(); + hole_mut.next = next_next; + hole_mut.size += next_sz; + } + // Okay, we just merged the next item. DON'T move the cursor, as we can + // just try to merge the next_next, which is now our next. + } else { + // Welp, not touching, can't merge. Move to the next node. + hole = next; } } + } - // loop { - // - - // let hole_addr = if hole.size == 0 { - // // It's the dummy hole, which is the head of the HoleList. It's somewhere on the stack, - // // so it's address is not the address of the hole. We set the addr to 0 as it's always - // // the first hole. - // core::ptr::null_mut() - // } else { - // // tt's a real hole in memory and its address is the address of the hole - // hole as *mut _ as *mut u8 - // }; - - // // Each freed block must be handled by the previous hole in memory. Thus the freed - // // address must be always behind the current hole. - // assert!( - // hole_addr.wrapping_offset(hole.size.try_into().unwrap()) <= addr, - // "invalid deallocation (probably a double free)" - // ); - - // // get information about the next block - // let next_hole_info = hole.next.as_mut().map(|next| unsafe { next.as_mut().info() }); - - // match next_hole_info { - // _ if hole_addr.wrapping_add(hole.size) == addr => { - // // block is right behind this hole but there is used memory after it - // // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // // after: ___XXXFFFF__YYYYY____ where F is the freed block - - // // or: block is right behind this hole and this is the last hole - // // before: ___XXX_______________ where X is this hole and Y the next hole - // // after: ___XXXFFFF___________ where F is the freed block - - // hole.size += size; // merge the F block to this X block - // } - // Some(next) if addr.wrapping_offset(size.try_into().unwrap()) == next.addr => { - // // block is right before the next hole but there is used memory before it - // // before: ___XXX______YYYYY____ where X is this hole and Y the next hole - // // after: ___XXX__FFFFYYYYY____ where F is the freed block - - // hole.next = unsafe { hole.next.as_mut().unwrap().as_mut().next.take() }; // remove the Y block - // size += next.size; // free the merged F/Y block in next iteration - // continue; - // } - // Some(next) if next.addr <= addr => { - // // block is behind the next hole, so we delegate it to the next hole - // // before: ___XXX__YYYYY________ where X is this hole and Y the next hole - // // after: ___XXX__YYYYY__FFFF__ where F is the freed block - - // hole = unsafe { move_helper(hole).next.as_mut().unwrap().as_mut() }; // start next iteration at next hole - // continue; - // } - // _ => { - // // block is between this and the next hole - // // before: ___XXX________YYYYY_ where X is this hole and Y the next hole - // // after: ___XXX__FFFF__YYYYY_ where F is the freed block - - // // or: this is the last hole - // // before: ___XXX_________ where X is this hole - // // after: ___XXX__FFFF___ where F is the freed block - - // let 
new_hole = Hole { - // size: size, - // next: hole.next.take(), // the reference to the Y block (if it exists) - // }; - // // write the new hole to the freed memory - // debug_assert_eq!(addr as usize % align_of::(), 0); - // let ptr = addr as *mut Hole; - // unsafe { ptr.write(new_hole) }; - // // add the F block as the next block of the X block - // hole.next = Some(unsafe { NonNull::new_unchecked(ptr) }); - // } - // } - // break; - // } } +/// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to +/// find the correct place (the list is sorted by address). +fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { + // Start off by just making this allocation a hole. + let hole = unsafe { make_hole(addr, size) }; + + let cursor = if let Some(cursor) = list.cursor() { + cursor + } else { + // Oh hey, there are no holes at all. That means this just becomes the only hole! + list.first.next = Some(hole); + return; + }; + + // First, check if we can just insert this node at the top of the list. If the + // insertion succeeded, then our cursor now points to the NEW node, behind the + // previous location the cursor was pointing to. + let cursor = match cursor.try_insert_back(hole) { + Ok(cursor) => { + // Yup! It lives at the front of the list. Hooray! + cursor + }, + Err(mut cursor) => { + // Nope. It lives somewhere else. Advance the list until we find its home + while let Err(()) = cursor.try_insert_after(hole) { + cursor = cursor.next().unwrap(); + } + cursor + }, + }; + + // We now need to merge up to two times to combine the current node with the next + // two nodes. + cursor.try_merge_next_two(); +} + + /// Identity function to ease moving of references. /// /// By default, references are reborrowed instead of moved (equivalent to `&mut *reference`). This diff --git a/src/lib.rs b/src/lib.rs index baac0c4..1550895 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -210,81 +210,81 @@ impl Heap { } } -// #[cfg(all(feature = "alloc_ref", feature = "use_spin"))] -// unsafe impl Allocator for LockedHeap { -// fn allocate(&self, layout: Layout) -> Result, AllocError> { -// if layout.size() == 0 { -// return Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)); -// } -// match self.0.lock().allocate_first_fit(layout) { -// Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), -// Err(()) => Err(AllocError), -// } -// } - -// unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { -// if layout.size() != 0 { -// self.0.lock().deallocate(ptr, layout); -// } -// } -// } - -// #[cfg(feature = "use_spin")] -// pub struct LockedHeap(Spinlock); - -// #[cfg(feature = "use_spin")] -// impl LockedHeap { -// /// Creates an empty heap. All allocate calls will return `None`. -// #[cfg(feature = "use_spin_nightly")] -// pub const fn empty() -> LockedHeap { -// LockedHeap(Spinlock::new(Heap::empty())) -// } - -// /// Creates an empty heap. All allocate calls will return `None`. -// #[cfg(not(feature = "use_spin_nightly"))] -// pub fn empty() -> LockedHeap { -// LockedHeap(Spinlock::new(Heap::empty())) -// } - -// /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid -// /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for -// /// anything else. This function is unsafe because it can cause undefined behavior if the -// /// given address is invalid. 
-// pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { -// LockedHeap(Spinlock::new(Heap { -// bottom: heap_bottom, -// size: heap_size, -// used: 0, -// holes: HoleList::new(heap_bottom, heap_size), -// })) -// } -// } - -// #[cfg(feature = "use_spin")] -// impl Deref for LockedHeap { -// type Target = Spinlock; - -// fn deref(&self) -> &Spinlock { -// &self.0 -// } -// } - -// #[cfg(feature = "use_spin")] -// unsafe impl GlobalAlloc for LockedHeap { -// unsafe fn alloc(&self, layout: Layout) -> *mut u8 { -// self.0 -// .lock() -// .allocate_first_fit(layout) -// .ok() -// .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) -// } - -// unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { -// self.0 -// .lock() -// .deallocate(NonNull::new_unchecked(ptr), layout) -// } -// } +#[cfg(all(feature = "alloc_ref", feature = "use_spin"))] +unsafe impl Allocator for LockedHeap { + fn allocate(&self, layout: Layout) -> Result, AllocError> { + if layout.size() == 0 { + return Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)); + } + match self.0.lock().allocate_first_fit(layout) { + Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())), + Err(()) => Err(AllocError), + } + } + + unsafe fn deallocate(&self, ptr: NonNull, layout: Layout) { + if layout.size() != 0 { + self.0.lock().deallocate(ptr, layout); + } + } +} + +#[cfg(feature = "use_spin")] +pub struct LockedHeap(Spinlock); + +#[cfg(feature = "use_spin")] +impl LockedHeap { + /// Creates an empty heap. All allocate calls will return `None`. + #[cfg(feature = "use_spin_nightly")] + pub const fn empty() -> LockedHeap { + LockedHeap(Spinlock::new(Heap::empty())) + } + + /// Creates an empty heap. All allocate calls will return `None`. + #[cfg(not(feature = "use_spin_nightly"))] + pub fn empty() -> LockedHeap { + LockedHeap(Spinlock::new(Heap::empty())) + } + + /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid + /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for + /// anything else. This function is unsafe because it can cause undefined behavior if the + /// given address is invalid. + pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap { + LockedHeap(Spinlock::new(Heap { + bottom: heap_bottom, + size: heap_size, + used: 0, + holes: HoleList::new(heap_bottom, heap_size), + })) + } +} + +#[cfg(feature = "use_spin")] +impl Deref for LockedHeap { + type Target = Spinlock; + + fn deref(&self) -> &Spinlock { + &self.0 + } +} + +#[cfg(feature = "use_spin")] +unsafe impl GlobalAlloc for LockedHeap { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + self.0 + .lock() + .allocate_first_fit(layout) + .ok() + .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) + } + + unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { + self.0 + .lock() + .deallocate(NonNull::new_unchecked(ptr), layout) + } +} /// Align downwards. Returns the greatest x with alignment `align` /// so that x <= addr. The alignment must be a power of 2. 
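Aside — with `LockedHeap`, its `Deref` to the spinlock, and the `GlobalAlloc` impl re-enabled above, callers now hand the allocator a pointer instead of a `usize` address. A hedged sketch of the usual global-allocator setup under the new signature; the buffer name and 64 KiB size are assumptions for the example, and the `const` form of `empty()` needs the `use_spin_nightly` feature:

```rust
use linked_list_allocator::LockedHeap;

#[global_allocator]
static ALLOCATOR: LockedHeap = LockedHeap::empty();

// Backing storage for the heap; the size is arbitrary for the example.
static mut HEAP_MEM: [u8; 64 * 1024] = [0; 64 * 1024];

pub fn init_heap() {
    unsafe {
        // `init` now takes a `*mut u8` plus a length instead of a start address.
        ALLOCATOR.lock().init(HEAP_MEM.as_mut_ptr(), HEAP_MEM.len());
    }
}
```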
diff --git a/src/test.rs b/src/test.rs index 73b7451..2845787 100644 --- a/src/test.rs +++ b/src/test.rs @@ -302,11 +302,29 @@ fn extend_fragmented_heap() { heap.deallocate(alloc1.unwrap(), layout_1.clone()); } - // unsafe { - // heap.extend(1024); - // } + unsafe { + heap.extend(1024); + } - // // We got additional 1024 bytes hole at the end of the heap - // // Try to allocate there - // assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); + // We got additional 1024 bytes hole at the end of the heap + // Try to allocate there + assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); } + + +// #[test] +// fn break_fragmented_heap() { +// let mut heap = new_heap(); + +// let layout_1 = Layout::from_size_align(505, 1).unwrap(); +// let alloc1 = heap.allocate_first_fit(layout_1.clone()); +// assert!(alloc1.is_ok()); + +// unsafe { +// heap.deallocate(alloc1.unwrap(), layout_1.clone()); +// } + +// let layout_2 = Layout::from_size_align(1024, 1).unwrap(); +// let alloc2 = heap.allocate_first_fit(layout_2.clone()); +// assert!(alloc2.is_ok()); +// } From 7c578bee5e02ef52bcffa2983b6f4b0e8b6a044b Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 04:34:39 +0200 Subject: [PATCH 08/13] Cleanups --- src/hole.rs | 123 +++++++++++++++------------------------------------- src/lib.rs | 13 +----- src/test.rs | 20 +-------- 3 files changed, 37 insertions(+), 119 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index 368f9a9..ead24c7 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -1,5 +1,5 @@ use core::alloc::Layout; -use core::convert::{TryFrom, TryInto}; +use core::convert::TryInto; use core::mem; use core::mem::{align_of, size_of}; use core::ptr::NonNull; @@ -13,42 +13,20 @@ pub struct HoleList { pub(crate) first: Hole, // dummy } -pub struct Cursor { +pub(crate) struct Cursor { prev: NonNull, hole: NonNull, } -enum Position<'a> { - BeforeCurrent, - BetweenCurrentNext { - curr: &'a Hole, - next: &'a Hole, - }, - AfterBoth, - AfterCurrentNoNext, -} - impl Cursor { fn next(mut self) -> Option { unsafe { - if let Some(nhole) = self.hole.as_mut().next { - Some(Cursor { + self.hole.as_mut().next.map(|nhole| { + Cursor { prev: self.hole, hole: nhole, - }) - } else { - None - } - } - } - - fn peek_next(&self) -> Option<&Hole> { - unsafe { - if let Some(nhole) = self.hole.as_ref().next { - Some(nhole.as_ref()) - } else { - None - } + } + }) } } @@ -170,7 +148,6 @@ impl Cursor { // As of now, the old `Hole` is no more. We are about to replace it with one or more of // the front padding, the allocation, and the back padding. - drop(hole); match (front_padding, back_padding) { (None, None) => { @@ -249,7 +226,7 @@ impl HoleList { } } - pub fn cursor(&mut self) -> Option { + pub(crate) fn cursor(&mut self) -> Option { if let Some(hole) = self.first.next { Some(Cursor { hole, @@ -326,9 +303,7 @@ impl HoleList { size = Self::min_size(); } let size = align_up_size(size, mem::align_of::()); - let layout = Layout::from_size_align(size, layout.align()).unwrap(); - - layout + Layout::from_size_align(size, layout.align()).unwrap() } /// Searches the list for a big enough hole. @@ -396,20 +371,6 @@ pub(crate) struct Hole { pub next: Option>, } -impl Hole { - /// Returns basic information about the hole. - fn info(&mut self) -> HoleInfo { - HoleInfo { - addr: self as *mut _ as *mut u8, - size: self.size, - } - } - - fn addr_u8(&self) -> *const u8 { - self as *const Hole as *const u8 - } -} - /// Basic information about a hole. 
#[derive(Debug, Clone, Copy)] struct HoleInfo { @@ -417,14 +378,6 @@ struct HoleInfo { size: usize, } -/// The result returned by `split_hole` and `allocate_first_fit`. Contains the address and size of -/// the allocation (in the `info` field), and the front and back padding. -struct Allocation { - info: HoleInfo, - front_padding: Option, - back_padding: Option, -} - unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull { let hole_addr = addr.cast::(); debug_assert_eq!(addr as usize % align_of::(), 0); @@ -482,12 +435,11 @@ impl Cursor { } } - - // Merge the current node with an IMMEDIATELY FOLLOWING next node - fn try_merge_next_two(self) { + // Merge the current node with up to n following nodes + fn try_merge_next_n(self, max: usize) { let Cursor { prev: _, mut hole } = self; - for _ in 0..2 { + for _ in 0..max { // Is there a next node? let mut next = if let Some(next) = unsafe { hole.as_mut() }.next.as_ref() { *next @@ -496,13 +448,12 @@ impl Cursor { }; // Can we directly merge these? e.g. are they touching? - // - // TODO(AJM): Should "touching" also include deltas <= node size? let hole_u8 = hole.as_ptr().cast::(); let hole_sz = unsafe { hole.as_ref().size }; let next_u8 = next.as_ptr().cast::(); + let end = hole_u8.wrapping_add(hole_sz); - let touching = hole_u8.wrapping_add(hole_sz) == next_u8; + let touching = end == next_u8; if touching { let next_sz; @@ -531,13 +482,18 @@ impl Cursor { /// Frees the allocation given by `(addr, size)`. It starts at the given hole and walks the list to /// find the correct place (the list is sorted by address). fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { - // Start off by just making this allocation a hole. + // Start off by just making this allocation a hole where it stands. + // We'll attempt to merge it with other nodes once we figure out where + // it should live let hole = unsafe { make_hole(addr, size) }; + // Now, try to get a cursor to the list - this only works if we have at least + // one non-"dummy" hole in the list let cursor = if let Some(cursor) = list.cursor() { cursor } else { - // Oh hey, there are no holes at all. That means this just becomes the only hole! + // Oh hey, there are no "real" holes at all. That means this just + // becomes the only "real" hole! list.first.next = Some(hole); return; }; @@ -545,42 +501,36 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { // First, check if we can just insert this node at the top of the list. If the // insertion succeeded, then our cursor now points to the NEW node, behind the // previous location the cursor was pointing to. - let cursor = match cursor.try_insert_back(hole) { + // + // Otherwise, our cursor will point at the current non-"dummy" head of the list + let (cursor, n) = match cursor.try_insert_back(hole) { Ok(cursor) => { - // Yup! It lives at the front of the list. Hooray! - cursor + // Yup! It lives at the front of the list. Hooray! Attempt to merge + // it with just ONE next node, since it is at the front of the list + (cursor, 1) }, Err(mut cursor) => { // Nope. It lives somewhere else. Advance the list until we find its home while let Err(()) = cursor.try_insert_after(hole) { cursor = cursor.next().unwrap(); } - cursor + // Great! We found a home for it, our cursor is now JUST BEFORE the new + // node we inserted, so we need to try to merge up to twice: One to combine + // the current node to the new node, then once more to combine the new node + // with the node after that. 
+ (cursor, 2) }, }; // We now need to merge up to two times to combine the current node with the next // two nodes. - cursor.try_merge_next_two(); -} - - -/// Identity function to ease moving of references. -/// -/// By default, references are reborrowed instead of moved (equivalent to `&mut *reference`). This -/// function forces a move. -/// -/// for more information, see section “id Forces References To Move” in: -/// https://bluss.github.io/rust/fun/2015/10/11/stuff-the-identity-function-does/ -fn move_helper(x: T) -> T { - x + cursor.try_merge_next_n(n); } #[cfg(test)] pub mod test { - use super::*; use core::alloc::Layout; - use std::mem::{align_of, size_of, MaybeUninit}; + use std::mem::MaybeUninit; use std::prelude::v1::*; use crate::Heap; @@ -612,16 +562,13 @@ pub mod test { #[test] fn cursor() { let mut heap = new_heap(); - let curs = heap.holes_mut().cursor().unwrap(); + let curs = heap.holes.cursor().unwrap(); // This is the "dummy" node assert_eq!(curs.previous().size, 0); // This is the "full" heap assert_eq!(curs.current().size, 1000); // There is no other hole - assert!(curs.peek_next().is_none()); - - let reqd = Layout::from_size_align(256, 1).unwrap(); - let _ = curs.split_current(reqd).map_err(drop).unwrap(); + assert!(curs.next().is_none()); } #[test] diff --git a/src/lib.rs b/src/lib.rs index 1550895..6cf198d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -5,9 +5,6 @@ )] #![no_std] -#![allow(dead_code)] -#![allow(unused_imports)] - #[cfg(test)] #[macro_use] extern crate std; @@ -188,14 +185,6 @@ impl Heap { self.size - self.used } - pub(crate) fn holes(&self) -> &HoleList { - &self.holes - } - - pub(crate) fn holes_mut(&mut self) -> &mut HoleList { - &mut self.holes - } - /// Extends the size of the heap by creating a new hole at the end /// /// # Unsafety @@ -276,7 +265,7 @@ unsafe impl GlobalAlloc for LockedHeap { .lock() .allocate_first_fit(layout) .ok() - .map_or(0 as *mut u8, |allocation| allocation.as_ptr()) + .map_or(core::ptr::null_mut(), |allocation| allocation.as_ptr()) } unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) { diff --git a/src/test.rs b/src/test.rs index 2845787..3d1c947 100644 --- a/src/test.rs +++ b/src/test.rs @@ -88,7 +88,7 @@ fn allocate_and_free_double_usize() { *(x.as_ptr() as *mut (usize, usize)) = (0xdeafdeadbeafbabe, 0xdeafdeadbeafbabe); heap.deallocate(x, layout.clone()); - let real_first = heap.holes().first.next.as_ref().unwrap().as_ref(); + let real_first = heap.holes.first.next.as_ref().unwrap().as_ref(); assert_eq!(real_first.size, heap.size); assert!(real_first.next.is_none()); @@ -310,21 +310,3 @@ fn extend_fragmented_heap() { // Try to allocate there assert!(heap.allocate_first_fit(layout_2.clone()).is_ok()); } - - -// #[test] -// fn break_fragmented_heap() { -// let mut heap = new_heap(); - -// let layout_1 = Layout::from_size_align(505, 1).unwrap(); -// let alloc1 = heap.allocate_first_fit(layout_1.clone()); -// assert!(alloc1.is_ok()); - -// unsafe { -// heap.deallocate(alloc1.unwrap(), layout_1.clone()); -// } - -// let layout_2 = Layout::from_size_align(1024, 1).unwrap(); -// let alloc2 = heap.allocate_first_fit(layout_2.clone()); -// assert!(alloc2.is_ok()); -// } From f3b37494d4c368c3104fb8adc6fbc54c793417e8 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 04:36:44 +0200 Subject: [PATCH 09/13] Cargo fmt --- src/hole.rs | 56 ++++++++++++++++++++++++++--------------------------- src/test.rs | 2 +- 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/src/hole.rs b/src/hole.rs index 
ead24c7..9b695dd 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -21,25 +21,19 @@ pub(crate) struct Cursor { impl Cursor { fn next(mut self) -> Option { unsafe { - self.hole.as_mut().next.map(|nhole| { - Cursor { - prev: self.hole, - hole: nhole, - } + self.hole.as_mut().next.map(|nhole| Cursor { + prev: self.hole, + hole: nhole, }) } } fn current(&self) -> &Hole { - unsafe { - self.hole.as_ref() - } + unsafe { self.hole.as_ref() } } fn previous(&self) -> &Hole { - unsafe { - self.prev.as_ref() - } + unsafe { self.prev.as_ref() } } // On success, it returns the new allocation, and the linked list has been updated @@ -128,8 +122,8 @@ impl Cursor { Some(HoleInfo { addr: back_padding_start, size: unsafe { hole_end.offset_from(back_padding_start) } - .try_into() - .unwrap(), + .try_into() + .unwrap(), }) } else { // No, it does not. @@ -142,7 +136,9 @@ impl Cursor { //////////////////////////////////////////////////////////////////////////// let Cursor { mut prev, mut hole } = self; // Remove the current location from the previous node - unsafe { prev.as_mut().next = None; } + unsafe { + prev.as_mut().next = None; + } // Take the next node out of our current node let maybe_next_addr: Option> = unsafe { hole.as_mut().next.take() }; @@ -153,8 +149,10 @@ impl Cursor { (None, None) => { // No padding at all, how lucky! We still need to connect the PREVIOUS node // to the NEXT node, if there was one - unsafe { prev.as_mut().next = maybe_next_addr; } - }, + unsafe { + prev.as_mut().next = maybe_next_addr; + } + } (None, Some(singlepad)) | (Some(singlepad), None) => unsafe { // We have front padding OR back padding, but not both. // @@ -195,7 +193,7 @@ impl Cursor { // Then connect the OLD previous to the NEW FRONT padding prev.as_mut().next = Some(NonNull::new_unchecked(frontpad_ptr)); - } + }, } // Well that went swimmingly! Hand off the allocation, with surgery performed successfully! @@ -324,10 +322,10 @@ impl HoleList { match cursor.split_current(aligned_layout) { Ok((ptr, _len)) => { return Ok((NonNull::new(ptr).ok_or(())?, aligned_layout)); - }, + } Err(curs) => { cursor = curs.next().ok_or(())?; - }, + } } } } @@ -358,10 +356,11 @@ impl HoleList { /// Returns information about the first hole for test purposes. #[cfg(test)] pub fn first_hole(&self) -> Option<(*const u8, usize)> { - self.first - .next - .as_ref() - .map(|hole| (hole.as_ptr() as *mut u8 as *const u8, unsafe { hole.as_ref().size })) + self.first.next.as_ref().map(|hole| { + (hole.as_ptr() as *mut u8 as *const u8, unsafe { + hole.as_ref().size + }) + }) } } @@ -381,7 +380,7 @@ struct HoleInfo { unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull { let hole_addr = addr.cast::(); debug_assert_eq!(addr as usize % align_of::(), 0); - hole_addr.write(Hole {size, next: None}); + hole_addr.write(Hole { size, next: None }); NonNull::new_unchecked(hole_addr) } @@ -475,7 +474,6 @@ impl Cursor { hole = next; } } - } } @@ -508,7 +506,7 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { // Yup! It lives at the front of the list. Hooray! Attempt to merge // it with just ONE next node, since it is at the front of the list (cursor, 1) - }, + } Err(mut cursor) => { // Nope. It lives somewhere else. Advance the list until we find its home while let Err(()) = cursor.try_insert_after(hole) { @@ -519,7 +517,7 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { // the current node to the new node, then once more to combine the new node // with the node after that. 
(cursor, 2) - }, + } }; // We now need to merge up to two times to combine the current node with the next @@ -529,14 +527,14 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) { #[cfg(test)] pub mod test { + use crate::Heap; use core::alloc::Layout; use std::mem::MaybeUninit; use std::prelude::v1::*; - use crate::Heap; #[repr(align(128))] struct Chonk { - data: [MaybeUninit; N] + data: [MaybeUninit; N], } impl Chonk { diff --git a/src/test.rs b/src/test.rs index 3d1c947..41e4fec 100644 --- a/src/test.rs +++ b/src/test.rs @@ -5,7 +5,7 @@ use std::prelude::v1::*; #[repr(align(128))] struct Chonk { - data: [MaybeUninit; N] + data: [MaybeUninit; N], } impl Chonk { From 6129afaa626edae18dff45f25e8b819bfba5f674 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 04:40:11 +0200 Subject: [PATCH 10/13] Fix constructor --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 6cf198d..1929f0e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -45,7 +45,7 @@ impl Heap { #[cfg(not(feature = "const_mut_refs"))] pub fn empty() -> Heap { Heap { - bottom: 0, + bottom: core::ptr::null_mut(), size: 0, used: 0, holes: HoleList::empty(), From 0d263fc3efb9fac6f15e95473679bc4407adf991 Mon Sep 17 00:00:00 2001 From: James Munns Date: Sun, 26 Jun 2022 17:16:27 +0200 Subject: [PATCH 11/13] Address review comments, update CHANGELOG, add CI miri tests --- .github/workflows/build.yml | 32 +++++++++++++++ Changelog.md | 6 +++ src/hole.rs | 78 ++++++++++++++++++++++++------------- src/lib.rs | 60 ++++++++++++++++++++-------- src/test.rs | 47 ++++++++++++++++++++++ 5 files changed, 181 insertions(+), 42 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a2fa8a7..3d2956b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,6 +10,27 @@ on: - cron: '40 5 * * *' # every day at 5:40 pull_request: +env: + # disable incremental compilation. + # + # incremental compilation is useful as part of an edit-build-test-edit cycle, + # as it lets the compiler avoid recompiling code that hasn't changed. however, + # on CI, we're not making small edits; we're almost always building the entire + # project from scratch. thus, incremental compilation on CI actually + # introduces *additional* overhead to support making future builds + # faster...but no future builds will ever occur in any given CI environment. + # + # see https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow + # for details. + CARGO_INCREMENTAL: 0 + # allow more retries for network requests in cargo (downloading crates) and + # rustup (installing toolchains). this should help to reduce flaky CI failures + # from transient network timeouts or other issues. + CARGO_NET_RETRY: 10 + RUSTUP_MAX_RETRIES: 10 + # don't emit giant backtraces in the CI logs. 
+ RUST_BACKTRACE: short + jobs: test: name: "Test" @@ -110,6 +131,17 @@ jobs: - name: "Run cargo test with `use_spin_nightly` feature" run: cargo test --features use_spin_nightly + test_miri: + name: "Miri tests" + runs-on: ubuntu-latest + env: + MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-tag-raw-pointers" + # TODO: Make sure the first test fails by not adding -Zmiri-ignore-leaks + steps: + - uses: actions/checkout@v1 + - run: rustup toolchain install nightly --profile minimal --component rust-src miri + - run: cargo +nightly miri test --all-features + check_formatting: name: "Check Formatting" runs-on: ubuntu-latest diff --git a/Changelog.md b/Changelog.md index f6cefae..c13805d 100644 --- a/Changelog.md +++ b/Changelog.md @@ -1,5 +1,11 @@ # Unreleased +- Changed constructor to take `*mut u8` instead of `usize` ([#62]) + - NOTE: Breaking API change - will require 0.10.0 release +- Reworked internals to pass Miri tests ([#62]) + +[#62]: https://github.com/phil-opp/linked-list-allocator/pull/62 + # 0.9.1 – 2021-10-17 - Add safe constructor and initialization for `Heap` ([#55](https://github.com/phil-opp/linked-list-allocator/pull/55)) diff --git a/src/hole.rs b/src/hole.rs index 9b695dd..5c25649 100644 --- a/src/hole.rs +++ b/src/hole.rs @@ -83,9 +83,7 @@ impl Cursor { // it will just have a smaller size after we have chopped off the "tail" for // the allocation. addr: hole_addr_u8, - size: unsafe { aligned_addr.offset_from(hole_addr_u8) } - .try_into() - .unwrap(), + size: (aligned_addr as usize) - (hole_addr_u8 as usize), }); aligned_addr }; @@ -109,24 +107,25 @@ impl Cursor { // sort of assume that the allocation is actually a bit larger than it // actually needs to be. // - // TODO(AJM): Write a test for this - does the code actually handle this - // correctly? Do we need to check the delta is less than an aligned hole - // size? + // NOTE: Because we always use `HoleList::align_layout`, the size of + // the new allocation is always "rounded up" to cover any partial gaps that + // would have occurred. For this reason, we DON'T need to "round up" + // to account for an unaligned hole spot. let hole_layout = Layout::new::(); let back_padding_start = align_up(allocation_end, hole_layout.align()); let back_padding_end = back_padding_start.wrapping_add(hole_layout.size()); // Will the proposed new back padding actually fit in the old hole slot? back_padding = if back_padding_end <= hole_end { - // Yes, it does! + // Yes, it does! Place a back padding node Some(HoleInfo { addr: back_padding_start, - size: unsafe { hole_end.offset_from(back_padding_start) } - .try_into() - .unwrap(), + size: (hole_end as usize) - (back_padding_start as usize), }) } else { - // No, it does not. + // No, it does not. We are now pretending the allocation now + // holds the extra 0..size_of::() bytes that are not + // big enough to hold what SHOULD be back_padding None }; } @@ -236,6 +235,7 @@ impl HoleList { } #[cfg(test)] + #[allow(dead_code)] pub(crate) fn debug(&mut self) { if let Some(cursor) = self.cursor() { let mut cursor = cursor; @@ -261,11 +261,11 @@ impl HoleList { /// Creates a `HoleList` that contains the given hole. /// - /// ## Safety + /// # Safety /// - /// This function is unsafe because it - /// creates a hole at the given `hole_addr`. This can cause undefined behavior if this address - /// is invalid or if memory from the `[hole_addr, hole_addr+size)` range is used somewhere else. + /// This function is unsafe because it creates a hole at the given `hole_addr`. 
+    /// This can cause undefined behavior if this address is invalid or if memory from the
+    /// `[hole_addr, hole_addr+size)` range is used somewhere else.
     ///
     /// The pointer to `hole_addr` is automatically aligned.
     pub unsafe fn new(hole_addr: *mut u8, hole_size: usize) -> HoleList {
@@ -314,6 +314,10 @@ impl HoleList {
     ///
     /// This function uses the “first fit” strategy, so it uses the first hole that is big
     /// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations.
+    //
+    // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later
+    // release to remove this clippy warning
+    #[allow(clippy::result_unit_err)]
     pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<(NonNull<u8>, Layout), ()> {
         let aligned_layout = Self::align_layout(layout);
         let mut cursor = self.cursor().ok_or(())?;
@@ -332,16 +336,18 @@ impl HoleList {
 
     /// Frees the allocation given by `ptr` and `layout`.
     ///
-    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
-    /// identical layout. Undefined behavior may occur for invalid arguments.
-    /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
-    /// returns the aligned layout.
-    ///
     /// This function walks the list and inserts the given block at the correct place. If the freed
     /// block is adjacent to another free block, the blocks are merged again.
     /// This operation is in `O(n)` since the list needs to be sorted by address.
     ///
     /// [`allocate_first_fit`]: HoleList::allocate_first_fit
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
+    /// identical layout. Undefined behavior may occur for invalid arguments.
+    /// The function performs exactly the same layout adjustments as [`allocate_first_fit`] and
+    /// returns the aligned layout.
     pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) -> Layout {
         let aligned_layout = Self::align_layout(layout);
         deallocate(self, ptr.as_ptr(), aligned_layout.size());
@@ -379,7 +385,11 @@ struct HoleInfo {
 
 unsafe fn make_hole(addr: *mut u8, size: usize) -> NonNull<Hole> {
     let hole_addr = addr.cast::<Hole>();
-    debug_assert_eq!(addr as usize % align_of::<Hole>(), 0);
+    debug_assert_eq!(
+        addr as usize % align_of::<Hole>(),
+        0,
+        "Hole address not aligned!",
+    );
     hole_addr.write(Hole { size, next: None });
     NonNull::new_unchecked(hole_addr)
 }
@@ -393,8 +403,11 @@ impl Cursor {
         let node_size = unsafe { node.as_ref().size };
         let hole_u8 = self.hole.as_ptr().cast::<u8>();
 
-        assert!(node_u8.wrapping_add(node_size) <= hole_u8);
-        assert_eq!(self.previous().size, 0);
+        assert!(
+            node_u8.wrapping_add(node_size) <= hole_u8,
+            "Freed node aliases existing hole! Bad free?",
+        );
+        debug_assert_eq!(self.previous().size, 0);
 
         let Cursor { mut prev, hole } = self;
         unsafe {
@@ -415,12 +428,18 @@ impl Cursor {
         let hole_size = self.current().size;
 
         // Does hole overlap node?
-        assert!(hole_u8.wrapping_add(hole_size) <= node_u8);
+        assert!(
+            hole_u8.wrapping_add(hole_size) <= node_u8,
+            "Freed node aliases existing hole! Bad free?",
+        );
 
         // If we have a next, does the node overlap next?
         if let Some(next) = self.current().next.as_ref() {
             let node_u8 = node_u8 as *const u8;
-            assert!(node_u8.wrapping_add(node_size) <= next.as_ptr().cast::<u8>())
+            assert!(
+                node_u8.wrapping_add(node_size) <= next.as_ptr().cast::<u8>(),
+                "Freed node aliases existing hole! Bad free?",
+            );
         }
 
         // All good! Let's insert that after.
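The overlap assertions above compare raw `*mut u8` pointers directly, and every address manipulation in the reworked list stays in pointer arithmetic so that Miri's strict-provenance checks can follow it. As a rough sketch only (not necessarily the exact helper used in `hole.rs`), a provenance-preserving `align_up` for raw pointers can be written like this:

    fn align_up(addr: *mut u8, align: usize) -> *mut u8 {
        // `align_offset` computes how many bytes to advance so the address becomes
        // a multiple of `align`; it panics if `align` is not a power of two.
        let offset = addr.align_offset(align);
        // `wrapping_add` moves the pointer without discarding its provenance, which
        // is what strict-provenance Miri runs require.
        addr.wrapping_add(offset)
    }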
@@ -447,6 +466,11 @@ impl Cursor {
         };
 
         // Can we directly merge these? e.g. are they touching?
+        //
+        // NOTE: Because we always use `HoleList::align_layout`, the size of
+        // the new hole is always "rounded up" to cover any partial gaps that
+        // would have occurred. For this reason, we DON'T need to "round up"
+        // to account for an unaligned hole spot.
         let hole_u8 = hole.as_ptr().cast::<u8>();
         let hole_sz = unsafe { hole.as_ref().size };
         let next_u8 = next.as_ptr().cast::<u8>();
@@ -510,7 +534,9 @@ fn deallocate(list: &mut HoleList, addr: *mut u8, size: usize) {
         Err(mut cursor) => {
             // Nope. It lives somewhere else. Advance the list until we find its home
             while let Err(()) = cursor.try_insert_after(hole) {
-                cursor = cursor.next().unwrap();
+                cursor = cursor
+                    .next()
+                    .expect("Reached end of holes without finding deallocation hole!");
             }
             // Great! We found a home for it, our cursor is now JUST BEFORE the new
             // node we inserted, so we need to try to merge up to twice: One to combine
diff --git a/src/lib.rs b/src/lib.rs
index 1929f0e..e83cc77 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -17,7 +17,7 @@ use core::alloc::GlobalAlloc;
 use core::alloc::Layout;
 #[cfg(feature = "alloc_ref")]
 use core::alloc::{AllocError, Allocator};
-use core::convert::{TryFrom, TryInto};
+use core::convert::TryInto;
 use core::mem::MaybeUninit;
 #[cfg(feature = "use_spin")]
 use core::ops::Deref;
@@ -64,10 +64,17 @@ impl Heap {
 
     /// Initializes an empty heap
     ///
-    /// # Unsafety
+    /// # Safety
     ///
     /// This function must be called at most once and must only be used on an
     /// empty heap.
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn init(&mut self, heap_bottom: *mut u8, heap_size: usize) {
         self.bottom = heap_bottom;
         self.size = heap_size;
@@ -104,10 +111,16 @@ impl Heap {
         unsafe { self.init(address, size) }
     }
 
-    /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
-    /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
-    /// anything else. This function is unsafe because it can cause undefined behavior if the
-    /// given address is invalid.
+    /// Creates a new heap with the given `bottom` and `size`.
+    ///
+    /// # Safety
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> Heap {
         if heap_size < HoleList::min_size() {
             Self::empty()
@@ -138,6 +151,10 @@ impl Heap {
     /// This function scans the list of free memory blocks and uses the first block that is big
     /// enough. The runtime is in O(n) where n is the number of free blocks, but it should be
     /// reasonably fast for small allocations.
+    //
+    // NOTE: We could probably replace this with an `Option` instead of a `Result` in a later
+    // release to remove this clippy warning
+    #[allow(clippy::result_unit_err)]
     pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<NonNull<u8>, ()> {
         match self.holes.allocate_first_fit(layout) {
             Ok((ptr, aligned_layout)) => {
@@ -149,12 +166,16 @@ impl Heap {
     }
 
     /// Frees the given allocation. `ptr` must be a pointer returned
-    /// by a call to the `allocate_first_fit` function with identical size and alignment. Undefined
-    /// behavior may occur for invalid arguments, thus this function is unsafe.
+    /// by a call to the `allocate_first_fit` function with identical size and alignment.
     ///
     /// This function walks the list of free memory blocks and inserts the freed block at the
     /// correct place. If the freed block is adjacent to another free block, the blocks are merged
     /// again. This operation is in `O(n)` since the list needs to be sorted by address.
+    ///
+    /// # Safety
+    ///
+    /// `ptr` must be a pointer returned by a call to the [`allocate_first_fit`] function with
+    /// identical layout. Undefined behavior may occur for invalid arguments.
     pub unsafe fn deallocate(&mut self, ptr: NonNull<u8>, layout: Layout) {
         self.used -= self.holes.deallocate(ptr, layout).size();
     }
@@ -171,8 +192,7 @@ impl Heap {
 
     /// Return the top address of the heap
     pub fn top(&self) -> *mut u8 {
-        self.bottom
-            .wrapping_offset(isize::try_from(self.size).unwrap())
+        self.bottom.wrapping_add(self.size)
     }
 
     /// Returns the size of the used part of the heap
@@ -187,9 +207,11 @@ impl Heap {
 
     /// Extends the size of the heap by creating a new hole at the end
     ///
-    /// # Unsafety
+    /// # Safety
     ///
-    /// The new extended area must be valid
+    /// The amount of data given in `by` MUST exist directly after the original
+    /// range of data provided when constructing the [Heap]. The additional data
+    /// must have the same lifetime of the original range of data.
     pub unsafe fn extend(&mut self, by: usize) {
         let top = self.top();
         let layout = Layout::from_size_align(by, 1).unwrap();
@@ -235,10 +257,16 @@ impl LockedHeap {
         LockedHeap(Spinlock::new(Heap::empty()))
     }
 
-    /// Creates a new heap with the given `bottom` and `size`. The bottom address must be valid
-    /// and the memory in the `[heap_bottom, heap_bottom + heap_size)` range must not be used for
-    /// anything else. This function is unsafe because it can cause undefined behavior if the
-    /// given address is invalid.
+    /// Creates a new heap with the given `bottom` and `size`.
+    ///
+    /// # Safety
+    ///
+    /// The bottom address must be valid and the memory in the
+    /// `[heap_bottom, heap_bottom + heap_size)` range must not be used for anything else.
+    /// This function is unsafe because it can cause undefined behavior if the given address
+    /// is invalid.
+    ///
+    /// The provided memory range must be valid for the `'static` lifetime.
     pub unsafe fn new(heap_bottom: *mut u8, heap_size: usize) -> LockedHeap {
         LockedHeap(Spinlock::new(Heap {
             bottom: heap_bottom,
diff --git a/src/test.rs b/src/test.rs
index 41e4fec..f61bc10 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -210,6 +210,53 @@ fn allocate_multiple_sizes() {
     }
 }
 
+// This test makes sure that the heap works correctly when the input slice has
+// a variety of non-Hole aligned starting addresses
+#[test]
+fn allocate_multiple_unaligned() {
+    for offset in 0..=Layout::new::<Hole>().size() {
+        let mut heap = new_heap_skip(offset);
+        let base_size = size_of::<usize>();
+        let base_align = align_of::<usize>();
+
+        let layout_1 = Layout::from_size_align(base_size * 2, base_align).unwrap();
+        let layout_2 = Layout::from_size_align(base_size * 7, base_align).unwrap();
+        let layout_3 = Layout::from_size_align(base_size * 3, base_align * 4).unwrap();
+        let layout_4 = Layout::from_size_align(base_size * 4, base_align).unwrap();
+
+        let x = heap.allocate_first_fit(layout_1.clone()).unwrap();
+        let y = heap.allocate_first_fit(layout_2.clone()).unwrap();
+        assert_eq!(y.as_ptr() as usize, x.as_ptr() as usize + base_size * 2);
+        let z = heap.allocate_first_fit(layout_3.clone()).unwrap();
+        assert_eq!(z.as_ptr() as usize % (base_size * 4), 0);
+
+        unsafe {
+            heap.deallocate(x, layout_1.clone());
+        }
+
+        let a = heap.allocate_first_fit(layout_4.clone()).unwrap();
+        let b = heap.allocate_first_fit(layout_1.clone()).unwrap();
+        assert_eq!(b, x);
+
+        unsafe {
+            heap.deallocate(y, layout_2);
+            heap.deallocate(z, layout_3);
+            heap.deallocate(a, layout_4);
+            heap.deallocate(b, layout_1);
+        }
+    }
+}
+
+fn new_heap_skip(ct: usize) -> Heap {
+    const HEAP_SIZE: usize = 1000;
+    let heap_space = Box::leak(Box::new(Chonk::<HEAP_SIZE>::new()));
+    let data = &mut heap_space.data[ct..];
+    let heap = Heap::from_slice(data);
+    // assert!(heap.bottom == assumed_location);
+    // assert!(heap.size == HEAP_SIZE);
+    heap
+}
+
 #[test]
 fn allocate_usize() {
     let mut heap = new_heap();

From 4204ae3c734da51fe61b68fdfd738937808e2f75 Mon Sep 17 00:00:00 2001
From: James Munns
Date: Sun, 26 Jun 2022 17:19:54 +0200
Subject: [PATCH 12/13] Let miri ignore leaks

---
 .github/workflows/build.yml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 3d2956b..392731c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -135,8 +135,7 @@ jobs:
     name: "Miri tests"
     runs-on: ubuntu-latest
     env:
-      MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-tag-raw-pointers"
-      # TODO: Make sure the first test fails by not adding -Zmiri-ignore-leaks
+      MIRIFLAGS: "-Zmiri-disable-isolation -Zmiri-strict-provenance -Zmiri-tag-raw-pointers -Zmiri-ignore-leaks"
     steps:
     - uses: actions/checkout@v1
     - run: rustup toolchain install nightly --profile minimal --component rust-src miri

From 085c26426f06909560a7069f72253192440bdef4 Mon Sep 17 00:00:00 2001
From: James Munns
Date: Mon, 27 Jun 2022 08:54:54 +0200
Subject: [PATCH 13/13] Restore Send bound unsafely

---
 src/lib.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/lib.rs b/src/lib.rs
index e83cc77..d257629 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -40,6 +40,8 @@ pub struct Heap {
     holes: HoleList,
 }
 
+unsafe impl Send for Heap {}
+
 impl Heap {
     /// Creates an empty heap. All allocate calls will return `None`.
     #[cfg(not(feature = "const_mut_refs"))]
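Taken together, the series replaces every integer address in the public API with `*mut u8`, so callers hand the allocator a real pointer whose provenance covers the whole heap region. A minimal sketch of how the reworked API is driven, assuming a std test environment; the names `main` and `HEAP_SIZE` are illustrative and not part of the patch:

    use core::alloc::Layout;
    use core::mem::MaybeUninit;
    use linked_list_allocator::Heap;

    fn main() {
        const HEAP_SIZE: usize = 1000;
        // Leak the backing memory so it is valid for `'static`, as the safety
        // comments added in this series require.
        let space: &'static mut [MaybeUninit<u8>] =
            Box::leak(vec![MaybeUninit::uninit(); HEAP_SIZE].into_boxed_slice());
        let mut heap = Heap::from_slice(space);

        let layout = Layout::from_size_align(64, 8).unwrap();
        let ptr = heap.allocate_first_fit(layout).expect("allocation failed");
        // Deallocation takes back the exact pointer and layout pair handed out.
        unsafe { heap.deallocate(ptr, layout) };
        assert_eq!(heap.used(), 0);
    }

The new CI job runs the crate's own tests under `cargo +nightly miri test`, which is what exercises the provenance rules this rework targets.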