Skip to content

Commit c31e812

Browse files
authored
Merge pull request #4 from phil-opp/global_allocator
Use new allocator API
2 parents 07a9205 + 1fa15df commit c31e812

File tree

3 files changed

+165
-103
lines changed

3 files changed

+165
-103
lines changed

src/hole.rs

+50-34
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
use core::ptr::Unique;
22
use core::mem::{self, size_of};
3+
use alloc::allocator::{Layout, AllocErr};
34

45
use super::align_up;
56

@@ -26,11 +27,13 @@ impl HoleList {
2627
assert!(size_of::<Hole>() == Self::min_size());
2728

2829
let ptr = hole_addr as *mut Hole;
29-
mem::replace(&mut *ptr,
30-
Hole {
31-
size: hole_size,
32-
next: None,
33-
});
30+
mem::replace(
31+
&mut *ptr,
32+
Hole {
33+
size: hole_size,
34+
next: None,
35+
},
36+
);
3437

3538
HoleList {
3639
first: Hole {
@@ -41,14 +44,15 @@ impl HoleList {
4144
}
4245

4346
/// Searches the list for a big enough hole. A hole is big enough if it can hold an allocation
44-
/// of `size` bytes with the given `align`. If such a hole is found in the list, a block of the
45-
/// required size is allocated from it. Then the start address of that block is returned.
47+
/// of `layout.size()` bytes with the given `layout.align()`. If such a hole is found in the
48+
/// list, a block of the required size is allocated from it. Then the start address of that
49+
/// block is returned.
4650
/// This function uses the “first fit” strategy, so it uses the first hole that is big
4751
/// enough. Thus the runtime is in O(n) but it should be reasonably fast for small allocations.
48-
pub fn allocate_first_fit(&mut self, size: usize, align: usize) -> Option<*mut u8> {
49-
assert!(size >= Self::min_size());
52+
pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
53+
assert!(layout.size() >= Self::min_size());
5054

51-
allocate_first_fit(&mut self.first, size, align).map(|allocation| {
55+
allocate_first_fit(&mut self.first, layout).map(|allocation| {
5256
if let Some(padding) = allocation.front_padding {
5357
deallocate(&mut self.first, padding.addr, padding.size);
5458
}
@@ -59,14 +63,14 @@ impl HoleList {
5963
})
6064
}
6165

62-
/// Frees the allocation given by `ptr` and `size`. `ptr` must be a pointer returned by a call
63-
/// to the `allocate_first_fit` function with identical size. Undefined behavior may occur for
66+
/// Frees the allocation given by `ptr` and `layout`. `ptr` must be a pointer returned by a call
67+
/// to the `allocate_first_fit` function with identical layout. Undefined behavior may occur for
6468
/// invalid arguments.
6569
/// This function walks the list and inserts the given block at the correct place. If the freed
6670
/// block is adjacent to another free block, the blocks are merged again.
6771
/// This operation is in `O(n)` since the list needs to be sorted by address.
68-
pub unsafe fn deallocate(&mut self, ptr: *mut u8, size: usize) {
69-
deallocate(&mut self.first, ptr as usize, size)
72+
pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
73+
deallocate(&mut self.first, ptr as usize, layout.size())
7074
}
7175

7276
/// Returns the minimal allocation size. Smaller allocations or deallocations are not allowed.
@@ -77,7 +81,9 @@ impl HoleList {
7781
/// Returns information about the first hole for test purposes.
7882
#[cfg(test)]
7983
pub fn first_hole(&self) -> Option<(usize, usize)> {
80-
self.first.next.as_ref().map(|hole| (hole.as_ptr() as usize, unsafe { hole.as_ref().size }))
84+
self.first.next.as_ref().map(|hole| {
85+
(hole.as_ptr() as usize, unsafe { hole.as_ref().size })
86+
})
8187
}
8288
}
8389

@@ -125,22 +131,27 @@ struct Allocation {
125131
}
126132

127133
/// Splits the given hole into `(front_padding, hole, back_padding)` if it's big enough to allocate
128-
/// `required_size` bytes with the `required_align`. Else `None` is returned.
134+
/// `required_layout.size()` bytes with the `required_layout.align()`. Else `None` is returned.
129135
/// Front padding occurs if the required alignment is higher than the hole's alignment. Back
130136
/// padding occurs if the required size is smaller than the size of the aligned hole. All padding
131137
/// must be at least `HoleList::min_size()` big or the hole is unusable.
132-
fn split_hole(hole: HoleInfo, required_size: usize, required_align: usize) -> Option<Allocation> {
138+
fn split_hole(hole: HoleInfo, required_layout: Layout) -> Option<Allocation> {
139+
let required_size = required_layout.size();
140+
let required_align = required_layout.align();
141+
133142
let (aligned_addr, front_padding) = if hole.addr == align_up(hole.addr, required_align) {
134143
// hole has already the required alignment
135144
(hole.addr, None)
136145
} else {
137146
// the required alignment causes some padding before the allocation
138147
let aligned_addr = align_up(hole.addr + HoleList::min_size(), required_align);
139-
(aligned_addr,
140-
Some(HoleInfo {
141-
addr: hole.addr,
142-
size: aligned_addr - hole.addr,
143-
}))
148+
(
149+
aligned_addr,
150+
Some(HoleInfo {
151+
addr: hole.addr,
152+
size: aligned_addr - hole.addr,
153+
}),
154+
)
144155
};
145156

146157
let aligned_hole = {
@@ -179,29 +190,30 @@ fn split_hole(hole: HoleInfo, required_size: usize, required_align: usize) -> Op
179190
}
180191

181192
/// Searches the list starting at the next hole of `previous` for a big enough hole. A hole is big
182-
/// enough if it can hold an allocation of `size` bytes with the given `align`. When a hole is used
183-
/// for an allocation, there may be some needed padding before and/or after the allocation. This
184-
/// padding is returned as part of the `Allocation`. The caller must take care of freeing it again.
193+
/// enough if it can hold an allocation of `layout.size()` bytes with the given `layout.align()`.
194+
/// When a hole is used for an allocation, there may be some needed padding before and/or after
195+
/// the allocation. This padding is returned as part of the `Allocation`. The caller must take
196+
/// care of freeing it again.
185197
/// This function uses the “first fit” strategy, so it breaks as soon as a big enough hole is
186198
/// found (and returns it).
187-
fn allocate_first_fit(mut previous: &mut Hole, size: usize, align: usize) -> Option<Allocation> {
199+
fn allocate_first_fit(mut previous: &mut Hole, layout: Layout) -> Result<Allocation, AllocErr> {
188200
loop {
189-
let allocation: Option<Allocation> = previous.next
190-
.as_mut()
191-
.and_then(|current| split_hole(unsafe { current.as_ref() }.info(), size, align));
201+
let allocation: Option<Allocation> = previous.next.as_mut().and_then(|current| {
202+
split_hole(unsafe { current.as_ref() }.info(), layout.clone())
203+
});
192204
match allocation {
193205
Some(allocation) => {
194206
// hole is big enough, so remove it from the list by updating the previous pointer
195207
previous.next = previous.next_unwrap().next.take();
196-
return Some(allocation);
208+
return Ok(allocation);
197209
}
198210
None if previous.next.is_some() => {
199211
// try next hole
200212
previous = move_helper(previous).next_unwrap();
201213
}
202214
None => {
203215
// this was the last hole, so no hole is big enough -> allocation not possible
204-
return None;
216+
return Err(AllocErr::Exhausted { request: layout });
205217
}
206218
}
207219
}
@@ -225,11 +237,15 @@ fn deallocate(mut hole: &mut Hole, addr: usize, mut size: usize) {
225237

226238
// Each freed block must be handled by the previous hole in memory. Thus the freed
227239
// address must be always behind the current hole.
228-
assert!(hole_addr + hole.size <= addr,
229-
"invalid deallocation (probably a double free)");
240+
assert!(
241+
hole_addr + hole.size <= addr,
242+
"invalid deallocation (probably a double free)"
243+
);
230244

231245
// get information about the next block
232-
let next_hole_info = hole.next.as_ref().map(|next| unsafe { next.as_ref().info() });
246+
let next_hole_info = hole.next
247+
.as_ref()
248+
.map(|next| unsafe { next.as_ref().info() });
233249

234250
match next_hole_info {
235251
Some(next) if hole_addr + hole.size == addr && addr + size == next.addr => {

src/lib.rs

+24-5
Original file line numberDiff line numberDiff line change
@@ -1,13 +1,17 @@
11
#![feature(unique)]
22
#![feature(const_fn)]
3+
#![feature(alloc, allocator_api)]
34
#![no_std]
45

6+
extern crate alloc;
7+
58
#[cfg(test)]
69
#[macro_use]
710
extern crate std;
811

912
use hole::{Hole, HoleList};
1013
use core::mem;
14+
use alloc::allocator::{Alloc, Layout, AllocErr};
1115

1216
mod hole;
1317
#[cfg(test)]
@@ -59,13 +63,15 @@ impl Heap {
5963
/// This function scans the list of free memory blocks and uses the first block that is big
6064
/// enough. The runtime is in O(n) where n is the number of free blocks, but it should be
6165
/// reasonably fast for small allocations.
62-
pub fn allocate_first_fit(&mut self, mut size: usize, align: usize) -> Option<*mut u8> {
66+
pub fn allocate_first_fit(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
67+
let mut size = layout.size();
6368
if size < HoleList::min_size() {
6469
size = HoleList::min_size();
6570
}
6671
let size = align_up(size, mem::align_of::<Hole>());
72+
let layout = Layout::from_size_align(size, layout.align()).unwrap();
6773

68-
self.holes.allocate_first_fit(size, align)
74+
self.holes.allocate_first_fit(layout)
6975
}
7076

7177
/// Frees the given allocation. `ptr` must be a pointer returned
@@ -75,13 +81,15 @@ impl Heap {
7581
/// This function walks the list of free memory blocks and inserts the freed block at the
7682
/// correct place. If the freed block is adjacent to another free block, the blocks are merged
7783
/// again. This operation is in `O(n)` since the list needs to be sorted by address.
78-
pub unsafe fn deallocate(&mut self, ptr: *mut u8, mut size: usize, _align: usize) {
84+
pub unsafe fn deallocate(&mut self, ptr: *mut u8, layout: Layout) {
85+
let mut size = layout.size();
7986
if size < HoleList::min_size() {
8087
size = HoleList::min_size();
8188
}
8289
let size = align_up(size, mem::align_of::<Hole>());
90+
let layout = Layout::from_size_align(size, layout.align()).unwrap();
8391

84-
self.holes.deallocate(ptr, size);
92+
self.holes.deallocate(ptr, layout);
8593
}
8694

8795
/// Returns the bottom address of the heap.
@@ -106,11 +114,22 @@ impl Heap {
106114
/// The new extended area must be valid
107115
pub unsafe fn extend(&mut self, by: usize) {
108116
let top = self.top();
109-
self.holes.deallocate(top as *mut u8, by);
117+
let layout = Layout::from_size_align(by, 1).unwrap();
118+
self.holes.deallocate(top as *mut u8, layout);
110119
self.size += by;
111120
}
112121
}
113122

123+
unsafe impl Alloc for Heap {
124+
unsafe fn alloc(&mut self, layout: Layout) -> Result<*mut u8, AllocErr> {
125+
self.allocate_first_fit(layout)
126+
}
127+
128+
unsafe fn dealloc(&mut self, ptr: *mut u8, layout: Layout) {
129+
self.deallocate(ptr, layout)
130+
}
131+
}
132+
114133
/// Align downwards. Returns the greatest x with alignment `align`
115134
/// so that x <= addr. The alignment must be a power of 2.
116135
pub fn align_down(addr: usize, align: usize) -> usize {

0 commit comments

Comments
 (0)