port to the new allocator API #14108

Merged: 2 commits, May 11, 2014
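In brief: this PR drops the direct libc::malloc / libc::free calls in libcollections' hashmap and libstd's work-stealing deque in favor of the runtime allocator API, rt::heap::allocate and rt::heap::deallocate, which take an explicit alignment and require the caller to repeat the size and alignment when freeing. A minimal sketch of the pattern; the signatures are inferred from the call sites in this diff, and the no-null-check behavior is inferred from the assert!(!buffer.is_null()) that the diff removes:

    use std::rt::heap::{allocate, deallocate};
    use std::mem::{min_align_of, size_of};

    // Inferred: fn allocate(size: uint, align: uint) -> *mut u8
    //           fn deallocate(ptr: *mut u8, size: uint, align: uint)
    fn main() {
        unsafe {
            let size = 16 * size_of::<u64>();
            let align = min_align_of::<u64>();
            let ptr = allocate(size, align); // aligned as requested, no null check needed
            // ... use `ptr` as an array of 16 u64s ...
            deallocate(ptr, size, align);    // must repeat the same size and align
        }
    }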
30 changes: 13 additions & 17 deletions src/libcollections/hashmap.rs
@@ -30,8 +30,6 @@ use std::result::{Ok, Err};
 use std::slice::ImmutableVector;
 
 mod table {
-    extern crate libc;
-
     use std::clone::Clone;
     use std::cmp;
     use std::cmp::Eq;
@@ -42,10 +40,10 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::libc_heap;
-    use std::intrinsics::{size_of, min_align_of, transmute};
-    use std::intrinsics::{move_val_init, set_memory};
+    use std::mem::{min_align_of, size_of};
+    use std::intrinsics::{move_val_init, set_memory, transmute};
     use std::iter::{Iterator, range_step_inclusive};
+    use std::rt::heap::{allocate, deallocate};
 
     static EMPTY_BUCKET: u64 = 0u64;

@@ -185,10 +183,6 @@
         assert_eq!(round_up_to_next(5, 4), 8);
     }
 
-    fn has_alignment(n: uint, alignment: uint) -> bool {
-        round_up_to_next(n, alignment) == n
-    }
-
     // Returns a tuple of (minimum required malloc alignment, hash_offset,
    // key_offset, val_offset, array_size), from the start of a mallocated array.
     fn calculate_offsets(
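For context, round_up_to_next is the alignment helper the deleted has_alignment wrapped and the assertion above exercises. Its body is not shown in this diff, so the following is only a sketch consistent with that assertion, assuming power-of-two alignments (which min_align_of guarantees):

    // Round `n` up to the next multiple of `alignment` (a power of two).
    // Matches the test above: round_up_to_next(5, 4) == 8.
    fn round_up_to_next(n: uint, alignment: uint) -> uint {
        (n + alignment - 1) & !(alignment - 1)
    }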
@@ -243,12 +237,7 @@
                 keys_size, min_align_of::< K >(),
                 vals_size, min_align_of::< V >());
 
-            let buffer = libc_heap::malloc_raw(size) as *mut u8;
-
-            // FIXME #13094: If malloc was not as aligned as we expected,
-            // our offset calculations are just plain wrong. We could support
-            // any alignment if we switched from `malloc` to `posix_memalign`.
-            assert!(has_alignment(buffer as uint, malloc_alignment));
+            let buffer = allocate(size, malloc_alignment);
 
             let hashes = buffer.offset(hash_offset as int) as *mut u64;
             let keys = buffer.offset(keys_offset as int) as *mut K;
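This hunk is the heart of the hashmap change: allocate takes the required alignment directly, so the FIXME and the runtime has_alignment assert become unnecessary, and the offsets computed by calculate_offsets hold by construction. A self-contained sketch of the same single-buffer layout trick for just two arrays; the helper name is hypothetical, not from the diff:

    // Lay out a u64 array followed by a K array in one allocation.
    unsafe fn alloc_hashes_then_keys<K>(capacity: uint) -> (*mut u64, *mut K) {
        let hashes_size = capacity * size_of::<u64>();
        // Pad so the K array starts at a K-aligned offset.
        let keys_offset = round_up_to_next(hashes_size, min_align_of::<K>());
        let size = keys_offset + capacity * size_of::<K>();
        let align = cmp::max(min_align_of::<u64>(), min_align_of::<K>());
        let buffer = allocate(size, align);
        (buffer as *mut u64, buffer.offset(keys_offset as int) as *mut K)
    }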
@@ -418,7 +407,7 @@
     // modified to no longer assume this.
     #[test]
     fn can_alias_safehash_as_u64() {
-        unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
+        assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
     }
 
     pub struct Entries<'a, K, V> {
@@ -560,8 +549,15 @@
 
             assert_eq!(self.size, 0);
 
+            let hashes_size = self.capacity * size_of::<u64>();
+            let keys_size = self.capacity * size_of::<K>();
+            let vals_size = self.capacity * size_of::<V>();
+            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
+                                                           keys_size, min_align_of::<K>(),
+                                                           vals_size, min_align_of::<V>());
+
             unsafe {
-                libc::free(self.hashes as *mut libc::c_void);
+                deallocate(self.hashes as *mut u8, size, align);
                 // Remember how everything was allocated out of one buffer
                 // during initialization? We only need one call to free here.
             }
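Note the cost of the new API: deallocate needs the exact size and alignment that were passed to allocate, so the drop path now recomputes the layout that new computed, and the two sites must stay in sync by hand. One way to centralize that (a hypothetical refactor, not part of this PR), using the (align, hash_offset, key_offset, val_offset, size) tuple that calculate_offsets returns:

    // Hypothetical helper: one source of truth for the table's layout,
    // callable from both the allocation and deallocation paths.
    fn table_layout<K, V>(capacity: uint) -> (uint, uint) { // (align, size)
        let (align, _, _, _, size) =
            calculate_offsets(capacity * size_of::<u64>(), min_align_of::<u64>(),
                              capacity * size_of::<K>(), min_align_of::<K>(),
                              capacity * size_of::<V>(), min_align_of::<V>());
        (align, size)
    }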
34 changes: 19 additions & 15 deletions src/libstd/sync/deque.rs
@@ -51,8 +51,7 @@
 use clone::Clone;
 use iter::{range, Iterator};
 use kinds::Send;
-use libc;
-use mem;
+use mem::{forget, min_align_of, size_of, transmute};
 use ops::Drop;
 use option::{Option, Some, None};
 use owned::Box;
@@ -62,6 +61,7 @@ use slice::ImmutableVector;
 use sync::arc::UnsafeArc;
 use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
 use unstable::sync::Exclusive;
+use rt::heap::{allocate, deallocate};
 use vec::Vec;
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
@@ -229,7 +229,7 @@ impl<T: Send> Deque<T> {
         Deque {
             bottom: AtomicInt::new(0),
             top: AtomicInt::new(0),
-            array: AtomicPtr::new(unsafe { mem::transmute(buf) }),
+            array: AtomicPtr::new(unsafe { transmute(buf) }),
             pool: pool,
         }
     }
@@ -271,7 +271,7 @@ impl<T: Send> Deque<T> {
             return Some(data);
         } else {
             self.bottom.store(t + 1, SeqCst);
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
             return None;
         }
     }
@@ -293,7 +293,7 @@ impl<T: Send> Deque<T> {
         if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
             Data(data)
         } else {
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
             Abort
         }
     }
@@ -314,15 +314,15 @@ impl<T: Send> Deque<T> {
     // continue to be read after we flag this buffer for reclamation.
     unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
                           buf: Buffer<T>) -> *mut Buffer<T> {
-        let newbuf: *mut Buffer<T> = mem::transmute(box buf);
+        let newbuf: *mut Buffer<T> = transmute(box buf);
         self.array.store(newbuf, SeqCst);
         let ss = (*newbuf).size();
         self.bottom.store(b + ss, SeqCst);
         let t = self.top.load(SeqCst);
         if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
             self.bottom.store(b, SeqCst);
         }
-        self.pool.free(mem::transmute(old));
+        self.pool.free(transmute(old));
         return newbuf;
     }
 }
@@ -339,15 +339,19 @@ impl<T: Send> Drop for Deque<T> {
         for i in range(t, b) {
             let _: T = unsafe { (*a).get(i) };
         }
-        self.pool.free(unsafe { mem::transmute(a) });
+        self.pool.free(unsafe { transmute(a) });
     }
 }
 
+#[inline]
+fn buffer_alloc_size<T>(log_size: int) -> uint {
+    (1 << log_size) * size_of::<T>()
+}
+
 impl<T: Send> Buffer<T> {
     unsafe fn new(log_size: int) -> Buffer<T> {
-        let size = (1 << log_size) * mem::size_of::<T>();
-        let buffer = libc::malloc(size as libc::size_t);
-        assert!(!buffer.is_null());
+        let size = buffer_alloc_size::<T>(log_size);
+        let buffer = allocate(size, min_align_of::<T>());
         Buffer {
             storage: buffer as *T,
             log_size: log_size,
@@ -372,7 +376,7 @@ impl<T: Send> Buffer<T> {
     unsafe fn put(&mut self, i: int, t: T) {
         let ptr = self.storage.offset(i & self.mask());
         ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
-        mem::forget(t);
+        forget(t);
     }
 
     // Again, unsafe because this has incredibly dubious ownership violations.
@@ -390,7 +394,8 @@ impl<T: Send> Buffer<T> {
 impl<T: Send> Drop for Buffer<T> {
     fn drop(&mut self) {
         // It is assumed that all buffers are empty on drop.
-        unsafe { libc::free(self.storage as *mut libc::c_void) }
+        let size = buffer_alloc_size::<T>(self.log_size);
+        unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
     }
 }
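Taken together, Buffer::new and this drop impl show the discipline the new API imposes: the size/alignment pair must match at allocation and deallocation, which is exactly why buffer_alloc_size was factored out and called from both sites. The same pattern in miniature, with a hypothetical type written against the API used in this diff:

    // A raw fixed-capacity array whose alloc and free parameters come from
    // one place, mirroring the `buffer_alloc_size` refactor above.
    struct RawArray<T> { ptr: *mut T, cap: uint }

    fn raw_array_bytes<T>(cap: uint) -> uint { cap * size_of::<T>() }

    impl<T> RawArray<T> {
        unsafe fn new(cap: uint) -> RawArray<T> {
            let ptr = allocate(raw_array_bytes::<T>(cap), min_align_of::<T>());
            RawArray { ptr: ptr as *mut T, cap: cap }
        }
    }

    impl<T> Drop for RawArray<T> {
        fn drop(&mut self) {
            unsafe {
                deallocate(self.ptr as *mut u8,
                           raw_array_bytes::<T>(self.cap),
                           min_align_of::<T>());
            }
        }
    }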

@@ -606,8 +611,7 @@ mod tests {
             let s = s.clone();
             let unique_box = box AtomicUint::new(0);
             let thread_box = unsafe {
-                *mem::transmute::<&Box<AtomicUint>,
-                                  **mut AtomicUint>(&unique_box)
+                *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
             };
             (Thread::start(proc() {
                 unsafe {