Commit 20356e4

auto merge of #14108 : thestinger/rust/jemalloc, r=huonw

2 parents c690bda + 420708f

2 files changed: +32 -32 lines

src/libcollections/hashmap.rs

Lines changed: 13 additions & 17 deletions
@@ -30,8 +30,6 @@ use std::result::{Ok, Err};
 use std::slice::ImmutableVector;
 
 mod table {
-    extern crate libc;
-
     use std::clone::Clone;
     use std::cmp;
     use std::cmp::Eq;
@@ -42,10 +40,10 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::libc_heap;
-    use std::intrinsics::{size_of, min_align_of, transmute};
-    use std::intrinsics::{move_val_init, set_memory};
+    use std::mem::{min_align_of, size_of};
+    use std::intrinsics::{move_val_init, set_memory, transmute};
     use std::iter::{Iterator, range_step_inclusive};
+    use std::rt::heap::{allocate, deallocate};
 
     static EMPTY_BUCKET: u64 = 0u64;
 
@@ -185,10 +183,6 @@
         assert_eq!(round_up_to_next(5, 4), 8);
     }
 
-    fn has_alignment(n: uint, alignment: uint) -> bool {
-        round_up_to_next(n, alignment) == n
-    }
-
     // Returns a tuple of (minimum required malloc alignment, hash_offset,
     // key_offset, val_offset, array_size), from the start of a mallocated array.
     fn calculate_offsets(
@@ -243,12 +237,7 @@ mod table {
                                   keys_size, min_align_of::<K>(),
                                   vals_size, min_align_of::<V>());
 
-            let buffer = libc_heap::malloc_raw(size) as *mut u8;
-
-            // FIXME #13094: If malloc was not at as aligned as we expected,
-            // our offset calculations are just plain wrong. We could support
-            // any alignment if we switched from `malloc` to `posix_memalign`.
-            assert!(has_alignment(buffer as uint, malloc_alignment));
+            let buffer = allocate(size, malloc_alignment);
 
             let hashes = buffer.offset(hash_offset as int) as *mut u64;
             let keys = buffer.offset(keys_offset as int) as *mut K;
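The call to allocate(size, malloc_alignment) is only sound because calculate_offsets reports the strictest alignment of the three packed arrays along with the total size. As a rough sketch of that arithmetic in modern Rust (usize in place of the old uint): the tuple shape and the round_up_to_next(5, 4) == 8 behavior come from the diff, but the function bodies below are reconstructions, not the commit's actual code.

fn round_up_to_next(n: usize, alignment: usize) -> usize {
    // Round n up to the next multiple of a power-of-two alignment.
    (n + alignment - 1) & !(alignment - 1)
}

// Pack hashes, keys and vals into one buffer: returns (minimum required
// allocation alignment, hash_offset, key_offset, val_offset, total size).
fn calculate_offsets(hashes_size: usize, hashes_align: usize,
                     keys_size: usize, keys_align: usize,
                     vals_size: usize, vals_align: usize)
                     -> (usize, usize, usize, usize, usize) {
    let hash_offset = 0;
    let keys_offset = round_up_to_next(hash_offset + hashes_size, keys_align);
    let vals_offset = round_up_to_next(keys_offset + keys_size, vals_align);
    let size = vals_offset + vals_size;
    let malloc_alignment = hashes_align.max(keys_align).max(vals_align);
    (malloc_alignment, hash_offset, keys_offset, vals_offset, size)
}

fn main() {
    assert_eq!(round_up_to_next(5, 4), 8); // the assertion kept in the diff
    // e.g. one u64 hash, three u32 keys, one u16 val:
    assert_eq!(calculate_offsets(8, 8, 12, 4, 2, 2), (8, 0, 8, 20, 22));
}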
@@ -418,7 +407,7 @@
     // modified to no longer assume this.
     #[test]
     fn can_alias_safehash_as_u64() {
-        unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
+        assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
     }
 
     pub struct Entries<'a, K, V> {
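The dropped unsafe block is a direct consequence of the import change earlier in the file: std::mem::size_of is a safe function, while the std::intrinsics version it replaces had to be called inside unsafe. A one-line check in current Rust, for illustration:

use std::mem::size_of;

fn main() {
    assert_eq!(size_of::<u64>(), 8); // no unsafe block required
}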
@@ -560,8 +549,15 @@
 
             assert_eq!(self.size, 0);
 
+            let hashes_size = self.capacity * size_of::<u64>();
+            let keys_size = self.capacity * size_of::<K>();
+            let vals_size = self.capacity * size_of::<V>();
+            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
+                                                           keys_size, min_align_of::<K>(),
+                                                           vals_size, min_align_of::<V>());
+
             unsafe {
-                libc::free(self.hashes as *mut libc::c_void);
+                deallocate(self.hashes as *mut u8, size, align);
                 // Remember how everything was allocated out of one buffer
                 // during initialization? We only need one call to free here.
             }
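Taken together, the hashmap hunks swap an unaligned libc::malloc plus a best-effort alignment assertion for an allocator call that takes the required alignment directly, and a free that must restate the same size and alignment. std::rt::heap did not survive to stable Rust; a minimal sketch of the same discipline with today's std::alloc (the numbers are stand-ins, not values from the diff):

use std::alloc::{alloc, dealloc, Layout};

fn main() {
    let (size, align) = (64, 8); // in the diff these come from calculate_offsets
    let layout = Layout::from_size_align(size, align).unwrap();
    unsafe {
        let buffer = alloc(layout);
        assert!(!buffer.is_null());
        // ... hashes, keys and vals all live at offsets inside `buffer` ...
        // One buffer, one free; the layout must match the allocation.
        dealloc(buffer, layout);
    }
}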

src/libstd/sync/deque.rs

Lines changed: 19 additions & 15 deletions
@@ -51,8 +51,7 @@
 use clone::Clone;
 use iter::{range, Iterator};
 use kinds::Send;
-use libc;
-use mem;
+use mem::{forget, min_align_of, size_of, transmute};
 use ops::Drop;
 use option::{Option, Some, None};
 use owned::Box;
@@ -62,6 +61,7 @@ use slice::ImmutableVector;
 use sync::arc::UnsafeArc;
 use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
 use unstable::sync::Exclusive;
+use rt::heap::{allocate, deallocate};
 use vec::Vec;
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
@@ -229,7 +229,7 @@ impl<T: Send> Deque<T> {
         Deque {
             bottom: AtomicInt::new(0),
             top: AtomicInt::new(0),
-            array: AtomicPtr::new(unsafe { mem::transmute(buf) }),
+            array: AtomicPtr::new(unsafe { transmute(buf) }),
             pool: pool,
         }
     }
@@ -271,7 +271,7 @@
             return Some(data);
         } else {
             self.bottom.store(t + 1, SeqCst);
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
             return None;
         }
     }
@@ -293,7 +293,7 @@
         if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
             Data(data)
         } else {
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
             Abort
         }
     }
@@ -314,15 +314,15 @@
     // continue to be read after we flag this buffer for reclamation.
     unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
                           buf: Buffer<T>) -> *mut Buffer<T> {
-        let newbuf: *mut Buffer<T> = mem::transmute(box buf);
+        let newbuf: *mut Buffer<T> = transmute(box buf);
         self.array.store(newbuf, SeqCst);
         let ss = (*newbuf).size();
         self.bottom.store(b + ss, SeqCst);
         let t = self.top.load(SeqCst);
         if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
             self.bottom.store(b, SeqCst);
         }
-        self.pool.free(mem::transmute(old));
+        self.pool.free(transmute(old));
         return newbuf;
     }
 }
@@ -339,15 +339,19 @@ impl<T: Send> Drop for Deque<T> {
         for i in range(t, b) {
             let _: T = unsafe { (*a).get(i) };
         }
-        self.pool.free(unsafe { mem::transmute(a) });
+        self.pool.free(unsafe { transmute(a) });
     }
 }
 
+#[inline]
+fn buffer_alloc_size<T>(log_size: int) -> uint {
+    (1 << log_size) * size_of::<T>()
+}
+
 impl<T: Send> Buffer<T> {
     unsafe fn new(log_size: int) -> Buffer<T> {
-        let size = (1 << log_size) * mem::size_of::<T>();
-        let buffer = libc::malloc(size as libc::size_t);
-        assert!(!buffer.is_null());
+        let size = buffer_alloc_size::<T>(log_size);
+        let buffer = allocate(size, min_align_of::<T>());
         Buffer {
             storage: buffer as *T,
             log_size: log_size,
@@ -372,7 +376,7 @@ impl<T: Send> Buffer<T> {
     unsafe fn put(&mut self, i: int, t: T) {
         let ptr = self.storage.offset(i & self.mask());
         ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
-        mem::forget(t);
+        forget(t);
     }
 
     // Again, unsafe because this has incredibly dubious ownership violations.
@@ -390,7 +394,8 @@
 impl<T: Send> Drop for Buffer<T> {
     fn drop(&mut self) {
         // It is assumed that all buffers are empty on drop.
-        unsafe { libc::free(self.storage as *mut libc::c_void) }
+        let size = buffer_alloc_size::<T>(self.log_size);
+        unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
     }
 }
 
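The new/drop pair above is the whole point of buffer_alloc_size: allocation and deallocation must agree on both size and alignment. A self-contained modern-Rust sketch of the same pairing (this Buffer is illustrative, not the 2014 struct; Layout::array plays the role of buffer_alloc_size plus min_align_of):

use std::alloc::{alloc, dealloc, Layout};

struct Buffer<T> {
    storage: *mut T,
    log_size: u32,
}

impl<T> Buffer<T> {
    unsafe fn new(log_size: u32) -> Buffer<T> {
        // (1 << log_size) elements of T, with T's alignment.
        let layout = Layout::array::<T>(1 << log_size).unwrap();
        let storage = alloc(layout) as *mut T;
        assert!(!storage.is_null());
        Buffer { storage, log_size }
    }
}

impl<T> Drop for Buffer<T> {
    fn drop(&mut self) {
        // Recompute the identical layout: freeing with a different size or
        // alignment than the allocation is undefined behavior.
        let layout = Layout::array::<T>(1 << self.log_size).unwrap();
        unsafe { dealloc(self.storage as *mut u8, layout) }
    }
}

fn main() {
    let _buf = unsafe { Buffer::<u64>::new(4) }; // 16 slots, freed on drop
}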
@@ -606,8 +611,7 @@ mod tests {
             let s = s.clone();
             let unique_box = box AtomicUint::new(0);
             let thread_box = unsafe {
-                *mem::transmute::<&Box<AtomicUint>,
-                                  **mut AtomicUint>(&unique_box)
+                *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
             };
             (Thread::start(proc() {
                 unsafe {
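One pattern that survives unchanged through this commit is forget after a bitwise copy: put() copies t into the buffer and then forgets it, and the steal paths forget data when another thread wins the race, so no destructor ever runs twice for one value. A tiny standalone illustration of why (mem::forget here is the same function the diff imports as forget):

use std::mem::forget;
use std::ptr;

fn main() {
    let a = String::from("payload");
    unsafe {
        // A bitwise copy, like the deque's get() reading out of its buffer:
        // two owners of one heap allocation now exist.
        let b: String = ptr::read(&a);
        // Forget one of them so the destructor runs exactly once.
        forget(a);
        println!("{}", b);
    } // b is dropped here; the allocation is freed once
}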
