From 7da24ea1a9466ef3af3e86da65fcccd8f17f1ae5 Mon Sep 17 00:00:00 2001
From: Daniel Micay
Date: Sun, 11 May 2014 04:19:05 -0400
Subject: [PATCH 1/2] hashmap: port to the new allocator API

---
 src/libcollections/hashmap.rs | 30 +++++++++++++-----------------
 1 file changed, 13 insertions(+), 17 deletions(-)

diff --git a/src/libcollections/hashmap.rs b/src/libcollections/hashmap.rs
index 1a222a27e47..7dfc89b01de 100644
--- a/src/libcollections/hashmap.rs
+++ b/src/libcollections/hashmap.rs
@@ -30,8 +30,6 @@ use std::result::{Ok, Err};
 use std::slice::ImmutableVector;
 
 mod table {
-    extern crate libc;
-
     use std::clone::Clone;
     use std::cmp;
     use std::cmp::Eq;
@@ -42,10 +40,10 @@ mod table {
     use std::prelude::Drop;
     use std::ptr;
     use std::ptr::RawPtr;
-    use std::rt::libc_heap;
-    use std::intrinsics::{size_of, min_align_of, transmute};
-    use std::intrinsics::{move_val_init, set_memory};
+    use std::mem::{min_align_of, size_of};
+    use std::intrinsics::{move_val_init, set_memory, transmute};
     use std::iter::{Iterator, range_step_inclusive};
+    use std::rt::heap::{allocate, deallocate};
 
     static EMPTY_BUCKET: u64 = 0u64;
 
@@ -185,10 +183,6 @@ mod table {
         assert_eq!(round_up_to_next(5, 4), 8);
     }
 
-    fn has_alignment(n: uint, alignment: uint) -> bool {
-        round_up_to_next(n, alignment) == n
-    }
-
    // Returns a tuple of (minimum required malloc alignment, hash_offset,
    // key_offset, val_offset, array_size), from the start of a mallocated array.
    fn calculate_offsets(
@@ -243,12 +237,7 @@ mod table {
                 keys_size, min_align_of::<K>(),
                 vals_size, min_align_of::<V>());
 
-            let buffer = libc_heap::malloc_raw(size) as *mut u8;
-
-            // FIXME #13094: If malloc was not at as aligned as we expected,
-            // our offset calculations are just plain wrong. We could support
-            // any alignment if we switched from `malloc` to `posix_memalign`.
-            assert!(has_alignment(buffer as uint, malloc_alignment));
+            let buffer = allocate(size, malloc_alignment);
 
             let hashes = buffer.offset(hash_offset as int) as *mut u64;
             let keys = buffer.offset(keys_offset as int) as *mut K;
@@ -418,7 +407,7 @@ mod table {
     // modified to no longer assume this.
     #[test]
     fn can_alias_safehash_as_u64() {
-        unsafe { assert_eq!(size_of::<SafeHash>(), size_of::<u64>()) };
+        assert_eq!(size_of::<SafeHash>(), size_of::<u64>())
     }
 
     pub struct Entries<'a, K, V> {
@@ -560,8 +549,15 @@ mod table {
 
             assert_eq!(self.size, 0);
 
+            let hashes_size = self.capacity * size_of::<u64>();
+            let keys_size = self.capacity * size_of::<K>();
+            let vals_size = self.capacity * size_of::<V>();
+            let (align, _, _, _, size) = calculate_offsets(hashes_size, min_align_of::<u64>(),
+                                                           keys_size, min_align_of::<K>(),
+                                                           vals_size, min_align_of::<V>());
+
             unsafe {
-                libc::free(self.hashes as *mut libc::c_void);
+                deallocate(self.hashes as *mut u8, size, align);
                 // Remember how everything was allocated out of one buffer
                 // during initialization? We only need one call to free here.
            }
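An illustrative note, with hypothetical helper names: the contract this port
relies on is that the new allocator API is sized, unlike `malloc`/`free`.
`deallocate` must receive exactly the size and alignment that were passed to
`allocate`, which is why the drop glue above recomputes them through
`calculate_offsets` instead of handing the pointer to `free`. A minimal
sketch against the same std::rt::heap interface (`alloc_array` and
`free_array` are made up for illustration, not code from this patch):

    use std::mem::{min_align_of, size_of};
    use std::rt::heap::{allocate, deallocate};

    // Allocate room for `len` values of type T, aligned for T.
    unsafe fn alloc_array<T>(len: uint) -> *mut T {
        allocate(len * size_of::<T>(), min_align_of::<T>()) as *mut T
    }

    // Free it again: the allocator needs the original size and alignment
    // back, so the caller must be able to recompute (or remember) both.
    unsafe fn free_array<T>(ptr: *mut T, len: uint) {
        deallocate(ptr as *mut u8, len * size_of::<T>(), min_align_of::<T>());
    }

The hashmap needs no extra state for this: `self.capacity` plus
`calculate_offsets` determine the single buffer's size and alignment exactly
as in `new`, so one `deallocate` call still frees the hashes, keys, and
values together.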
From 420708f38992b488570bc8702a887593a57d746f Mon Sep 17 00:00:00 2001
From: Daniel Micay
Date: Sun, 11 May 2014 05:34:06 -0400
Subject: [PATCH 2/2] sync::deque: port to the new allocator API

---
 src/libstd/sync/deque.rs | 34 +++++++++++++++++++---------------
 1 file changed, 19 insertions(+), 15 deletions(-)

diff --git a/src/libstd/sync/deque.rs b/src/libstd/sync/deque.rs
index 175bb03d262..30b95ffb34f 100644
--- a/src/libstd/sync/deque.rs
+++ b/src/libstd/sync/deque.rs
@@ -51,8 +51,7 @@
 use clone::Clone;
 use iter::{range, Iterator};
 use kinds::Send;
-use libc;
-use mem;
+use mem::{forget, min_align_of, size_of, transmute};
 use ops::Drop;
 use option::{Option, Some, None};
 use owned::Box;
@@ -62,6 +61,7 @@ use slice::ImmutableVector;
 use sync::arc::UnsafeArc;
 use sync::atomics::{AtomicInt, AtomicPtr, SeqCst};
 use unstable::sync::Exclusive;
+use rt::heap::{allocate, deallocate};
 use vec::Vec;
 
 // Once the queue is less than 1/K full, then it will be downsized. Note that
@@ -229,7 +229,7 @@ impl<T: Send> Deque<T> {
         Deque {
             bottom: AtomicInt::new(0),
             top: AtomicInt::new(0),
-            array: AtomicPtr::new(unsafe { mem::transmute(buf) }),
+            array: AtomicPtr::new(unsafe { transmute(buf) }),
             pool: pool,
         }
     }
@@ -271,7 +271,7 @@ impl<T: Send> Deque<T> {
             return Some(data);
         } else {
             self.bottom.store(t + 1, SeqCst);
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
             return None;
         }
     }
@@ -293,7 +293,7 @@ impl<T: Send> Deque<T> {
         if self.top.compare_and_swap(t, t + 1, SeqCst) == t {
             Data(data)
         } else {
-            mem::forget(data); // someone else stole this value
+            forget(data); // someone else stole this value
            Abort
         }
     }
@@ -314,7 +314,7 @@ impl<T: Send> Deque<T> {
    // continue to be read after we flag this buffer for reclamation.
    unsafe fn swap_buffer(&mut self, b: int, old: *mut Buffer<T>,
                          buf: Buffer<T>) -> *mut Buffer<T> {
-        let newbuf: *mut Buffer<T> = mem::transmute(box buf);
+        let newbuf: *mut Buffer<T> = transmute(box buf);
         self.array.store(newbuf, SeqCst);
         let ss = (*newbuf).size();
         self.bottom.store(b + ss, SeqCst);
@@ -322,7 +322,7 @@ impl<T: Send> Deque<T> {
         if self.top.compare_and_swap(t, t + ss, SeqCst) != t {
             self.bottom.store(b, SeqCst);
         }
-        self.pool.free(mem::transmute(old));
+        self.pool.free(transmute(old));
         return newbuf;
     }
 }
@@ -339,15 +339,19 @@ impl<T: Send> Drop for Deque<T> {
         for i in range(t, b) {
             let _: T = unsafe { (*a).get(i) };
         }
-        self.pool.free(unsafe { mem::transmute(a) });
+        self.pool.free(unsafe { transmute(a) });
     }
 }
 
+#[inline]
+fn buffer_alloc_size<T>(log_size: int) -> uint {
+    (1 << log_size) * size_of::<T>()
+}
+
 impl<T: Send> Buffer<T> {
     unsafe fn new(log_size: int) -> Buffer<T> {
-        let size = (1 << log_size) * mem::size_of::<T>();
-        let buffer = libc::malloc(size as libc::size_t);
-        assert!(!buffer.is_null());
+        let size = buffer_alloc_size::<T>(log_size);
+        let buffer = allocate(size, min_align_of::<T>());
         Buffer {
             storage: buffer as *T,
             log_size: log_size,
@@ -372,7 +376,7 @@ impl<T: Send> Buffer<T> {
     unsafe fn put(&mut self, i: int, t: T) {
         let ptr = self.storage.offset(i & self.mask());
         ptr::copy_nonoverlapping_memory(ptr as *mut T, &t as *T, 1);
-        mem::forget(t);
+        forget(t);
     }
 
     // Again, unsafe because this has incredibly dubious ownership violations.
@@ -390,7 +394,8 @@ impl<T: Send> Buffer<T> {
 impl<T: Send> Drop for Buffer<T> {
     fn drop(&mut self) {
         // It is assumed that all buffers are empty on drop.
-        unsafe { libc::free(self.storage as *mut libc::c_void) }
+        let size = buffer_alloc_size::<T>(self.log_size);
+        unsafe { deallocate(self.storage as *mut u8, size, min_align_of::<T>()) }
     }
 }
 
@@ -606,8 +611,7 @@ mod tests {
         let s = s.clone();
         let unique_box = box AtomicUint::new(0);
         let thread_box = unsafe {
-            *mem::transmute::<&Box<AtomicUint>,
-                              **mut AtomicUint>(&unique_box)
+            *mem::transmute::<&Box<AtomicUint>, **mut AtomicUint>(&unique_box)
         };
         (Thread::start(proc() {
             unsafe {
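The deque side follows the same sized-deallocation pattern, with less to
recompute: a buffer's byte size is fully determined by its `log_size`, so the
patch adds `buffer_alloc_size::<T>()` and calls it from both `Buffer::new`
and the `Drop` impl. A compressed, illustrative sketch of that pairing
(`RawBuffer` and `bytes_for` are stand-in names, not the patch's types; the
real `Buffer` stores a `*T` and carries the deque logic around this core):

    use std::mem::{min_align_of, size_of};
    use std::rt::heap::{allocate, deallocate};

    struct RawBuffer<T> {
        storage: *mut T,
        log_size: int, // capacity is 1 << log_size, always a power of two
    }

    fn bytes_for<T>(log_size: int) -> uint {
        (1 << log_size) * size_of::<T>()
    }

    impl<T> RawBuffer<T> {
        unsafe fn new(log_size: int) -> RawBuffer<T> {
            let p = allocate(bytes_for::<T>(log_size), min_align_of::<T>());
            RawBuffer { storage: p as *mut T, log_size: log_size }
        }

        unsafe fn free(self) {
            // Recompute the byte count from log_size: by construction it is
            // the same value new() passed to allocate().
            deallocate(self.storage as *mut u8, bytes_for::<T>(self.log_size),
                       min_align_of::<T>());
        }
    }

Storing `log_size` rather than a byte count keeps `Buffer`'s fields unchanged
from the malloc version; only the allocation and drop paths needed to learn
about the size and alignment arguments.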