Auto merge of #37220 - Mark-Simulacrum:arena-alloc-slice, r=eddyb

alloc_slice in TypedArena

Added `TypedArena::alloc_slice`, and moved the type-list and substs interners from `TypedArena<Vec<T>>` to `TypedArena<T>`.

`TypedArena::alloc_slice` is implemented by copying the slice's elements into the arena, which requires `T: Copy`. A new chunk is allocated when there is insufficient space remaining in the current chunk and the old chunk cannot be resized in place. This is suboptimal, since we may waste allocated space when allocating (especially longer) slices, but it is considered good enough for the time being.

This change also reduces heap fragmentation, since the arena now stores the objects directly instead of each `Vec`'s length and a pointer to its separately allocated contents.
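
For illustration, here is a minimal sketch of how the new API is used (hypothetical example, assuming the rustc-private `arena` crate; not part of the patch):

```rust
#![feature(rustc_private)]
extern crate arena;

use arena::TypedArena;

fn main() {
    // `alloc_slice` requires `T: Copy`, since the elements are memcpy'd in.
    let arena: TypedArena<u32> = TypedArena::new();

    // The elements are copied into the arena; the returned slice borrows
    // from the arena and lives as long as the arena does.
    let interned: &mut [u32] = arena.alloc_slice(&[1, 2, 3, 4]);
    assert_eq!(interned, &[1, 2, 3, 4]);
}
```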

Performance:
```
futures-rs-test  5.048s vs  5.061s --> 0.997x faster (variance: 1.028x, 1.020x)
helloworld       0.284s vs  0.295s --> 0.963x faster (variance: 1.207x, 1.189x)
html5ever-2016-  8.396s vs  8.208s --> 1.023x faster (variance: 1.019x, 1.036x)
hyper.0.5.0      5.768s vs  5.797s --> 0.995x faster (variance: 1.027x, 1.028x)
inflate-0.1.0    5.213s vs  5.069s --> 1.028x faster (variance: 1.008x, 1.022x)
issue-32062-equ  0.428s vs  0.467s --> 0.916x faster (variance: 1.188x, 1.015x)
issue-32278-big  1.949s vs  2.010s --> 0.970x faster (variance: 1.112x, 1.049x)
jld-day15-parse  1.795s vs  1.877s --> 0.956x faster (variance: 1.037x, 1.015x)
piston-image-0. 13.554s vs 13.522s --> 1.002x faster (variance: 1.019x, 1.020x)
rust-encoding-0  2.489s vs  2.465s --> 1.010x faster (variance: 1.047x, 1.086x)
syntex-0.42.2   34.646s vs 34.593s --> 1.002x faster (variance: 1.007x, 1.005x)
syntex-0.42.2-i 17.181s vs 17.163s --> 1.001x faster (variance: 1.004x, 1.004x)
```

r? @eddyb
Commit d337f345ca by bors, 2016-10-19 09:52:48 -07:00, committed by GitHub.
19 changed files with 93 additions and 56 deletions


@ -46,6 +46,7 @@ use std::intrinsics;
use std::marker::{PhantomData, Send};
use std::mem;
use std::ptr;
use std::slice;
use alloc::heap;
use alloc::raw_vec::RawVec;
@ -133,7 +134,7 @@ impl<T> TypedArena<T> {
#[inline]
pub fn alloc(&self, object: T) -> &mut T {
if self.ptr == self.end {
self.grow()
self.grow(1)
}
unsafe {
@ -154,24 +155,56 @@ impl<T> TypedArena<T> {
}
}
/// Allocates a slice of objects that are copied into the `TypedArena`, returning a mutable
/// reference to it. Will panic if passed a zero-sized type.
#[inline]
pub fn alloc_slice(&self, slice: &[T]) -> &mut [T]
where T: Copy {
assert!(mem::size_of::<T>() != 0);
if slice.len() == 0 {
return unsafe { slice::from_raw_parts_mut(heap::EMPTY as *mut T, 0) };
}
let available_capacity_bytes = self.end.get() as usize - self.ptr.get() as usize;
let at_least_bytes = slice.len() * mem::size_of::<T>();
if available_capacity_bytes < at_least_bytes {
self.grow(slice.len());
}
unsafe {
let start_ptr = self.ptr.get();
let arena_slice = slice::from_raw_parts_mut(start_ptr, slice.len());
self.ptr.set(start_ptr.offset(arena_slice.len() as isize));
arena_slice.copy_from_slice(slice);
arena_slice
}
}
/// Grows the arena.
#[inline(never)]
#[cold]
fn grow(&self) {
fn grow(&self, n: usize) {
unsafe {
let mut chunks = self.chunks.borrow_mut();
let (chunk, new_capacity);
let (chunk, mut new_capacity);
if let Some(last_chunk) = chunks.last_mut() {
if last_chunk.storage.double_in_place() {
let used_bytes = self.ptr.get() as usize - last_chunk.start() as usize;
let currently_used_cap = used_bytes / mem::size_of::<T>();
if last_chunk.storage.reserve_in_place(currently_used_cap, n) {
self.end.set(last_chunk.end());
return;
} else {
let prev_capacity = last_chunk.storage.cap();
new_capacity = prev_capacity.checked_mul(2).unwrap();
loop {
new_capacity = prev_capacity.checked_mul(2).unwrap();
if new_capacity >= currently_used_cap + n {
break;
}
}
}
} else {
let elem_size = cmp::max(1, mem::size_of::<T>());
new_capacity = cmp::max(1, PAGE / elem_size);
new_capacity = cmp::max(n, PAGE / elem_size);
}
chunk = TypedArenaChunk::<T>::new(new_capacity);
self.ptr.set(chunk.start());
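
For reference, the capacity policy in the hunk above boils down to the following standalone sketch (hypothetical helper, not part of the patch; the real `grow` works on `RawVec` chunks and tries `reserve_in_place` before allocating a new chunk):

```rust
use std::cmp;
use std::mem;

const PAGE: usize = 4096; // page-size constant, mirroring the arena's `PAGE`

/// Hypothetical helper: capacity (in elements) for the next chunk when `n`
/// more elements are needed and `used` elements already occupy the current
/// chunk. Illustrative only.
fn next_chunk_capacity<T>(prev_capacity: Option<usize>, used: usize, n: usize) -> usize {
    match prev_capacity {
        // No chunk yet: start with roughly a page worth of elements.
        None => cmp::max(n, PAGE / cmp::max(1, mem::size_of::<T>())),
        // Otherwise double the previous chunk's capacity until `used + n` fits.
        Some(prev) => {
            let mut cap = prev.checked_mul(2).expect("arena capacity overflow");
            while cap < used + n {
                cap = cap.checked_mul(2).expect("arena capacity overflow");
            }
            cap
        }
    }
}

fn main() {
    // A fresh arena of u64 asked for 3 elements gets a ~page-sized chunk:
    assert_eq!(next_chunk_capacity::<u64>(None, 0, 3), 4096 / 8);
}
```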


@ -163,7 +163,7 @@ impl<'tcx> Rvalue<'tcx> {
let lhs_ty = lhs.ty(mir, tcx);
let rhs_ty = rhs.ty(mir, tcx);
let ty = op.ty(tcx, lhs_ty, rhs_ty);
let ty = tcx.mk_tup(vec![ty, tcx.types.bool]);
let ty = tcx.mk_tup(&[ty, tcx.types.bool]);
Some(ty)
}
&Rvalue::UnaryOp(_, ref operand) => {
@ -184,7 +184,7 @@ impl<'tcx> Rvalue<'tcx> {
}
AggregateKind::Tuple => {
Some(tcx.mk_tup(
ops.iter().map(|op| op.ty(mir, tcx)).collect()
&ops.iter().map(|op| op.ty(mir, tcx)).collect::<Vec<_>>()
))
}
AggregateKind::Adt(def, _, substs, _) => {


@ -486,7 +486,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
{
let arguments_tuple = match tuple_arguments {
TupleArgumentsFlag::No => sig.0.inputs[0],
TupleArgumentsFlag::Yes => self.mk_tup(sig.0.inputs.to_vec()),
TupleArgumentsFlag::Yes => self.mk_tup(&sig.0.inputs),
};
let trait_ref = ty::TraitRef {
def_id: fn_trait_def_id,


@ -54,8 +54,8 @@ use hir;
pub struct CtxtArenas<'tcx> {
// internings
type_: TypedArena<TyS<'tcx>>,
type_list: TypedArena<Vec<Ty<'tcx>>>,
substs: TypedArena<Vec<Kind<'tcx>>>,
type_list: TypedArena<Ty<'tcx>>,
substs: TypedArena<Kind<'tcx>>,
bare_fn: TypedArena<BareFnTy<'tcx>>,
region: TypedArena<Region>,
stability: TypedArena<attr::Stability>,
@ -1117,6 +1117,7 @@ impl<'tcx> Borrow<Region> for Interned<'tcx, Region> {
macro_rules! intern_method {
($lt_tcx:tt, $name:ident: $method:ident($alloc:ty,
$alloc_method:ident,
$alloc_to_key:expr,
$alloc_to_ret:expr,
$needs_infer:expr) -> $ty:ty) => {
@ -1142,7 +1143,8 @@ macro_rules! intern_method {
let v = unsafe {
mem::transmute(v)
};
let i = ($alloc_to_ret)(self.global_interners.arenas.$name.alloc(v));
let i = ($alloc_to_ret)(self.global_interners.arenas.$name
.$alloc_method(v));
self.global_interners.$name.borrow_mut().insert(Interned(i));
return i;
}
@ -1156,7 +1158,7 @@ macro_rules! intern_method {
}
}
let i = ($alloc_to_ret)(self.interners.arenas.$name.alloc(v));
let i = ($alloc_to_ret)(self.interners.arenas.$name.$alloc_method(v));
self.interners.$name.borrow_mut().insert(Interned(i));
i
}
@ -1180,7 +1182,7 @@ macro_rules! direct_interners {
}
}
intern_method!($lt_tcx, $name: $method($ty, |x| x, |x| x, $needs_infer) -> $ty);)+
intern_method!($lt_tcx, $name: $method($ty, alloc, |x| x, |x| x, $needs_infer) -> $ty);)+
}
}
@ -1200,16 +1202,18 @@ direct_interners!('tcx,
}) -> Region
);
intern_method!('tcx,
type_list: mk_type_list(Vec<Ty<'tcx>>, Deref::deref, |xs: &[Ty]| -> &Slice<Ty> {
unsafe { mem::transmute(xs) }
}, keep_local) -> Slice<Ty<'tcx>>
);
macro_rules! slice_interners {
($($field:ident: $method:ident($ty:ident)),+) => (
$(intern_method!('tcx, $field: $method(&[$ty<'tcx>], alloc_slice, Deref::deref,
|xs: &[$ty]| -> &Slice<$ty> {
unsafe { mem::transmute(xs) }
}, |xs: &[$ty]| xs.iter().any(keep_local)) -> Slice<$ty<'tcx>>);)+
)
}
intern_method!('tcx,
substs: mk_substs(Vec<Kind<'tcx>>, Deref::deref, |xs: &[Kind]| -> &Slice<Kind> {
unsafe { mem::transmute(xs) }
}, keep_local) -> Slice<Kind<'tcx>>
slice_interners!(
type_list: mk_type_list(Ty),
substs: mk_substs(Kind)
);
impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
@ -1314,12 +1318,12 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
self.mk_ty(TySlice(ty))
}
pub fn mk_tup(self, ts: Vec<Ty<'tcx>>) -> Ty<'tcx> {
pub fn mk_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
self.mk_ty(TyTuple(self.mk_type_list(ts)))
}
pub fn mk_nil(self) -> Ty<'tcx> {
self.mk_tup(Vec::new())
self.mk_tup(&[])
}
pub fn mk_diverging_default(self) -> Ty<'tcx> {
@ -1361,7 +1365,7 @@ impl<'a, 'gcx, 'tcx> TyCtxt<'a, 'gcx, 'tcx> {
pub fn mk_closure(self,
closure_id: DefId,
substs: &'tcx Substs<'tcx>,
tys: Vec<Ty<'tcx>>)
tys: &[Ty<'tcx>])
-> Ty<'tcx> {
self.mk_closure_from_closure_substs(closure_id, ClosureSubsts {
func_substs: substs,


@ -1797,7 +1797,7 @@ impl<'a, 'tcx> AdtDefData<'tcx, 'tcx> {
_ if tys.references_error() => tcx.types.err,
0 => tcx.types.bool,
1 => tys[0],
_ => tcx.mk_tup(tys)
_ => tcx.mk_tup(&tys)
};
match self.sized_constraint.get(dep_node()) {


@ -491,8 +491,8 @@ pub fn super_relate_tys<'a, 'gcx, 'tcx, R>(relation: &mut R,
if as_.len() == bs.len() {
let ts = as_.iter().zip(bs)
.map(|(a, b)| relation.relate(a, b))
.collect::<Result<_, _>>()?;
Ok(tcx.mk_tup(ts))
.collect::<Result<Vec<_>, _>>()?;
Ok(tcx.mk_tup(&ts))
} else if !(as_.is_empty() || bs.is_empty()) {
Err(TypeError::TupleSize(
expected_found(relation, &as_.len(), &bs.len())))
@ -547,7 +547,7 @@ impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
let upvar_tys = relation.relate_zip(&a.upvar_tys, &b.upvar_tys)?;
Ok(ty::ClosureSubsts {
func_substs: substs,
upvar_tys: relation.tcx().mk_type_list(upvar_tys)
upvar_tys: relation.tcx().mk_type_list(&upvar_tys)
})
}
}


@ -448,8 +448,8 @@ impl<'tcx> TypeFoldable<'tcx> for ty::TraitObject<'tcx> {
impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::Slice<Ty<'tcx>> {
fn super_fold_with<'gcx: 'tcx, F: TypeFolder<'gcx, 'tcx>>(&self, folder: &mut F) -> Self {
let tys = self.iter().map(|t| t.fold_with(folder)).collect();
folder.tcx().mk_type_list(tys)
let tys = self.iter().map(|t| t.fold_with(folder)).collect::<Vec<_>>();
folder.tcx().mk_type_list(&tys)
}
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> bool {


@ -167,7 +167,7 @@ impl<'a, 'gcx, 'tcx> Substs<'tcx> {
pub fn new<I>(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I)
-> &'tcx Substs<'tcx>
where I: IntoIterator<Item=Kind<'tcx>> {
tcx.mk_substs(params.into_iter().collect())
tcx.mk_substs(&params.into_iter().collect::<Vec<_>>())
}
pub fn maybe_new<I, E>(tcx: TyCtxt<'a, 'gcx, 'tcx>, params: I)
@ -311,7 +311,7 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx Substs<'tcx> {
if params[..] == self[..] {
self
} else {
folder.tcx().mk_substs(params)
folder.tcx().mk_substs(&params)
}
}


@ -275,7 +275,7 @@ impl<'a, 'gcx, 'tcx> Env<'a, 'gcx, 'tcx> {
}
pub fn t_pair(&self, ty1: Ty<'tcx>, ty2: Ty<'tcx>) -> Ty<'tcx> {
self.infcx.tcx.mk_tup(vec![ty1, ty2])
self.infcx.tcx.mk_tup(&[ty1, ty2])
}
pub fn t_param(&self, index: u32) -> Ty<'tcx> {
@ -803,8 +803,8 @@ fn walk_ty() {
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
let tup1_ty = tcx.mk_tup(vec![int_ty, uint_ty, int_ty, uint_ty]);
let tup2_ty = tcx.mk_tup(vec![tup1_ty, tup1_ty, uint_ty]);
let tup1_ty = tcx.mk_tup(&[int_ty, uint_ty, int_ty, uint_ty]);
let tup2_ty = tcx.mk_tup(&[tup1_ty, tup1_ty, uint_ty]);
let uniq_ty = tcx.mk_box(tup2_ty);
let walked: Vec<_> = uniq_ty.walk().collect();
assert_eq!(walked,
@ -819,8 +819,8 @@ fn walk_ty_skip_subtree() {
let tcx = env.infcx.tcx;
let int_ty = tcx.types.isize;
let uint_ty = tcx.types.usize;
let tup1_ty = tcx.mk_tup(vec![int_ty, uint_ty, int_ty, uint_ty]);
let tup2_ty = tcx.mk_tup(vec![tup1_ty, tup1_ty, uint_ty]);
let tup1_ty = tcx.mk_tup(&[int_ty, uint_ty, int_ty, uint_ty]);
let tup2_ty = tcx.mk_tup(&[tup1_ty, tup1_ty, uint_ty]);
let uniq_ty = tcx.mk_box(tup2_ty);
// types we expect to see (in order), plus a boolean saying


@ -374,7 +374,7 @@ impl<'a, 'tcx> SpecializedDecoder<ty::GenericPredicates<'tcx>> for DecodeContext
impl<'a, 'tcx> SpecializedDecoder<&'tcx Substs<'tcx>> for DecodeContext<'a, 'tcx> {
fn specialized_decode(&mut self) -> Result<&'tcx Substs<'tcx>, Self::Error> {
Ok(self.tcx().mk_substs(Decodable::decode(self)?))
Ok(self.tcx().mk_substs(&Vec::decode(self)?))
}
}
@ -386,7 +386,7 @@ impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Region> for DecodeContext<'a, 'tcx>
impl<'a, 'tcx> SpecializedDecoder<&'tcx ty::Slice<Ty<'tcx>>> for DecodeContext<'a, 'tcx> {
fn specialized_decode(&mut self) -> Result<&'tcx ty::Slice<Ty<'tcx>>, Self::Error> {
Ok(self.tcx().mk_type_list(Decodable::decode(self)?))
Ok(self.tcx().mk_type_list(&Vec::decode(self)?))
}
}


@ -257,7 +257,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
let source_info = self.source_info(span);
let bool_ty = self.hir.bool_ty();
if self.hir.check_overflow() && op.is_checkable() && ty.is_integral() {
let result_tup = self.hir.tcx().mk_tup(vec![ty, bool_ty]);
let result_tup = self.hir.tcx().mk_tup(&[ty, bool_ty]);
let result_value = self.temp(result_tup);
self.cfg.push_assign(block, source_info,


@ -327,7 +327,7 @@ fn trans_fn_pointer_shim<'a, 'tcx>(
}
};
let sig = tcx.erase_late_bound_regions_and_normalize(sig);
let tuple_input_ty = tcx.mk_tup(sig.inputs.to_vec());
let tuple_input_ty = tcx.mk_tup(&sig.inputs);
let sig = ty::FnSig {
inputs: vec![bare_fn_ty_maybe_ref,
tuple_input_ty],


@ -773,7 +773,7 @@ impl<'a, 'tcx> MirConstContext<'a, 'tcx> {
let rhs = self.const_operand(rhs, span)?;
let ty = lhs.ty;
let val_ty = op.ty(tcx, lhs.ty, rhs.ty);
let binop_ty = tcx.mk_tup(vec![val_ty, tcx.types.bool]);
let binop_ty = tcx.mk_tup(&[val_ty, tcx.types.bool]);
let (lhs, rhs) = (lhs.llval, rhs.llval);
assert!(!ty.is_fp());


@ -430,7 +430,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
lhs.immediate(), rhs.immediate(),
lhs.ty);
let val_ty = op.ty(bcx.tcx(), lhs.ty, rhs.ty);
let operand_ty = bcx.tcx().mk_tup(vec![val_ty, bcx.tcx().types.bool]);
let operand_ty = bcx.tcx().mk_tup(&[val_ty, bcx.tcx().types.bool]);
let operand = OperandRef {
val: result,
ty: operand_ty


@ -660,7 +660,7 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o {
span: output_span
};
(self.tcx().mk_tup(inputs), output_binding)
(self.tcx().mk_tup(&inputs), output_binding)
}
pub fn instantiate_poly_trait_ref(&self,
@ -1663,8 +1663,8 @@ impl<'o, 'gcx: 'tcx, 'tcx> AstConv<'gcx, 'tcx>+'o {
hir::TyTup(ref fields) => {
let flds = fields.iter()
.map(|t| self.ast_ty_to_ty(rscope, &t))
.collect();
tcx.mk_tup(flds)
.collect::<Vec<_>>();
tcx.mk_tup(&flds)
}
hir::TyBareFn(ref bf) => {
require_c_abi_if_variadic(tcx, &bf.decl, bf.abi, ast_ty.span);


@ -169,7 +169,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
let max_len = cmp::max(expected_len, elements.len());
let element_tys: Vec<_> = (0 .. max_len).map(|_| self.next_ty_var()).collect();
let pat_ty = tcx.mk_tup(element_tys.clone());
let pat_ty = tcx.mk_tup(&element_tys);
self.demand_eqtype(pat.span, expected, pat_ty);
for (i, elem) in elements.iter().enumerate_and_adjust(max_len, ddpos) {
self.check_pat(elem, &element_tys[i]);


@ -71,7 +71,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
let closure_type = self.tcx.mk_closure(expr_def_id,
self.parameter_environment.free_substs,
upvar_tys);
&upvar_tys);
let fn_sig = self.tcx
.liberate_late_bound_regions(self.tcx.region_maps.call_site_extent(expr.id, body.id),
@ -88,7 +88,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
// Tuple up the arguments and insert the resulting function type into
// the `closures` table.
fn_ty.sig.0.inputs = vec![self.tcx.mk_tup(fn_ty.sig.0.inputs)];
fn_ty.sig.0.inputs = vec![self.tcx.mk_tup(&fn_ty.sig.0.inputs)];
debug!("closure for {:?} --> sig={:?} opt_kind={:?}",
expr_def_id,


@ -89,7 +89,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
"cxchg" | "cxchgweak" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)),
param(ccx, 0),
param(ccx, 0)),
tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
tcx.mk_tup(&[param(ccx, 0), tcx.types.bool])),
"load" => (1, vec!(tcx.mk_imm_ptr(param(ccx, 0))),
param(ccx, 0)),
"store" => (1, vec!(tcx.mk_mut_ptr(param(ccx, 0)), param(ccx, 0)),
@ -274,7 +274,7 @@ pub fn check_intrinsic_type(ccx: &CrateCtxt, it: &hir::ForeignItem) {
"add_with_overflow" | "sub_with_overflow" | "mul_with_overflow" =>
(1, vec!(param(ccx, 0), param(ccx, 0)),
tcx.mk_tup(vec!(param(ccx, 0), tcx.types.bool))),
tcx.mk_tup(&[param(ccx, 0), tcx.types.bool])),
"unchecked_div" | "unchecked_rem" =>
(1, vec![param(ccx, 0), param(ccx, 0)], param(ccx, 0)),


@ -2380,7 +2380,7 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
let err_inputs = match tuple_arguments {
DontTupleArguments => err_inputs,
TupleArguments => vec![self.tcx.mk_tup(err_inputs)],
TupleArguments => vec![self.tcx.mk_tup(&err_inputs)],
};
self.check_argument_types(sp, &err_inputs[..], &[], args_no_rcvr,
@ -3733,11 +3733,11 @@ impl<'a, 'gcx, 'tcx> FnCtxt<'a, 'gcx, 'tcx> {
};
err_field = err_field || t.references_error();
t
}).collect();
}).collect::<Vec<_>>();
if err_field {
tcx.types.err
} else {
tcx.mk_tup(elt_ts)
tcx.mk_tup(&elt_ts)
}
}
hir::ExprStruct(ref path, ref fields, ref base_expr) => {