diff --git a/rust-version b/rust-version
index 1ab8e6b5a8c..4198e8cf3c0 100644
--- a/rust-version
+++ b/rust-version
@@ -1 +1 @@
-bf45371f262e184b4a77adea88c8ac01ac79759b
+ca1e68b3229e710c3948a361ee770d846a88e6da
diff --git a/src/concurrency/data_race.rs b/src/concurrency/data_race.rs
index c1bcd236813..36178269e02 100644
--- a/src/concurrency/data_race.rs
+++ b/src/concurrency/data_race.rs
@@ -535,7 +535,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
         this.validate_overlapping_atomic(dest)?;
-        this.allow_data_races_mut(move |this| this.write_scalar(val, &(*dest).into()))?;
+        this.allow_data_races_mut(move |this| this.write_scalar(val, &dest.into()))?;
         this.validate_atomic_store(dest, atomic)?;
         // FIXME: it's not possible to get the value before write_scalar. A read_scalar will cause
         // side effects from a read the program did not perform. So we have to initialise
@@ -562,7 +562,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // Atomics wrap around on overflow.
         let val = this.binary_op(op, &old, rhs)?;
         let val = if neg { this.unary_op(mir::UnOp::Not, &val)? } else { val };
-        this.allow_data_races_mut(|this| this.write_immediate(*val, &(*place).into()))?;
+        this.allow_data_races_mut(|this| this.write_immediate(*val, &place.into()))?;

         this.validate_atomic_rmw(place, atomic)?;

@@ -587,7 +587,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {

         this.validate_overlapping_atomic(place)?;
         let old = this.allow_data_races_mut(|this| this.read_scalar(&place.into()))?;
-        this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
+        this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;

         this.validate_atomic_rmw(place, atomic)?;

@@ -616,7 +616,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
             if lt { &rhs } else { &old }
         };

-        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &(*place).into()))?;
+        this.allow_data_races_mut(|this| this.write_immediate(**new_val, &place.into()))?;

         this.validate_atomic_rmw(place, atomic)?;

@@ -675,7 +675,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         // if successful, perform a full rw-atomic validation
         // otherwise treat this as an atomic load with the fail ordering.
         if cmpxchg_success {
-            this.allow_data_races_mut(|this| this.write_scalar(new, &(*place).into()))?;
+            this.allow_data_races_mut(|this| this.write_scalar(new, &place.into()))?;
             this.validate_atomic_rmw(place, success)?;
             this.buffered_atomic_rmw(new, place, success, old.to_scalar_or_uninit())?;
         } else {
@@ -964,7 +964,7 @@ impl VClockAlloc {
             let (index, clocks) = global.current_thread_state();
             let mut alloc_ranges = self.alloc_ranges.borrow_mut();
             for (offset, range) in alloc_ranges.iter_mut(range.start, range.size) {
-                if let Err(DataRace) = range.read_race_detect(&*clocks, index) {
+                if let Err(DataRace) = range.read_race_detect(&clocks, index) {
                     // Report data-race.
                     return Self::report_data_race(
                         global,
@@ -992,7 +992,7 @@ impl VClockAlloc {
         if global.race_detecting() {
             let (index, clocks) = global.current_thread_state();
             for (offset, range) in self.alloc_ranges.get_mut().iter_mut(range.start, range.size) {
-                if let Err(DataRace) = range.write_race_detect(&*clocks, index, write_type) {
+                if let Err(DataRace) = range.write_race_detect(&clocks, index, write_type) {
                     // Report data-race
                     return Self::report_data_race(
                         global,
@@ -1072,7 +1072,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
                     for (offset, range) in
                         alloc_meta.alloc_ranges.borrow_mut().iter_mut(base_offset, size)
                     {
-                        if let Err(DataRace) = op(range, &mut *clocks, index, atomic) {
+                        if let Err(DataRace) = op(range, &mut clocks, index, atomic) {
                             mem::drop(clocks);
                             return VClockAlloc::report_data_race(
                                 data_race,
diff --git a/src/eval.rs b/src/eval.rs
index c9fc05500a3..1536b826ac4 100644
--- a/src/eval.rs
+++ b/src/eval.rs
@@ -215,7 +215,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
             let arg_place =
                 ecx.allocate(ecx.layout_of(arg_type)?, MiriMemoryKind::Machine.into())?;
             ecx.write_os_str_to_c_str(OsStr::new(arg), arg_place.ptr, size)?;
-            ecx.mark_immutable(&*arg_place);
+            ecx.mark_immutable(&arg_place);
             argvs.push(arg_place.to_ref(&ecx));
         }
         // Make an array with all these pointers, in the Miri memory.
@@ -227,7 +227,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
             let place = ecx.mplace_field(&argvs_place, idx)?;
             ecx.write_immediate(arg, &place.into())?;
         }
-        ecx.mark_immutable(&*argvs_place);
+        ecx.mark_immutable(&argvs_place);
         // A pointer to that place is the 3rd argument for main.
         let argv = argvs_place.to_ref(&ecx);
         // Store `argc` and `argv` for macOS `_NSGetArg{c,v}`.
@@ -235,7 +235,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
            let argc_place =
                ecx.allocate(ecx.machine.layouts.isize, MiriMemoryKind::Machine.into())?;
            ecx.write_scalar(argc, &argc_place.into())?;
-            ecx.mark_immutable(&*argc_place);
+            ecx.mark_immutable(&argc_place);
            ecx.machine.argc = Some(*argc_place);

            let argv_place = ecx.allocate(
@@ -243,7 +243,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
                MiriMemoryKind::Machine.into(),
            )?;
            ecx.write_immediate(argv, &argv_place.into())?;
-            ecx.mark_immutable(&*argv_place);
+            ecx.mark_immutable(&argv_place);
            ecx.machine.argv = Some(*argv_place);
        }
        // Store command line as UTF-16 for Windows `GetCommandLineW`.
@@ -260,7 +260,7 @@ pub fn create_ecx<'mir, 'tcx: 'mir>(
                let place = ecx.mplace_field(&cmd_place, idx)?;
                ecx.write_scalar(Scalar::from_u16(c), &place.into())?;
            }
-            ecx.mark_immutable(&*cmd_place);
+            ecx.mark_immutable(&cmd_place);
        }
        argv
    };
diff --git a/src/stacked_borrows.rs b/src/stacked_borrows.rs
index 3fc0eaf10c0..2cf5eb70c17 100644
--- a/src/stacked_borrows.rs
+++ b/src/stacked_borrows.rs
@@ -928,7 +928,7 @@ trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
                    orig_tag,
                    item,
                    (alloc_id, range, offset),
-                    &mut *global,
+                    &mut global,
                    current_span,
                    history,
                    exposed_tags,
@@ -1090,14 +1090,14 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx

            #[inline(always)]
            fn ecx(&mut self) -> &mut MiriEvalContext<'mir, 'tcx> {
-                &mut self.ecx
+                self.ecx
            }

            fn visit_value(&mut self, place: &MPlaceTy<'tcx, Tag>) -> InterpResult<'tcx> {
                if let Some((mutbl, protector)) = qualify(place.layout.ty, self.kind) {
                    let val = self.ecx.read_immediate(&place.into())?;
                    let val = self.ecx.retag_reference(&val, mutbl, protector)?;
-                    self.ecx.write_immediate(*val, &(*place).into())?;
+                    self.ecx.write_immediate(*val, &place.into())?;
                } else {
                    // Maybe we need to go deeper.
                    self.walk_value(place)?;
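Note on the recurring pattern: every hunk above drops an explicit deref (`&(*place).into()` → `&place.into()`, `&*clocks` → `&clocks`, `&mut *global` → `&mut global`), presumably because the conversions or callee parameters involved now accept the borrowed form directly; the upstream change that enables this is not part of this diff. The following is a minimal, self-contained sketch with toy `MemPlace`/`Place` types (not the actual rustc/Miri API) showing why adding a conversion for the reference type makes the deref-and-copy redundant.

```rust
// Toy stand-ins for interpreter place types; hypothetical, for illustration only.
#[derive(Clone, Copy)]
struct MemPlace {
    offset: u64,
}

struct Place {
    offset: u64,
}

// Conversion from an owned value: callers holding `&MemPlace` must write `(*p).into()`.
impl From<MemPlace> for Place {
    fn from(m: MemPlace) -> Self {
        Place { offset: m.offset }
    }
}

// Conversion from a reference: callers can write `p.into()` directly on `&MemPlace`.
impl From<&MemPlace> for Place {
    fn from(m: &MemPlace) -> Self {
        Place { offset: m.offset }
    }
}

fn write_to(place: &Place) {
    println!("write at offset {}", place.offset);
}

fn main() {
    let mplace = MemPlace { offset: 8 };
    let by_ref: &MemPlace = &mplace;

    // Old style: copy the place out of the reference, then convert.
    write_to(&(*by_ref).into());
    // New style: the `From<&MemPlace>` impl lets the reference convert directly,
    // which is the shape of the mechanical edits in the diff above.
    write_to(&by_ref.into());
}
```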