Initial data-race detector

Passes all current tests, but additional tests are required.
JCTyBlaidd 2020-11-02 00:23:27 +00:00
parent 9a2cfbfc0a
commit 89814f1b3f
10 changed files with 1795 additions and 173 deletions

Cargo.lock (generated)

@ -282,6 +282,7 @@ dependencies = [
"rustc-workspace-hack",
"rustc_version",
"shell-escape",
"smallvec",
]
[[package]]
@ -496,6 +497,12 @@ version = "0.1.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "45bb67a18fa91266cc7807181f62f9178a6873bfad7dc788c42e6430db40184f"
[[package]]
name = "smallvec"
version = "1.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fbee7696b84bbf3d89a1c2eccff0850e3047ed46bfcd2e92c29a2d074d57e252"
[[package]]
name = "socket2"
version = "0.3.15"

Cargo.toml

@ -30,6 +30,7 @@ log = "0.4"
shell-escape = "0.1.4"
hex = "0.4.0"
rand = "0.7"
smallvec = "1.4.2"
# A noop dependency that changes in the Rust repository, it's a bit of a hack.
# See the `src/tools/rustc-workspace-hack/README.md` file in `rust-lang/rust`

src/data_race.rs (new file)

File diff suppressed because it is too large.
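Since the diff for src/data_race.rs is suppressed, the following is a rough orientation only: a minimal, hypothetical sketch of the vector-clock bookkeeping a detector of this kind is built on. None of the names below are the actual API of the new module.

```rust
use std::cmp::max;

/// One Lamport timestamp per thread, indexed by thread id (illustrative only).
#[derive(Clone, Debug, Default, PartialEq)]
struct VClock(Vec<u32>);

impl VClock {
    /// Component-wise maximum: what an "acquire" of another clock looks like.
    fn join(&mut self, other: &VClock) {
        if self.0.len() < other.0.len() {
            self.0.resize(other.0.len(), 0);
        }
        for (mine, theirs) in self.0.iter_mut().zip(other.0.iter()) {
            *mine = max(*mine, *theirs);
        }
    }

    /// Advance this thread's own component, starting a new epoch.
    fn increment(&mut self, thread: usize) {
        if self.0.len() <= thread {
            self.0.resize(thread + 1, 0);
        }
        self.0[thread] += 1;
    }

    /// Timestamp of `thread` as seen by this clock.
    fn get(&self, thread: usize) -> u32 {
        self.0.get(thread).copied().unwrap_or(0)
    }
}

/// Per-location history: the last write as (thread, timestamp).
struct MemCell {
    last_write: (usize, u32),
}

/// A read races with the last write unless that write is already visible
/// in the reading thread's clock (i.e. it happens-before the read).
fn check_read(cell: &MemCell, reader: &VClock) -> Result<(), String> {
    let (writer, write_ts) = cell.last_write;
    if reader.get(writer) >= write_ts {
        Ok(())
    } else {
        Err(format!("data race: read conflicts with earlier write by thread {}", writer))
    }
}
```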

src/lib.rs

@ -22,6 +22,7 @@ extern crate rustc_mir;
extern crate rustc_span;
extern crate rustc_target;
mod data_race;
mod diagnostics;
mod eval;
mod helpers;
@ -52,6 +53,10 @@ pub use crate::shims::panic::{CatchUnwindData, EvalContextExt as _};
pub use crate::shims::tls::{EvalContextExt as _, TlsData};
pub use crate::shims::EvalContextExt as _;
pub use crate::data_race::{
AtomicReadOp, AtomicWriteOp, AtomicRWOp, AtomicFenceOp, DataRaceLockHandle,
EvalContextExt as DataRaceEvalContextExt
};
pub use crate::diagnostics::{
register_diagnostic, report_error, EvalContextExt as DiagnosticsEvalContextExt,
TerminationInfo, NonHaltingDiagnostic,

src/machine.rs

@ -109,12 +109,15 @@ impl fmt::Display for MiriMemoryKind {
pub struct AllocExtra {
/// Stacked Borrows state is only added if it is enabled.
pub stacked_borrows: Option<stacked_borrows::AllocExtra>,
/// Data race detection via the use of a vector-clock.
pub data_race: data_race::AllocExtra,
}
/// Extra global memory data
#[derive(Clone, Debug)]
pub struct MemoryExtra {
pub stacked_borrows: Option<stacked_borrows::MemoryExtra>,
pub data_race: data_race::MemoryExtra,
pub intptrcast: intptrcast::MemoryExtra,
/// Mapping extern static names to their canonical allocation.
@ -144,8 +147,10 @@ impl MemoryExtra {
} else {
None
};
let data_race = Rc::new(data_race::GlobalState::new());
MemoryExtra {
stacked_borrows,
data_race,
intptrcast: Default::default(),
extern_statics: FxHashMap::default(),
rng: RefCell::new(rng),
@ -467,6 +472,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
// No stacks, no tag.
(None, Tag::Untagged)
};
let race_alloc = data_race::AllocExtra::new_allocation(&memory_extra.data_race, alloc.size);
let mut stacked_borrows = memory_extra.stacked_borrows.as_ref().map(|sb| sb.borrow_mut());
let alloc: Allocation<Tag, Self::AllocExtra> = alloc.with_tags_and_extra(
|alloc| {
@ -478,7 +484,7 @@ impl<'mir, 'tcx> Machine<'mir, 'tcx> for Evaluator<'mir, 'tcx> {
Tag::Untagged
}
},
AllocExtra { stacked_borrows: stacks },
AllocExtra { stacked_borrows: stacks, data_race: race_alloc },
);
(Cow::Owned(alloc), base_tag)
}
@ -584,6 +590,7 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.read(ptr, size)?;
if let Some(stacked_borrows) = &alloc.extra.stacked_borrows {
stacked_borrows.memory_read(ptr, size)
} else {
@ -597,6 +604,7 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.write(ptr, size)?;
if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
stacked_borrows.memory_written(ptr, size)
} else {
@ -610,6 +618,7 @@ impl AllocationExtra<Tag> for AllocExtra {
ptr: Pointer<Tag>,
size: Size,
) -> InterpResult<'tcx> {
alloc.extra.data_race.deallocate(ptr, size)?;
if let Some(stacked_borrows) = &mut alloc.extra.stacked_borrows {
stacked_borrows.memory_deallocated(ptr, size)
} else {

src/shims/intrinsics.rs

@ -4,7 +4,7 @@ use log::trace;
use rustc_attr as attr;
use rustc_ast::ast::FloatTy;
use rustc_middle::{mir, ty};
use rustc_middle::{mir, mir::BinOp, ty};
use rustc_middle::ty::layout::IntegerExt;
use rustc_apfloat::{Float, Round};
use rustc_target::abi::{Align, Integer, LayoutOf};
@ -306,157 +306,117 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
// Atomic operations
#[rustfmt::skip]
| "atomic_load"
| "atomic_load_relaxed"
| "atomic_load_acq"
=> {
let &[place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
"atomic_load" => this.atomic_load(args, dest, AtomicReadOp::SeqCst)?,
"atomic_load_relaxed" => this.atomic_load(args, dest, AtomicReadOp::Relaxed)?,
"atomic_load_acq" => this.atomic_load(args, dest, AtomicReadOp::Acquire)?,
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
"atomic_store" => this.atomic_store(args, AtomicWriteOp::SeqCst)?,
"atomic_store_relaxed" => this.atomic_store(args, AtomicWriteOp::Relaxed)?,
"atomic_store_rel" => this.atomic_store(args, AtomicWriteOp::Release)?,
this.write_scalar(val, dest)?;
}
"atomic_fence_acq" => this.atomic_fence(args, AtomicFenceOp::Acquire)?,
"atomic_fence_rel" => this.atomic_fence(args, AtomicFenceOp::Release)?,
"atomic_fence_acqrel" => this.atomic_fence(args, AtomicFenceOp::AcqRel)?,
"atomic_fence" => this.atomic_fence(args, AtomicFenceOp::SeqCst)?,
#[rustfmt::skip]
| "atomic_store"
| "atomic_store_relaxed"
| "atomic_store_rel"
=> {
let &[place, val] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
"atomic_singlethreadfence_acq" => this.compiler_fence(args, AtomicFenceOp::Acquire)?,
"atomic_singlethreadfence_rel" => this.compiler_fence(args, AtomicFenceOp::Release)?,
"atomic_singlethreadfence_acqrel" => this.compiler_fence(args, AtomicFenceOp::AcqRel)?,
"atomic_singlethreadfence" => this.compiler_fence(args, AtomicFenceOp::SeqCst)?,
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
"atomic_xchg" => this.atomic_exchange(args, dest, AtomicRWOp::SeqCst)?,
"atomic_xchg_acq" => this.atomic_exchange(args, dest, AtomicRWOp::Acquire)?,
"atomic_xchg_rel" => this.atomic_exchange(args, dest, AtomicRWOp::Release)?,
"atomic_xchg_acqrel" => this.atomic_exchange(args, dest, AtomicRWOp::AcqRel)?,
"atomic_xchg_relaxed" => this.atomic_exchange(args, dest, AtomicRWOp::Relaxed)?,
this.write_scalar(val, place.into())?;
}
"atomic_cxchg" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
)?,
"atomic_cxchg_acq" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
)?,
"atomic_cxchg_rel" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acqrel" => this.atomic_compare_exchange
(args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
)?,
"atomic_cxchg_relaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acq_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_acqrel_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_failrelaxed" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
)?,
"atomic_cxchg_failacq" => this.atomic_compare_exchange(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
)?,
#[rustfmt::skip]
| "atomic_fence_acq"
| "atomic_fence_rel"
| "atomic_fence_acqrel"
| "atomic_fence"
| "atomic_singlethreadfence_acq"
| "atomic_singlethreadfence_rel"
| "atomic_singlethreadfence_acqrel"
| "atomic_singlethreadfence"
=> {
let &[] = check_arg_count(args)?;
// FIXME: this will become relevant once we try to detect data races.
}
"atomic_cxchgweak" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::SeqCst
)?,
"atomic_cxchgweak_acq" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Acquire
)?,
"atomic_cxchgweak_rel" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Release, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acqrel" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Acquire
)?,
"atomic_cxchgweak_relaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Relaxed, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acq_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::Acquire, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_acqrel_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::AcqRel, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_failrelaxed" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Relaxed
)?,
"atomic_cxchgweak_failacq" => this.atomic_compare_exchange_weak(
args, dest, AtomicRWOp::SeqCst, AtomicReadOp::Acquire
)?,
_ if intrinsic_name.starts_with("atomic_xchg") => {
let &[place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
let old = this.read_scalar(place.into())?;
"atomic_or" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::SeqCst)?,
"atomic_or_acq" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Acquire)?,
"atomic_or_rel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Release)?,
"atomic_or_acqrel" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::AcqRel)?,
"atomic_or_relaxed" => this.atomic_op(args, dest, BinOp::BitOr, false, AtomicRWOp::Relaxed)?,
"atomic_xor" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::SeqCst)?,
"atomic_xor_acq" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Acquire)?,
"atomic_xor_rel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Release)?,
"atomic_xor_acqrel" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::AcqRel)?,
"atomic_xor_relaxed" => this.atomic_op(args, dest, BinOp::BitXor, false, AtomicRWOp::Relaxed)?,
"atomic_and" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::SeqCst)?,
"atomic_and_acq" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Acquire)?,
"atomic_and_rel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Release)?,
"atomic_and_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::AcqRel)?,
"atomic_and_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, false, AtomicRWOp::Relaxed)?,
"atomic_nand" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::SeqCst)?,
"atomic_nand_acq" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Acquire)?,
"atomic_nand_rel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Release)?,
"atomic_nand_acqrel" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::AcqRel)?,
"atomic_nand_relaxed" => this.atomic_op(args, dest, BinOp::BitAnd, true, AtomicRWOp::Relaxed)?,
"atomic_xadd" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::SeqCst)?,
"atomic_xadd_acq" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Acquire)?,
"atomic_xadd_rel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Release)?,
"atomic_xadd_acqrel" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::AcqRel)?,
"atomic_xadd_relaxed" => this.atomic_op(args, dest, BinOp::Add, false, AtomicRWOp::Relaxed)?,
"atomic_xsub" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::SeqCst)?,
"atomic_xsub_acq" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Acquire)?,
"atomic_xsub_rel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Release)?,
"atomic_xsub_acqrel" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::AcqRel)?,
"atomic_xsub_relaxed" => this.atomic_op(args, dest, BinOp::Sub, false, AtomicRWOp::Relaxed)?,
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(old, dest)?; // old value is returned
this.write_scalar(new, place.into())?;
}
_ if intrinsic_name.starts_with("atomic_cxchg") => {
let &[place, expect_old, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
// Return old value.
this.write_immediate(res, dest)?;
// Update ptr depending on comparison.
if eq.to_bool()? {
this.write_scalar(new, place.into())?;
}
}
#[rustfmt::skip]
| "atomic_or"
| "atomic_or_acq"
| "atomic_or_rel"
| "atomic_or_acqrel"
| "atomic_or_relaxed"
| "atomic_xor"
| "atomic_xor_acq"
| "atomic_xor_rel"
| "atomic_xor_acqrel"
| "atomic_xor_relaxed"
| "atomic_and"
| "atomic_and_acq"
| "atomic_and_rel"
| "atomic_and_acqrel"
| "atomic_and_relaxed"
| "atomic_nand"
| "atomic_nand_acq"
| "atomic_nand_rel"
| "atomic_nand_acqrel"
| "atomic_nand_relaxed"
| "atomic_xadd"
| "atomic_xadd_acq"
| "atomic_xadd_rel"
| "atomic_xadd_acqrel"
| "atomic_xadd_relaxed"
| "atomic_xsub"
| "atomic_xsub_acq"
| "atomic_xsub_rel"
| "atomic_xsub_acqrel"
| "atomic_xsub_relaxed"
=> {
let &[place, rhs] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
if !place.layout.ty.is_integral() {
bug!("Atomic arithmetic operations only work on integer types");
}
let rhs = this.read_immediate(rhs)?;
let old = this.read_immediate(place.into())?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_immediate(*old, dest)?; // old value is returned
let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
"or" => (mir::BinOp::BitOr, false),
"xor" => (mir::BinOp::BitXor, false),
"and" => (mir::BinOp::BitAnd, false),
"xadd" => (mir::BinOp::Add, false),
"xsub" => (mir::BinOp::Sub, false),
"nand" => (mir::BinOp::BitAnd, true),
_ => bug!(),
};
// Atomics wrap around on overflow.
let val = this.binary_op(op, old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
this.write_immediate(*val, place.into())?;
}
// Query type information
"assert_inhabited" |
@ -498,6 +458,169 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
Ok(())
}
fn atomic_load(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
atomic: AtomicReadOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
// make sure it fits into a scalar; otherwise it cannot be atomic
let val = this.read_scalar_racy(place)?;
this.validate_atomic_load(place, atomic)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(val, dest)?;
Ok(())
}
fn atomic_store(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicWriteOp) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place, val] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let val = this.read_scalar(val)?; // make sure it fits into a scalar; otherwise it cannot be atomic
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
// Perform atomic store
this.write_scalar_racy(val, place)?;
this.validate_atomic_store(place, atomic)?;
Ok(())
}
fn compiler_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
let &[] = check_arg_count(args)?;
let _ = atomic;
//FIXME: compiler fences are currently ignored
Ok(())
}
fn atomic_fence(&mut self, args: &[OpTy<'tcx, Tag>], atomic: AtomicFenceOp) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[] = check_arg_count(args)?;
this.validate_atomic_fence(atomic)?;
Ok(())
}
fn atomic_op(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
op: mir::BinOp, neg: bool, atomic: AtomicRWOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place, rhs] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
if !place.layout.ty.is_integral() {
bug!("Atomic arithmetic operations only work on integer types");
}
let rhs = this.read_immediate(rhs)?;
let old = this.read_immediate_racy(place)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_immediate(*old, dest)?; // old value is returned
// Atomics wrap around on overflow.
let val = this.binary_op(op, old, rhs)?;
let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
this.write_immediate_racy(*val, place)?;
this.validate_atomic_rmw(place, atomic)?;
Ok(())
}
fn atomic_exchange(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>, atomic: AtomicRWOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let new = this.read_scalar(new)?;
let old = this.read_scalar_racy(place)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
this.write_scalar(old, dest)?; // old value is returned
this.write_scalar_racy(new, place)?;
this.validate_atomic_rmw(place, atomic)?;
Ok(())
}
fn atomic_compare_exchange(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRWOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
let &[place, expect_old, new] = check_arg_count(args)?;
let place = this.deref_operand(place)?;
let expect_old = this.read_immediate(expect_old)?; // read as immediate for the sake of `binary_op()`
let new = this.read_scalar(new)?;
// Failure ordering cannot be stronger than success ordering, therefore first attempt
// the read with the failure ordering; if successful, re-validate with the success
// ordering and perform the write in the success case.
// Read as immediate for the sake of `binary_op()`
let old = this.read_immediate_racy(place)?;
// Check alignment requirements. Atomics must always be aligned to their size,
// even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
// be 8-aligned).
let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
// `binary_op` will bail if either of them is not a scalar.
let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
let res = Immediate::ScalarPair(old.to_scalar_or_uninit(), eq.into());
// Return old value.
this.write_immediate(res, dest)?;
// Update ptr depending on comparison.
// if successful, perform a full rw-atomic validation
// otherwise treat this as an atomic load with the fail ordering
if eq.to_bool()? {
this.write_scalar_racy(new, place)?;
this.validate_atomic_rmw(place, success)?;
} else {
this.validate_atomic_load(place, fail)?;
}
Ok(())
}
fn atomic_compare_exchange_weak(
&mut self, args: &[OpTy<'tcx, Tag>], dest: PlaceTy<'tcx, Tag>,
success: AtomicRWOp, fail: AtomicReadOp
) -> InterpResult<'tcx> {
// FIXME: the weak part of this is currently not modelled,
// it is assumed to always succeed unconditionally.
self.atomic_compare_exchange(args, dest, success, fail)
}
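As a side note on the success/failure ordering pairs that the atomic_cxchg* arms above decode: they mirror the two Ordering arguments of the standard library's compare_exchange. A small user-level example (values chosen arbitrarily) of the pairing that the comment in atomic_compare_exchange refers to:

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

fn main() {
    let flag = AtomicUsize::new(0);
    // Success uses AcqRel, failure uses Acquire; at the time of this commit the
    // failure ordering may not be stronger than the success ordering, which is
    // why the failed case above is validated as a plain atomic load.
    match flag.compare_exchange(0, 1, Ordering::AcqRel, Ordering::Acquire) {
        Ok(previous) => println!("swapped, previous value was {}", previous),
        Err(current) => println!("not swapped, current value is {}", current),
    }
}
```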
fn float_to_int_unchecked<F>(
&self,
f: F,

src/shims/posix/sync.rs

@ -62,7 +62,7 @@ fn mutex_get_kind<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
ecx.read_scalar_at_offset(mutex_op, offset, ecx.machine.layouts.i32)
ecx.read_scalar_at_offset_racy(mutex_op, offset, ecx.machine.layouts.i32)
}
fn mutex_set_kind<'mir, 'tcx: 'mir>(
@ -71,14 +71,14 @@ fn mutex_set_kind<'mir, 'tcx: 'mir>(
kind: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
let offset = if ecx.pointer_size().bytes() == 8 { 16 } else { 12 };
ecx.write_scalar_at_offset(mutex_op, offset, kind, ecx.machine.layouts.i32)
ecx.write_scalar_at_offset_racy(mutex_op, offset, kind, ecx.machine.layouts.i32)
}
fn mutex_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
mutex_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset(mutex_op, 4, ecx.machine.layouts.u32)
ecx.read_scalar_at_offset_racy(mutex_op, 4, ecx.machine.layouts.u32)
}
fn mutex_set_id<'mir, 'tcx: 'mir>(
@ -86,7 +86,7 @@ fn mutex_set_id<'mir, 'tcx: 'mir>(
mutex_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset(mutex_op, 4, id, ecx.machine.layouts.u32)
ecx.write_scalar_at_offset_racy(mutex_op, 4, id, ecx.machine.layouts.u32)
}
fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
@ -116,7 +116,7 @@ fn rwlock_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
rwlock_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset(rwlock_op, 4, ecx.machine.layouts.u32)
ecx.read_scalar_at_offset_racy(rwlock_op, 4, ecx.machine.layouts.u32)
}
fn rwlock_set_id<'mir, 'tcx: 'mir>(
@ -124,7 +124,7 @@ fn rwlock_set_id<'mir, 'tcx: 'mir>(
rwlock_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset(rwlock_op, 4, id, ecx.machine.layouts.u32)
ecx.write_scalar_at_offset_racy(rwlock_op, 4, id, ecx.machine.layouts.u32)
}
fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
@ -177,7 +177,7 @@ fn cond_get_id<'mir, 'tcx: 'mir>(
ecx: &MiriEvalContext<'mir, 'tcx>,
cond_op: OpTy<'tcx, Tag>,
) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
ecx.read_scalar_at_offset(cond_op, 4, ecx.machine.layouts.u32)
ecx.read_scalar_at_offset_racy(cond_op, 4, ecx.machine.layouts.u32)
}
fn cond_set_id<'mir, 'tcx: 'mir>(
@ -185,7 +185,7 @@ fn cond_set_id<'mir, 'tcx: 'mir>(
cond_op: OpTy<'tcx, Tag>,
id: impl Into<ScalarMaybeUninit<Tag>>,
) -> InterpResult<'tcx, ()> {
ecx.write_scalar_at_offset(cond_op, 4, id, ecx.machine.layouts.u32)
ecx.write_scalar_at_offset_racy(cond_op, 4, id, ecx.machine.layouts.u32)
}
fn cond_get_or_create_id<'mir, 'tcx: 'mir>(

src/shims/posix/thread.rs

@ -19,21 +19,29 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
For example, Miri does not detect data races yet.",
);
// Create the new thread
let new_thread_id = this.create_thread();
// Also switch to new thread so that we can push the first stackframe.
let old_thread_id = this.set_active_thread(new_thread_id);
// Write the new thread's id now and switch to the new thread later, so that
// this write operation is treated as occurring on the current (spawning) thread
let thread_info_place = this.deref_operand(thread)?;
this.write_scalar(
Scalar::from_uint(new_thread_id.to_u32(), thread_info_place.layout.size),
thread_info_place.into(),
)?;
// Read the function argument that will be sent to the new thread
// again, perform the read before the new thread starts executing.
let fn_ptr = this.read_scalar(start_routine)?.check_init()?;
let instance = this.memory.get_fn(fn_ptr)?.as_instance()?;
let func_arg = this.read_immediate(arg)?;
// Also switch to new thread so that we can push the first stackframe.
// After this point all accesses will be treated as occurring in the new thread.
let old_thread_id = this.set_active_thread(new_thread_id);
// Perform the function pointer load in the new thread frame
let instance = this.memory.get_fn(fn_ptr)?.as_instance()?;
// Note: the returned value is currently ignored (see the FIXME in
// pthread_join below) because the Rust standard library does not use
// it.
@ -47,6 +55,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
StackPopCleanup::None { cleanup: true },
)?;
// Restore the old active thread frame
this.set_active_thread(old_thread_id);
Ok(0)

src/sync.rs

@ -61,6 +61,8 @@ struct Mutex {
lock_count: usize,
/// The queue of threads waiting for this mutex.
queue: VecDeque<ThreadId>,
/// Data race handle
data_race: DataRaceLockHandle
}
declare_id!(RwLockId);
@ -77,6 +79,10 @@ struct RwLock {
writer_queue: VecDeque<ThreadId>,
/// The queue of reader threads waiting for this lock.
reader_queue: VecDeque<ThreadId>,
/// Data race handle for writers
data_race: DataRaceLockHandle,
/// Data race handle for readers
data_race_reader: DataRaceLockHandle,
}
declare_id!(CondvarId);
@ -94,12 +100,14 @@ struct CondvarWaiter {
#[derive(Default, Debug)]
struct Condvar {
waiters: VecDeque<CondvarWaiter>,
data_race: DataRaceLockHandle,
}
/// The futex state.
#[derive(Default, Debug)]
struct Futex {
waiters: VecDeque<FutexWaiter>,
data_race: DataRaceLockHandle,
}
/// A thread waiting on a futex.
@ -205,6 +213,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mutex.owner = Some(thread);
}
mutex.lock_count = mutex.lock_count.checked_add(1).unwrap();
this.memory.extra.data_race.validate_lock_acquire(&mutex.data_race, thread);
}
/// Try unlocking by decreasing the lock count and returning the old lock
@ -232,6 +241,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
mutex.owner = None;
// The mutex is completely unlocked. Try transferring ownership
// to another thread.
this.memory.extra.data_race.validate_lock_release(&mut mutex.data_race, current_owner);
this.mutex_dequeue_and_lock(id);
}
Some(old_lock_count)
@ -284,15 +294,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
assert!(!this.rwlock_is_write_locked(id), "the lock is write locked");
trace!("rwlock_reader_lock: {:?} now also held (one more time) by {:?}", id, reader);
let count = this.machine.threads.sync.rwlocks[id].readers.entry(reader).or_insert(0);
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
let count = rwlock.readers.entry(reader).or_insert(0);
*count = count.checked_add(1).expect("the reader counter overflowed");
this.memory.extra.data_race.validate_lock_acquire(&rwlock.data_race, reader);
}
/// Try read-unlock the lock for `reader` and potentially give the lock to a new owner.
/// Returns `true` if succeeded, `false` if this `reader` did not hold the lock.
fn rwlock_reader_unlock(&mut self, id: RwLockId, reader: ThreadId) -> bool {
let this = self.eval_context_mut();
match this.machine.threads.sync.rwlocks[id].readers.entry(reader) {
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
match rwlock.readers.entry(reader) {
Entry::Occupied(mut entry) => {
let count = entry.get_mut();
assert!(*count > 0, "rwlock locked with count == 0");
@ -306,8 +319,16 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
Entry::Vacant(_) => return false, // we did not even own this lock
}
this.memory.extra.data_race.validate_lock_release_shared(&mut rwlock.data_race_reader, reader);
// The thread was a reader. If the lock is not held any more, give it to a writer.
if this.rwlock_is_locked(id).not() {
// All the readers are finished, so set the writer data-race handle to the union
// of all reader data-race handles, since the set of readers
// happens-before the next writer
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.data_race.set_values(&rwlock.data_race_reader);
this.rwlock_dequeue_and_lock_writer(id);
}
true
@ -332,7 +353,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
let this = self.eval_context_mut();
assert!(!this.rwlock_is_locked(id), "the rwlock is already locked");
trace!("rwlock_writer_lock: {:?} now held by {:?}", id, writer);
this.machine.threads.sync.rwlocks[id].writer = Some(writer);
let rwlock = &mut this.machine.threads.sync.rwlocks[id];
rwlock.writer = Some(writer);
this.memory.extra.data_race.validate_lock_acquire(&rwlock.data_race, writer);
}
#[inline]
@ -347,6 +370,11 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
}
rwlock.writer = None;
trace!("rwlock_writer_unlock: {:?} unlocked by {:?}", id, expected_writer);
// Release memory to both reader and writer vector clocks
// since this writer happens-before both the union of readers once they are finished
// and the next writer
this.memory.extra.data_race.validate_lock_release(&mut rwlock.data_race, current_writer);
this.memory.extra.data_race.validate_lock_release(&mut rwlock.data_race_reader, current_writer);
// The thread was a writer.
//
// We are prioritizing writers here against the readers. As a
@ -405,10 +433,18 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
/// variable.
fn condvar_signal(&mut self, id: CondvarId) -> Option<(ThreadId, MutexId)> {
let this = self.eval_context_mut();
this.machine.threads.sync.condvars[id]
.waiters
let current_thread = this.get_active_thread();
let condvar = &mut this.machine.threads.sync.condvars[id];
let data_race = &mut this.memory.extra.data_race;
// Each condvar signal happens-before the end of the condvar wake
data_race.validate_lock_release(&mut condvar.data_race, current_thread);
condvar.waiters
.pop_front()
.map(|waiter| (waiter.thread, waiter.mutex))
.map(|waiter| {
data_race.validate_lock_acquire(&mut condvar.data_race, waiter.thread);
(waiter.thread, waiter.mutex)
})
}
#[inline]
@ -420,15 +456,25 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
fn futex_wait(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {
let this = self.eval_context_mut();
let waiters = &mut this.machine.threads.sync.futexes.entry(addr.erase_tag()).or_default().waiters;
let futex = &mut this.machine.threads.sync.futexes.entry(addr.erase_tag()).or_default();
let waiters = &mut futex.waiters;
assert!(waiters.iter().all(|waiter| waiter.thread != thread), "thread is already waiting");
waiters.push_back(FutexWaiter { thread });
}
fn futex_wake(&mut self, addr: Pointer<stacked_borrows::Tag>) -> Option<ThreadId> {
let this = self.eval_context_mut();
let waiters = &mut this.machine.threads.sync.futexes.get_mut(&addr.erase_tag())?.waiters;
waiters.pop_front().map(|waiter| waiter.thread)
let current_thread = this.get_active_thread();
let futex = &mut this.machine.threads.sync.futexes.get_mut(&addr.erase_tag())?;
let data_race = &mut this.memory.extra.data_race;
// Each futex-wake happens-before the end of the futex wait
data_race.validate_lock_release(&mut futex.data_race, current_thread);
let res = futex.waiters.pop_front().map(|waiter| {
data_race.validate_lock_acquire(&futex.data_race, waiter.thread);
waiter.thread
});
res
}
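Hedged aside: the validate_lock_acquire / validate_lock_release calls threaded through this file follow the usual release/acquire clock discipline. Building on the illustrative VClock sketch given above for src/data_race.rs (again, hypothetical names, not Miri's actual implementation), the handles plausibly behave roughly like this:

```rust
/// Clock stored inside a mutex/rwlock/condvar/futex handle (illustrative).
struct LockHandle {
    clock: VClock,
}

/// Exclusive release: the handle carries the releasing thread's history,
/// and the thread moves to a new epoch so later work is not ordered
/// before this release.
fn lock_release(handle: &mut LockHandle, thread_clock: &mut VClock, thread: usize) {
    handle.clock = thread_clock.clone();
    thread_clock.increment(thread);
}

/// Shared (reader) release: merge into the handle, since several readers
/// may release into the same handle before the next writer acquires it.
fn lock_release_shared(handle: &mut LockHandle, thread_clock: &mut VClock, thread: usize) {
    handle.clock.join(thread_clock);
    thread_clock.increment(thread);
}

/// Acquire: the acquiring thread observes everything released into the handle.
fn lock_acquire(handle: &LockHandle, thread_clock: &mut VClock) {
    thread_clock.join(&handle.clock);
}
```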
fn futex_remove_waiter(&mut self, addr: Pointer<stacked_borrows::Tag>, thread: ThreadId) {

src/thread.rs

@ -327,7 +327,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
}
/// Mark that the active thread tries to join the thread with `joined_thread_id`.
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
fn join_thread(&mut self, joined_thread_id: ThreadId, data_race: &data_race::GlobalState) -> InterpResult<'tcx> {
if self.threads[joined_thread_id].join_status != ThreadJoinStatus::Joinable {
throw_ub_format!("trying to join a detached or already joined thread");
}
@ -351,6 +351,9 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
self.active_thread,
joined_thread_id
);
} else {
// The thread has already terminated; establish the join happens-before edge.
data_race.thread_joined(self.active_thread, joined_thread_id);
}
Ok(())
}
@ -425,7 +428,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// Wakes up threads joining on the active one and deallocates thread-local statics.
/// The `AllocId` that can now be freed is returned.
fn thread_terminated(&mut self) -> Vec<AllocId> {
fn thread_terminated(&mut self, data_race: &data_race::GlobalState) -> Vec<AllocId> {
let mut free_tls_statics = Vec::new();
{
let mut thread_local_statics = self.thread_local_alloc_ids.borrow_mut();
@ -443,6 +446,8 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
// Check if we need to unblock any threads.
for (i, thread) in self.threads.iter_enumerated_mut() {
if thread.state == ThreadState::BlockedOnJoin(self.active_thread) {
// The thread has terminated, mark happens-before edge to joining thread
data_race.thread_joined(i, self.active_thread);
trace!("unblocking {:?} because {:?} terminated", i, self.active_thread);
thread.state = ThreadState::Enabled;
}
@ -456,7 +461,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
/// used in stateless model checkers such as Loom: run the active thread as
/// long as we can and switch only when we have to (the active thread was
/// blocked, terminated, or has explicitly asked to be preempted).
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
fn schedule(&mut self, data_race: &data_race::GlobalState) -> InterpResult<'tcx, SchedulingAction> {
// Check whether the thread has **just** terminated (`check_terminated`
// checks whether the thread has popped all its stack and if yes, sets
// the thread state to terminated).
@ -501,6 +506,7 @@ impl<'mir, 'tcx: 'mir> ThreadManager<'mir, 'tcx> {
if thread.state == ThreadState::Enabled {
if !self.yield_active_thread || id != self.active_thread {
self.active_thread = id;
data_race.thread_set_active(self.active_thread);
break;
}
}
@ -554,7 +560,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn create_thread(&mut self) -> ThreadId {
let this = self.eval_context_mut();
this.machine.threads.create_thread()
let id = this.machine.threads.create_thread();
this.memory.extra.data_race.thread_created(id);
id
}
#[inline]
@ -566,12 +574,15 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn join_thread(&mut self, joined_thread_id: ThreadId) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
this.machine.threads.join_thread(joined_thread_id)
let data_race = &*this.memory.extra.data_race;
this.machine.threads.join_thread(joined_thread_id, data_race)?;
Ok(())
}
#[inline]
fn set_active_thread(&mut self, thread_id: ThreadId) -> ThreadId {
let this = self.eval_context_mut();
this.memory.extra.data_race.thread_set_active(thread_id);
this.machine.threads.set_active_thread_id(thread_id)
}
@ -626,6 +637,9 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn set_active_thread_name(&mut self, new_thread_name: Vec<u8>) {
let this = self.eval_context_mut();
if let Ok(string) = String::from_utf8(new_thread_name.clone()) {
this.memory.extra.data_race.thread_set_name(string);
}
this.machine.threads.set_thread_name(new_thread_name);
}
@ -695,7 +709,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn schedule(&mut self) -> InterpResult<'tcx, SchedulingAction> {
let this = self.eval_context_mut();
this.machine.threads.schedule()
let data_race = &*this.memory.extra.data_race;
this.machine.threads.schedule(data_race)
}
/// Handles thread termination of the active thread: wakes up threads joining on this one,
@ -705,7 +720,8 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
#[inline]
fn thread_terminated(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
for alloc_id in this.machine.threads.thread_terminated() {
let data_race = &*this.memory.extra.data_race;
for alloc_id in this.machine.threads.thread_terminated(data_race) {
let ptr = this.memory.global_base_pointer(alloc_id.into())?;
this.memory.deallocate(ptr, None, MiriMemoryKind::Tls.into())?;
}
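Finally, thread_created and thread_joined in data_race::GlobalState establish the spawn and join happens-before edges that this file wires into the scheduler. In terms of the same illustrative VClock sketch from earlier on this page (hypothetical names, not the actual GlobalState API), those edges amount to:

```rust
/// Spawn: the child starts after everything the parent has done so far,
/// and the parent moves to a new epoch so later parent work is not
/// ordered before the child's start.
fn on_thread_created(parent_clock: &mut VClock, child_clock: &mut VClock, parent: usize) {
    child_clock.join(parent_clock);
    parent_clock.increment(parent);
}

/// Join: the joining thread resumes only after everything the joined
/// thread did, so it absorbs the terminated thread's clock.
fn on_thread_joined(joiner_clock: &mut VClock, terminated_clock: &VClock) {
    joiner_clock.join(terminated_clock);
}
```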