Use proper atomic rmw for {mutex, rwlock, cond, srwlock}_get_or_create_id
parent f84a976a77 · commit 1a7f6d504a
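
Previously each `*_get_or_create_id` shim read the id field and, if it was still 0, allocated a new id and wrote it back as two separate, non-atomic steps. As the hunks below show, that check-and-initialise is now a single `atomic_compare_exchange_scalar`: the winning thread installs the next free id, and a thread that loses the race adopts the value the compare-exchange returned instead of allocating a second id for the same lock object. The sketch below is not Miri code; it is a minimal standalone illustration of that pattern using `std::sync::atomic`, with a hypothetical `get_or_create_id` helper. In the real shims the candidate id is peeked via the new `*_next_id` functions and only turned into interpreter state by `*_create` once the compare-exchange succeeds.

    use std::sync::atomic::{AtomicU32, Ordering};

    /// One lazily initialised id field; 0 means "no id assigned yet".
    /// Returns the id every caller should use from now on.
    fn get_or_create_id(id_field: &AtomicU32, fresh_id: u32) -> u32 {
        match id_field.compare_exchange(0, fresh_id, Ordering::Relaxed, Ordering::Relaxed) {
            // We won the race: the field now holds our id.
            Ok(_) => fresh_id,
            // Someone else initialised the field first: adopt their id,
            // mirroring the `Ok(MutexId::from_u32(old...))` arm in the diff.
            Err(existing) => existing,
        }
    }

    fn main() {
        let field = AtomicU32::new(0);
        assert_eq!(get_or_create_id(&field, 7), 7); // first caller installs 7
        assert_eq!(get_or_create_id(&field, 9), 7); // later callers reuse 7; 9 is discarded
    }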
@@ -441,6 +441,23 @@ impl MemoryCellClocks
 /// Evaluation context extensions.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
+    /// Calculates the MPlaceTy given the offset and layout of an access on an operand
+    fn offset_and_layout_to_place(
+        &self,
+        op: &OpTy<'tcx, Tag>,
+        offset: u64,
+        layout: TyAndLayout<'tcx>,
+    ) -> InterpResult<'tcx, MPlaceTy<'tcx, Tag>> {
+        let this = self.eval_context_ref();
+        let op_place = this.deref_operand(op)?;
+        let offset = Size::from_bytes(offset);
+
+        // Ensure that the access is within bounds.
+        assert!(op_place.layout.size >= offset + layout.size);
+        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        Ok(value_place)
+    }
+
     /// Atomic variant of read_scalar_at_offset.
     fn read_scalar_at_offset_atomic(
         &self,

@@ -450,12 +467,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicReadOp,
     ) -> InterpResult<'tcx, ScalarMaybeUninit<Tag>> {
         let this = self.eval_context_ref();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.read_scalar_atomic(&value_place, atomic)
     }

@@ -469,12 +481,7 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: MiriEvalContextExt<'mir, 'tcx> {
         atomic: AtomicWriteOp,
     ) -> InterpResult<'tcx> {
         let this = self.eval_context_mut();
-        let op_place = this.deref_operand(op)?;
-        let offset = Size::from_bytes(offset);
-
-        // Ensure that the following read at an offset is within bounds.
-        assert!(op_place.layout.size >= offset + layout.size);
-        let value_place = op_place.offset(offset, MemPlaceMeta::None, layout, this)?;
+        let value_place = this.offset_and_layout_to_place(op, offset, layout)?;
         this.write_scalar_atomic(value.into(), &value_place, atomic)
     }

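The `offset_and_layout_to_place` helper introduced above centralises the bounds-checked field projection that `read_scalar_at_offset_atomic` and `write_scalar_at_offset_atomic` previously inlined. Outside of interpreter types the same shape looks roughly like the following sketch, which uses plain byte slices and hypothetical names rather than Miri's `OpTy`/`MPlaceTy` machinery:

    /// Rough analogue of `offset_and_layout_to_place`: given the bytes backing an
    /// object, a field offset, and the field's size, assert that the access stays
    /// in bounds and return a view of just that field.
    fn field_bytes(object: &[u8], offset: usize, field_size: usize) -> &[u8] {
        // Same shape as `assert!(op_place.layout.size >= offset + layout.size)`.
        assert!(object.len() >= offset + field_size);
        &object[offset..offset + field_size]
    }

    fn main() {
        let storage = [0u8; 40]; // opaque lock storage; id field assumed at byte offset 4
        assert_eq!(field_bytes(&storage, 4, 4), &[0u8, 0, 0, 0][..]); // still the "unset" value
    }
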
@@ -112,15 +112,23 @@ fn mutex_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     mutex_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, MutexId> {
-    let id = mutex_get_id(ecx, mutex_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid mutex id. Need to allocate
-        // a new mutex.
+    let value_place = ecx.offset_and_layout_to_place(mutex_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.mutex_next_id().to_u32_scalar().into(),
+            AtomicRwOp::Relaxed,
+            AtomicReadOp::Relaxed,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.mutex_create();
-        mutex_set_id(ecx, mutex_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(MutexId::from_u32(id))
+        Ok(MutexId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

@@ -156,15 +164,23 @@ fn rwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     rwlock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = rwlock_get_id(ecx, rwlock_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new read-write lock.
+    let value_place = ecx.offset_and_layout_to_place(rwlock_op, 4, ecx.machine.layouts.u32)?;
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::Relaxed,
+            AtomicReadOp::Relaxed,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        rwlock_set_id(ecx, rwlock_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

@@ -228,15 +244,24 @@ fn cond_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     cond_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, CondvarId> {
-    let id = cond_get_id(ecx, cond_op)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid conditional variable id.
-        // Need to allocate a new id.
+    let value_place = ecx.offset_and_layout_to_place(cond_op, 4, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.condvar_next_id().to_u32_scalar().into(),
+            AtomicRwOp::Relaxed,
+            AtomicReadOp::Relaxed,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.condvar_create();
-        cond_set_id(ecx, cond_op, id.to_u32_scalar())?;
         Ok(id)
     } else {
-        Ok(CondvarId::from_u32(id))
+        Ok(CondvarId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

@@ -7,15 +7,24 @@ fn srwlock_get_or_create_id<'mir, 'tcx: 'mir>(
     ecx: &mut MiriEvalContext<'mir, 'tcx>,
     lock_op: &OpTy<'tcx, Tag>,
 ) -> InterpResult<'tcx, RwLockId> {
-    let id = ecx.read_scalar_at_offset(lock_op, 0, ecx.machine.layouts.u32)?.to_u32()?;
-    if id == 0 {
-        // 0 is a default value and also not a valid rwlock id. Need to allocate
-        // a new rwlock.
+    let value_place = ecx.offset_and_layout_to_place(lock_op, 0, ecx.machine.layouts.u32)?;
+
+    let (old, success) = ecx
+        .atomic_compare_exchange_scalar(
+            &value_place,
+            &ImmTy::from_uint(0u32, ecx.machine.layouts.u32),
+            ecx.rwlock_next_id().to_u32_scalar().into(),
+            AtomicRwOp::AcqRel,
+            AtomicReadOp::Acquire,
+            false,
+        )?
+        .to_scalar_pair()?;
+
+    if success.to_bool().expect("compare_exchange's second return value is a bool") {
         let id = ecx.rwlock_create();
-        ecx.write_scalar_at_offset(lock_op, 0, id.to_u32_scalar(), ecx.machine.layouts.u32)?;
         Ok(id)
     } else {
-        Ok(RwLockId::from_u32(id))
+        Ok(RwLockId::from_u32(old.to_u32().expect("layout is u32")))
     }
 }

src/sync.rs (21 additions)
@@ -208,6 +208,13 @@ trait EvalContextExtPriv<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
 // situations.
 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
+    #[inline]
+    /// Peek the id of the next mutex
+    fn mutex_next_id(&self) -> MutexId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.mutexes.next_index()
+    }
+
     #[inline]
     /// Create state for a new mutex.
     fn mutex_create(&mut self) -> MutexId {

@@ -290,6 +297,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(thread);
     }

+    #[inline]
+    /// Peek the id of the next read write lock
+    fn rwlock_next_id(&self) -> RwLockId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.rwlocks.next_index()
+    }
+
     #[inline]
     /// Create state for a new read write lock.
     fn rwlock_create(&mut self) -> RwLockId {

@@ -438,6 +452,13 @@ pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx
         this.block_thread(writer);
     }

+    #[inline]
+    /// Peek the id of the next Condvar
+    fn condvar_next_id(&self) -> CondvarId {
+        let this = self.eval_context_ref();
+        this.machine.threads.sync.condvars.next_index()
+    }
+
     #[inline]
     /// Create state for a new conditional variable.
     fn condvar_create(&mut self) -> CondvarId {
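
The `mutex_next_id`, `rwlock_next_id`, and `condvar_next_id` peeks exist because the compare-exchange in the shims above has to publish an id before the matching `*_create` call actually allocates it, so peek and create must hand out the same index. A toy sketch of that arena-style pattern follows, with a plain `Vec` standing in for Miri's internal tables and hypothetical names (the diff's comments note that 0 is reserved as the "not yet assigned" value; the sketch ignores that detail):

    /// Toy id arena: ids are just indices into a vector, and `next_index` peeks
    /// at the id the next `create` call will return.
    struct Arena<T> {
        items: Vec<T>,
    }

    impl<T: Default> Arena<T> {
        fn next_index(&self) -> u32 {
            self.items.len() as u32
        }

        fn create(&mut self) -> u32 {
            let id = self.next_index();
            self.items.push(T::default());
            id
        }
    }

    fn main() {
        let mut mutexes: Arena<()> = Arena { items: Vec::new() };
        let peeked = mutexes.next_index(); // what the compare-exchange would publish
        let created = mutexes.create();    // only performed after a successful compare-exchange
        assert_eq!(peeked, created);       // peeking and creating agree on the id
    }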