//! Implement thread-local storage.

use std::collections::btree_map::Entry as BTreeEntry;
use std::collections::hash_map::Entry as HashMapEntry;
use std::collections::BTreeMap;

use log::trace;

use rustc_data_structures::fx::FxHashMap;
use rustc_middle::ty;
use rustc_target::abi::{HasDataLayout, Size};
use rustc_target::spec::abi::Abi;

use crate::*;

pub type TlsKey = u128;

#[derive(Clone, Debug)]
pub struct TlsEntry<'tcx> {
    /// The data for this key. `None` is used to represent NULL.
    /// (We normalize this early to avoid having to do a NULL-ptr-test each time we access the data.)
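    ///
    /// For example (illustrative): storing a NULL scalar via `store_tls` removes
    /// the entry for that thread, and a later `load_tls` turns the resulting map
    /// miss back into NULL, so no per-access NULL test is needed.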
    data: BTreeMap<ThreadId, Scalar<Tag>>,
    dtor: Option<ty::Instance<'tcx>>,
}

#[derive(Clone, Debug)]
struct RunningDtorsState {
    /// The last TlsKey used to retrieve a TLS destructor. `None` means that we
    /// have not tried to retrieve a TLS destructor yet or that we already tried
    /// all keys.
    last_dtor_key: Option<TlsKey>,
}

#[derive(Debug)]
pub struct TlsData<'tcx> {
    /// The key to use for the next thread-local allocation.
    next_key: TlsKey,

    /// pthreads-style thread-local storage.
    keys: BTreeMap<TlsKey, TlsEntry<'tcx>>,

    /// A single per-thread destructor of the thread-local storage (that's how
    /// things work on macOS), with a data argument.
    macos_thread_dtors: BTreeMap<ThreadId, (ty::Instance<'tcx>, Scalar<Tag>)>,

    /// State for currently running TLS dtors. If this map contains a key for a
    /// specific thread, it means that we are in the "destruct" phase, during
    /// which some operations are UB.
    dtors_running: FxHashMap<ThreadId, RunningDtorsState>,
}

impl<'tcx> Default for TlsData<'tcx> {
    fn default() -> Self {
        TlsData {
            next_key: 1, // start with 1 as we must not use 0 on Windows
            keys: Default::default(),
            macos_thread_dtors: Default::default(),
            dtors_running: Default::default(),
        }
    }
}

impl<'tcx> TlsData<'tcx> {
    /// Generate a new TLS key with the given destructor.
    /// `max_size` determines the integer size the key has to fit in.
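    ///
    /// A minimal sketch of the intended contract (illustrative, not a doctest;
    /// `tls` is a hypothetical `TlsData` value):
    ///
    /// ```ignore
    /// // With a pthreads-style 32-bit key type, keys must stay below 2^32.
    /// let key = tls.create_tls_key(None, Size::from_bits(32))?;
    /// assert!(key < (1u128 << 32));
    /// ```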
    pub fn create_tls_key(
        &mut self,
        dtor: Option<ty::Instance<'tcx>>,
        max_size: Size,
    ) -> InterpResult<'tcx, TlsKey> {
        let new_key = self.next_key;
        self.next_key += 1;
        self.keys.try_insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap();
        trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);

        if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) {
            throw_unsup_format!("we ran out of TLS key space");
        }
        Ok(new_key)
    }

    pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> {
        match self.keys.remove(&key) {
            Some(_) => {
                trace!("TLS key {} removed", key);
                Ok(())
            }
            None => throw_ub_format!("removing a non-existing TLS key: {}", key),
        }
    }
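
    /// Load the value stored for `key` on `thread_id`. A missing entry reads
    /// back as NULL, matching the normalization described on `TlsEntry::data`.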
    pub fn load_tls(
        &self,
        key: TlsKey,
        thread_id: ThreadId,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx, Scalar<Tag>> {
        match self.keys.get(&key) {
            Some(TlsEntry { data, .. }) => {
                let value = data.get(&thread_id).copied();
                trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
                Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
            }
            None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
        }
    }
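
    /// Store `new_data` for `key` on `thread_id`. Storing NULL removes the
    /// entry, maintaining the invariant that NULL is never kept in the map.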
    pub fn store_tls(
        &mut self,
        key: TlsKey,
        thread_id: ThreadId,
        new_data: Scalar<Tag>,
        cx: &impl HasDataLayout,
    ) -> InterpResult<'tcx> {
        match self.keys.get_mut(&key) {
            Some(TlsEntry { data, .. }) => {
                if new_data.to_machine_usize(cx)? != 0 {
                    trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, new_data);
                    data.insert(thread_id, new_data);
                } else {
                    trace!("TLS key {} for thread {:?} removed", key, thread_id);
                    data.remove(&thread_id);
                }
                Ok(())
            }
            None => throw_ub_format!("storing to a non-existing TLS key: {}", key),
        }
    }

    /// Set the thread-wide destructor of the thread-local storage for the given
    /// thread. This function is used to implement the `_tlv_atexit` shim on macOS.
    ///
    /// Thread-wide dtors are available only on macOS. There is one destructor
    /// per thread, as can be guessed from the following comment in the
    /// [`_tlv_atexit`
    /// implementation](https://github.com/opensource-apple/dyld/blob/195030646877261f0c8c7ad8b001f52d6a26f514/src/threadLocalVariables.c#L389):
    ///
    ///     // NOTE: this does not need locks because it only operates on current thread data
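    ///
    /// A sketch of the expected call from the `_tlv_atexit` shim (hypothetical
    /// variable names, not a doctest):
    ///
    /// ```ignore
    /// // `_tlv_atexit(dtor, data)`: remember one (dtor, data) pair per thread.
    /// this.machine.tls.set_macos_thread_dtor(active_thread, dtor_instance, data)?;
    /// ```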
    pub fn set_macos_thread_dtor(
        &mut self,
        thread: ThreadId,
        dtor: ty::Instance<'tcx>,
        data: Scalar<Tag>,
    ) -> InterpResult<'tcx> {
        if self.dtors_running.contains_key(&thread) {
            // UB, according to libstd docs.
            throw_ub_format!(
                "setting thread's local storage destructor while destructors are already running"
            );
        }
        if self.macos_thread_dtors.insert(thread, (dtor, data)).is_some() {
            throw_unsup_format!(
                "setting more than one thread local storage destructor for the same thread is not supported"
            );
        }
        Ok(())
    }

    /// Returns a dtor, its argument and its index, if one is supposed to run.
    /// `key` is the last dtor that was run; we return the *next* one after that.
    ///
    /// An optional destructor function may be associated with each key value.
    /// At thread exit, if a key value has a non-NULL destructor pointer,
    /// and the thread has a non-NULL value associated with that key,
    /// the value of the key is set to NULL, and then the function pointed
    /// to is called with the previously associated value as its sole argument.
    /// The order of destructor calls is unspecified if more than one destructor
    /// exists for a thread when it exits.
    ///
    /// If, after all the destructors have been called for all non-NULL values
    /// with associated destructors, there are still some non-NULL values with
    /// associated destructors, then the process is repeated.
    /// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
    /// calls for outstanding non-NULL values, there are still some non-NULL values
    /// with associated destructors, implementations may stop calling destructors,
    /// or they may continue calling destructors until no non-NULL values with
    /// associated destructors exist, even though this might result in an infinite loop.
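    ///
    /// A sketch of the resume-after-`key` contract (hypothetical `tls` and
    /// `thread` values, not a doctest):
    ///
    /// ```ignore
    /// // `None` starts at the first key; `Some(k)` resumes strictly after `k`,
    /// // so chaining the returned keys visits every pending dtor exactly once.
    /// if let Some((_dtor, _data, k)) = tls.fetch_tls_dtor(None, thread) {
    ///     let _next = tls.fetch_tls_dtor(Some(k), thread);
    /// }
    /// ```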
|
2017-08-10 10:48:38 -05:00
|
|
|
fn fetch_tls_dtor(
|
|
|
|
&mut self,
|
|
|
|
key: Option<TlsKey>,
|
2020-03-16 18:48:44 -05:00
|
|
|
thread_id: ThreadId,
|
2020-04-16 21:40:02 -05:00
|
|
|
) -> Option<(ty::Instance<'tcx>, Scalar<Tag>, TlsKey)> {
|
2021-03-17 17:34:44 -05:00
|
|
|
use std::ops::Bound::*;
|
2018-05-26 10:07:34 -05:00
|
|
|
|
2018-09-19 18:00:59 -05:00
|
|
|
let thread_local = &mut self.keys;
|
2017-07-31 06:30:44 -05:00
|
|
|
let start = match key {
|
|
|
|
Some(key) => Excluded(key),
|
|
|
|
None => Unbounded,
|
|
|
|
};
|
2021-05-16 04:28:01 -05:00
|
|
|
for (&key, TlsEntry { data, dtor }) in thread_local.range_mut((start, Unbounded)) {
|
2020-03-16 18:48:44 -05:00
|
|
|
match data.entry(thread_id) {
|
2020-04-29 17:12:09 -05:00
|
|
|
BTreeEntry::Occupied(entry) => {
|
2020-03-16 18:48:44 -05:00
|
|
|
if let Some(dtor) = dtor {
|
2020-04-29 15:16:22 -05:00
|
|
|
// Set TLS data to NULL, and call dtor with old value.
|
2020-04-26 22:50:58 -05:00
|
|
|
let data_scalar = entry.remove();
|
2020-04-16 21:40:02 -05:00
|
|
|
let ret = Some((*dtor, data_scalar, key));
|
2020-03-16 18:48:44 -05:00
|
|
|
return ret;
|
|
|
|
}
|
2017-07-31 06:30:44 -05:00
|
|
|
}
|
2020-04-29 17:12:09 -05:00
|
|
|
BTreeEntry::Vacant(_) => {}
|
2017-07-31 06:30:44 -05:00
|
|
|
}
|
|
|
|
}
|
2018-08-07 08:22:11 -05:00
|
|
|
None
|
2017-07-31 06:30:44 -05:00
|
|
|
}

    /// Set that dtors are running for `thread`. It is guaranteed not to change
    /// the existing values stored in `dtors_running` for this thread. Returns
    /// `true` if dtors for `thread` are already running.
    fn set_dtors_running_for_thread(&mut self, thread: ThreadId) -> bool {
        match self.dtors_running.entry(thread) {
            HashMapEntry::Occupied(_) => true,
            HashMapEntry::Vacant(entry) => {
                // We cannot just do `self.dtors_running.insert` because that
                // would overwrite `last_dtor_key` with `None`.
                entry.insert(RunningDtorsState { last_dtor_key: None });
                false
            }
        }
    }

    /// Delete all TLS entries for the given thread. This function should be
    /// called after all TLS destructors have already finished.
    fn delete_all_thread_tls(&mut self, thread_id: ThreadId) {
        for TlsEntry { data, .. } in self.keys.values_mut() {
            data.remove(&thread_id);
        }
    }
}

impl<'mir, 'tcx: 'mir> EvalContextPrivExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
trait EvalContextPrivExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Schedule TLS destructors for the main thread on Windows. The
    /// implementation assumes that we do not support concurrency on Windows
    /// yet.
    fn schedule_windows_tls_dtors(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread();
        assert_eq!(this.get_total_thread_count(), 1, "concurrency on Windows is not supported");

        // Windows has a special magic linker section that is run on certain events.
        // Instead of searching for that section and supporting arbitrary hooks in there
        // (that would be basically https://github.com/rust-lang/miri/issues/450),
        // we specifically look up the static in libstd that we know is placed
        // in that section.
        let thread_callback = this.eval_path_scalar(&[
            "std",
            "sys",
            "windows",
            "thread_local_key",
            "p_thread_callback",
        ])?;
        let thread_callback =
            this.get_ptr_fn(this.scalar_to_ptr(thread_callback))?.as_instance()?;

        // The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
        let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_THREAD_DETACH"])?;
        let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
        this.call_function(
            thread_callback,
            Abi::System { unwind: false },
            &[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
            Some(&ret_place),
            StackPopCleanup::Root { cleanup: true },
        )?;

        this.enable_thread(active_thread);
        Ok(())
    }

    /// Schedule the macOS thread destructor of the thread-local storage to be
    /// executed. Returns `true` if scheduled.
    ///
    /// Note: It is safe to call this function on other Unixes as well.
    fn schedule_macos_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let thread_id = this.get_active_thread();
        if let Some((instance, data)) = this.machine.tls.macos_thread_dtors.remove(&thread_id) {
            trace!("Running macos dtor {:?} on {:?} at {:?}", instance, data, thread_id);

            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
            this.call_function(
                instance,
                Abi::C { unwind: false },
                &[data.into()],
                Some(&ret_place),
                StackPopCleanup::Root { cleanup: true },
            )?;

            // Enable the thread so that it steps through the destructor which
            // we just scheduled. Since we deleted the destructor, it is
            // guaranteed that we will not schedule it again. The `dtors_running`
            // flag will prevent the code from adding the destructor again.
            this.enable_thread(thread_id);
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Schedule a pthread TLS destructor. Returns `true` if it found
    /// a destructor to schedule, and `false` otherwise.
    fn schedule_next_pthread_tls_dtor(&mut self) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread();

        assert!(this.has_terminated(active_thread), "running TLS dtors for non-terminated thread");
        // Fetch next dtor after `key`.
        let last_key = this.machine.tls.dtors_running[&active_thread].last_dtor_key.clone();
        let dtor = match this.machine.tls.fetch_tls_dtor(last_key, active_thread) {
            dtor @ Some(_) => dtor,
            // We ran each dtor once, start over from the beginning.
            None => this.machine.tls.fetch_tls_dtor(None, active_thread),
        };
        if let Some((instance, ptr, key)) = dtor {
            this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key =
                Some(key);
            trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, active_thread);
            assert!(
                ptr.to_machine_usize(this).unwrap() != 0,
                "data can't be NULL when dtor is called!"
            );

            let ret_place = MPlaceTy::dangling(this.machine.layouts.unit).into();
            this.call_function(
                instance,
                Abi::C { unwind: false },
                &[ptr.into()],
                Some(&ret_place),
                StackPopCleanup::Root { cleanup: true },
            )?;

            this.enable_thread(active_thread);
            return Ok(true);
        }
        this.machine.tls.dtors_running.get_mut(&active_thread).unwrap().last_dtor_key = None;

        Ok(false)
    }
}

impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Schedule an active thread's TLS destructor to run on the active thread.
    /// Note that this function does not run the destructors itself, it just
    /// schedules them one by one each time it is called and reenables the
    /// thread so that it can be executed normally by the main execution loop.
    ///
    /// Note: we consistently run TLS destructors for all threads, including the
    /// main thread. However, it is not clear that we should run the TLS
    /// destructors for the main thread. See issue:
    /// <https://github.com/rust-lang/rust/issues/28129>.
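    ///
    /// A sketch of the expected driver (hypothetical caller, not a doctest):
    ///
    /// ```ignore
    /// // The scheduler calls this each time a terminated thread gets a turn;
    /// // every call schedules at most one destructor, until all dtors are done
    /// // and the thread's TLS is deleted.
    /// this.schedule_next_tls_dtor_for_active_thread()?;
    /// ```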
    fn schedule_next_tls_dtor_for_active_thread(&mut self) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        let active_thread = this.get_active_thread();
        trace!("schedule_next_tls_dtor_for_active_thread on thread {:?}", active_thread);

        if !this.machine.tls.set_dtors_running_for_thread(active_thread) {
            // This is the first time we got asked to schedule a destructor. The
            // Windows schedule destructor function must be called exactly once,
            // this is why it is in this block.
            if this.tcx.sess.target.os == "windows" {
                // On Windows, we signal that the thread quit by starting the
                // relevant function, reenabling the thread, and going back to
                // the scheduler.
                this.schedule_windows_tls_dtors()?;
                return Ok(());
            }
        }
        // The remaining dtors make some progress each time around the scheduler loop,
        // until they return `false` to indicate that they are done.

        // The macOS thread-wide destructor runs "before any TLS slots get
        // freed", so do that first.
        if this.schedule_macos_tls_dtor()? {
            // We have scheduled a macOS dtor to run on the thread. Execute it
            // to completion and come back here. Scheduling a destructor
            // destroys it, so we will not enter this branch again.
            return Ok(());
        }
        if this.schedule_next_pthread_tls_dtor()? {
            // We have scheduled a pthread destructor and removed it from the
            // destructors list. Run it to completion and come back here.
            return Ok(());
        }

        // All dtors done!
        this.machine.tls.delete_all_thread_tls(active_thread);
        this.thread_terminated()?;

        Ok(())
    }
}