rust/src/shims/tls.rs

//! Implement thread-local storage.
use std::collections::BTreeMap;
use std::collections::btree_map::Entry;
use log::trace;
use rustc_middle::ty;
use rustc_target::abi::{Size, HasDataLayout};
use crate::{HelpersEvalContextExt, ThreadsEvalContextExt, InterpResult, MPlaceTy, Scalar, StackPopCleanup, Tag};
use crate::machine::{ThreadId, ThreadState};
pub type TlsKey = u128;
#[derive(Clone, Debug)]
pub struct TlsEntry<'tcx> {
/// The data for this key, one value per thread. A thread with no entry here has the value NULL.
/// (We normalize NULL to a missing entry early, to avoid having to do a NULL-ptr test each time the data is accessed.)
data: BTreeMap<ThreadId, Scalar<Tag>>,
dtor: Option<ty::Instance<'tcx>>,
}
#[derive(Debug)]
pub struct TlsData<'tcx> {
/// The Key to use for the next thread-local allocation.
next_key: TlsKey,
/// pthreads-style thread-local storage.
keys: BTreeMap<TlsKey, TlsEntry<'tcx>>,
/// A single global dtor (that's how things work on macOS) with a data argument.
global_dtor: Option<(ty::Instance<'tcx>, Scalar<Tag>)>,
/// Whether we are in the "destruct" phase, during which some operations are UB.
dtors_running: bool,
}
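
// Rough lifecycle (a sketch, not a normative description): a shim such as
// `pthread_key_create` allocates a key via `create_tls_key`, the interpreted
// program then reads and writes its slots through `load_tls`/`store_tls`, and
// at shutdown `run_tls_dtors` (below) walks the keys of every terminated thread
// and invokes the registered destructors.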
impl<'tcx> Default for TlsData<'tcx> {
fn default() -> Self {
TlsData {
next_key: 1, // start with 1 as we must not use 0 on Windows
keys: Default::default(),
global_dtor: None,
dtors_running: false,
}
}
}
impl<'tcx> TlsData<'tcx> {
/// Generate a new TLS key with the given destructor.
/// `max_size` determines the integer size the key has to fit in.
pub fn create_tls_key(&mut self, dtor: Option<ty::Instance<'tcx>>, max_size: Size) -> InterpResult<'tcx, TlsKey> {
let new_key = self.next_key;
self.next_key += 1;
self.keys.insert(new_key, TlsEntry { data: Default::default(), dtor }).unwrap_none();
trace!("New TLS key allocated: {} with dtor {:?}", new_key, dtor);
if max_size.bits() < 128 && new_key >= (1u128 << max_size.bits() as u128) {
throw_unsup_format!("we ran out of TLS key space");
}
Ok(new_key)
}
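
// Illustrative sketch (hypothetical, not code from this file): a
// `pthread_key_create` shim would typically allocate a key and write it back to
// the guest roughly like this, where `key_place` and `key_layout` describe the
// guest's `pthread_key_t` out-pointer:
//
//     let key = this.machine.tls.create_tls_key(dtor, key_layout.size)?;
//     this.write_scalar(Scalar::from_uint(key, key_layout.size), key_place.into())?;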
pub fn delete_tls_key(&mut self, key: TlsKey) -> InterpResult<'tcx> {
match self.keys.remove(&key) {
Some(_) => {
trace!("TLS key {} removed", key);
Ok(())
}
None => throw_ub_format!("removing a non-existing TLS key: {}", key),
}
}
pub fn load_tls(
&self,
key: TlsKey,
thread_id: ThreadId,
cx: &impl HasDataLayout,
) -> InterpResult<'tcx, Scalar<Tag>> {
match self.keys.get(&key) {
Some(TlsEntry { data, .. }) => {
let value = data.get(&thread_id).cloned();
trace!("TLS key {} for thread {:?} loaded: {:?}", key, thread_id, value);
Ok(value.unwrap_or_else(|| Scalar::null_ptr(cx).into()))
}
None => throw_ub_format!("loading from a non-existing TLS key: {}", key),
}
}
pub fn store_tls(
&mut self,
key: TlsKey,
thread_id: ThreadId,
new_data: Option<Scalar<Tag>>,
) -> InterpResult<'tcx> {
match self.keys.get_mut(&key) {
Some(TlsEntry { data, .. }) => {
match new_data {
Some(ptr) => {
trace!("TLS key {} for thread {:?} stored: {:?}", key, thread_id, ptr);
data.insert(thread_id, ptr);
}
None => {
trace!("TLS key {} for thread {:?} removed", key, thread_id);
data.remove(&thread_id);
}
}
Ok(())
}
None => throw_ub_format!("storing to a non-existing TLS key: {}", key),
}
}
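
// Illustrative sketch (hypothetical, not taken from this file): shims like
// `pthread_getspecific`/`pthread_setspecific` would call into this state roughly
// as follows, where `active_thread` is the currently scheduled thread and a
// guest NULL pointer has already been normalized to `None`:
//
//     let value = this.machine.tls.load_tls(key, active_thread, this)?;   // pthread_getspecific
//     this.machine.tls.store_tls(key, active_thread, new_data)?;          // pthread_setspecific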
pub fn set_global_dtor(&mut self, dtor: ty::Instance<'tcx>, data: Scalar<Tag>) -> InterpResult<'tcx> {
if self.dtors_running {
// UB, according to libstd docs.
throw_ub_format!("setting global destructor while destructors are already running");
}
if self.global_dtor.is_some() {
throw_unsup_format!("setting more than one global destructor is not supported");
}
self.global_dtor = Some((dtor, data));
Ok(())
}
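
// (Sketch of the intended use of `set_global_dtor`: on macOS, libstd registers
// its TLS destructor through `_tlv_atexit`; the corresponding shim is expected
// to forward the destructor function pointer and its data argument here.)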
/// Returns a dtor, the thread it belongs to, its argument, and its key, if one is supposed to run.
/// `key` is the key of the last dtor that was run; we return the *next* one after that.
///
/// An optional destructor function may be associated with each key value.
/// At thread exit, if a key value has a non-NULL destructor pointer,
/// and the thread has a non-NULL value associated with that key,
/// the value of the key is set to NULL, and then the function pointed
/// to is called with the previously associated value as its sole argument.
/// The order of destructor calls is unspecified if more than one destructor
/// exists for a thread when it exits.
///
/// If, after all the destructors have been called for all non-NULL values
/// with associated destructors, there are still some non-NULL values with
/// associated destructors, then the process is repeated.
/// If, after at least {PTHREAD_DESTRUCTOR_ITERATIONS} iterations of destructor
/// calls for outstanding non-NULL values, there are still some non-NULL values
/// with associated destructors, implementations may stop calling destructors,
/// or they may continue calling destructors until no non-NULL values with
/// associated destructors exist, even though this might result in an infinite loop.
fn fetch_tls_dtor(
&mut self,
key: Option<TlsKey>,
thread_id: ThreadId,
) -> Option<(ty::Instance<'tcx>, ThreadId, Scalar<Tag>, TlsKey)> {
use std::collections::Bound::*;
let thread_local = &mut self.keys;
let start = match key {
Some(key) => Excluded(key),
None => Unbounded,
};
for (&key, TlsEntry { data, dtor }) in thread_local.range_mut((start, Unbounded)) {
match data.entry(thread_id) {
Entry::Occupied(entry) => {
if let Some(dtor) = dtor {
// Only now that we know there is a dtor, set the data for this key
// to NULL (by removing the entry) and hand back the old value,
// as the documentation above specifies.
let (thread_id, data_scalar) = entry.remove_entry();
return Some((*dtor, thread_id, data_scalar, key));
}
}
Entry::Vacant(_) => {}
}
}
None
}
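
// Callers are expected to drive `fetch_tls_dtor` in a loop: start with `None`,
// then pass back the key of the dtor that just ran, and restart from `None`
// once a full pass finds nothing (see `run_tls_dtors` below).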
}
impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
/// Run TLS destructors for all threads.
fn run_tls_dtors(&mut self) -> InterpResult<'tcx> {
let this = self.eval_context_mut();
assert!(!this.machine.tls.dtors_running, "running TLS dtors twice");
this.machine.tls.dtors_running = true;
if this.tcx.sess.target.target.target_os == "windows" {
// Windows has a special magic linker section that is run on certain events.
// Instead of searching for that section and supporting arbitrary hooks in there
// (that would be basically https://github.com/rust-lang/miri/issues/450),
// we specifically look up the static in libstd that we know is placed
// in that section.
let thread_callback = this.eval_path_scalar(&["std", "sys", "windows", "thread_local", "p_thread_callback"])?;
let thread_callback = this.memory.get_fn(thread_callback.not_undef()?)?.as_instance()?;
// The signature of this function is `unsafe extern "system" fn(h: c::LPVOID, dwReason: c::DWORD, pv: c::LPVOID)`.
let reason = this.eval_path_scalar(&["std", "sys", "windows", "c", "DLL_PROCESS_DETACH"])?;
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
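// libstd's callback only inspects `dwReason`; the module handle and the `pv`
// pointer are unused there, so (as far as we can tell) passing NULL for both is fine.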
this.call_function(
thread_callback,
&[Scalar::null_ptr(this).into(), reason.into(), Scalar::null_ptr(this).into()],
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
// step until out of stackframes
this.run()?;
// Windows doesn't have other destructors.
return Ok(());
}
// The macOS global dtor runs "before any TLS slots get freed", so do that first.
if let Some((instance, data)) = this.machine.tls.global_dtor {
trace!("Running global dtor {:?} on {:?}", instance, data);
let ret_place = MPlaceTy::dangling(this.machine.layouts.unit, this).into();
this.call_function(
instance,
&[data.into()],
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
// step until out of stackframes
this.run()?;
}
// Now run the "keyed" destructors.
for (thread_id, thread_state) in this.get_all_thread_ids_with_states() {
assert!(thread_state == ThreadState::Terminated,
"TLS destructors should only run after all threads have terminated");
this.set_active_thread(thread_id)?;
let mut dtor = this.machine.tls.fetch_tls_dtor(None, thread_id);
while let Some((instance, thread_id, ptr, key)) = dtor {
trace!("Running TLS dtor {:?} on {:?} at {:?}", instance, ptr, thread_id);
assert!(!this.is_null(ptr).unwrap(), "Data can't be NULL when dtor is called!");
let ret_place = MPlaceTy::dangling(this.layout_of(this.tcx.mk_unit())?, this).into();
this.call_function(
instance,
&[ptr.into()],
Some(ret_place),
StackPopCleanup::None { cleanup: true },
)?;
// step until out of stackframes
this.run()?;
// Fetch next dtor after `key`.
dtor = match this.machine.tls.fetch_tls_dtor(Some(key), thread_id) {
dtor @ Some(_) => dtor,
// We ran each dtor once, start over from the beginning.
None => this.machine.tls.fetch_tls_dtor(None, thread_id),
};
}
}
Ok(())
}
}
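
// (`run_tls_dtors` is presumably invoked by Miri's driver once the interpreted
// program's start code has finished; this file only provides the hook.)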