kmc-solid: Don't do Box::from_raw(&*(x: Box<T>) as *const T as *mut T)

This pattern seems to be considered illegal by Miri.
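For context, here is a minimal standalone sketch (not the thread code itself) of the pattern being removed and its replacement; the `u32` payload is just a placeholder:

    use core::ptr::NonNull;

    fn questionable() {
        let b: Box<u32> = Box::new(1);
        // Casting `&*b` to `*mut u32` yields a pointer derived from a shared
        // reference; reclaiming ownership through it with `Box::from_raw` is
        // what Miri (Stacked Borrows) rejects when the box is later freed.
        let p = &*b as *const u32 as *mut u32;
        core::mem::forget(b);
        let _ = unsafe { Box::from_raw(p) }; // flagged by Miri
    }

    fn miri_clean() {
        let b: Box<u32> = Box::new(1);
        // `Box::into_raw` gives up ownership and returns a pointer that may be
        // passed back to `Box::from_raw` later; it is never null, so wrapping
        // it in `NonNull::new_unchecked` is sound.
        let p: NonNull<u32> = unsafe { NonNull::new_unchecked(Box::into_raw(b)) };
        let _ = unsafe { Box::from_raw(p.as_ptr()) }; // accepted by Miri
    }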
Author: Tomoaki Kawada
Date:   2022-12-01 11:56:31 +09:00
Parent: f482e55adf
Commit: ae7633f434


@@ -11,18 +11,25 @@
     ffi::CStr,
     hint, io,
     mem::ManuallyDrop,
+    ptr::NonNull,
     sync::atomic::{AtomicUsize, Ordering},
     sys::thread_local_dtor::run_dtors,
     time::Duration,
 };
 
 pub struct Thread {
-    inner: ManuallyDrop<Box<ThreadInner>>,
+    p_inner: NonNull<ThreadInner>,
 
     /// The ID of the underlying task.
     task: abi::ID,
 }
 
+// Safety: There's nothing in `Thread` that ties it to the original creator. It
+// can be dropped by any threads.
+unsafe impl Send for Thread {}
+// Safety: `Thread` provides no methods that take `&self`.
+unsafe impl Sync for Thread {}
+
 /// State data shared between a parent thread and child thread. It's dropped on
 /// a transition to one of the final states.
 struct ThreadInner {
@@ -90,8 +97,9 @@ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
         });
 
         unsafe extern "C" fn trampoline(exinf: isize) {
+            let p_inner: *mut ThreadInner = crate::ptr::from_exposed_addr_mut(exinf as usize);
             // Safety: `ThreadInner` is alive at this point
-            let inner: &ThreadInner = unsafe { &*crate::ptr::from_exposed_addr(exinf as usize) };
+            let inner = unsafe { &*p_inner };
 
             // Safety: Since `trampoline` is called only once for each
             // `ThreadInner` and only `trampoline` touches `start`,
@@ -119,13 +127,13 @@ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
                     // No one will ever join, so we'll ask the collector task to
                     // delete the task.
 
-                    // In this case, `inner`'s ownership has been moved to us,
-                    // And we are responsible for dropping it. The acquire
+                    // In this case, `*p_inner`'s ownership has been moved to
+                    // us, and we are responsible for dropping it. The acquire
                     // ordering is not necessary because the parent thread made
                     // no memory access needing synchronization since the call
                     // to `acre_tsk`.
                     // Safety: See above.
-                    let _ = unsafe { Box::from_raw(inner as *const _ as *mut ThreadInner) };
+                    let _ = unsafe { Box::from_raw(p_inner) };
 
                     // Safety: There are no pinned references to the stack
                     unsafe { terminate_and_delete_current_task() };
@@ -162,13 +170,14 @@ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
             }
         }
 
-        let inner_ptr = (&*inner) as *const ThreadInner;
+        // Safety: `Box::into_raw` returns a non-null pointer
+        let p_inner = unsafe { NonNull::new_unchecked(Box::into_raw(inner)) };
 
         let new_task = ItronError::err_if_negative(unsafe {
             abi::acre_tsk(&abi::T_CTSK {
                 // Activate this task immediately
                 tskatr: abi::TA_ACT,
-                exinf: inner_ptr.expose_addr() as abi::EXINF,
+                exinf: p_inner.as_ptr().expose_addr() as abi::EXINF,
                 // The entry point
                 task: Some(trampoline),
                 // Inherit the calling task's base priority
@@ -180,7 +189,7 @@ pub unsafe fn new(stack: usize, p: Box<dyn FnOnce()>) -> io::Result<Thread> {
         })
         .map_err(|e| e.as_io_error())?;
 
-        Ok(Self { inner: ManuallyDrop::new(inner), task: new_task })
+        Ok(Self { p_inner, task: new_task })
     }
 
     pub fn yield_now() {
@@ -197,8 +206,9 @@ pub fn sleep(dur: Duration) {
         }
     }
 
-    pub fn join(mut self) {
-        let inner = &*self.inner;
+    pub fn join(self) {
+        // Safety: `ThreadInner` is alive at this point
+        let inner = unsafe { self.p_inner.as_ref() };
         // Get the current task ID. Panicking here would cause a resource leak,
         // so just abort on failure.
         let current_task = task::current_task_id_aborting();
@@ -243,8 +253,8 @@ pub fn join(mut self) {
         unsafe { terminate_and_delete_task(self.task) };
 
         // In either case, we are responsible for dropping `inner`.
-        // Safety: The contents of `self.inner` will not be accessed hereafter
-        let _inner = unsafe { ManuallyDrop::take(&mut self.inner) };
+        // Safety: The contents of `*p_inner` will not be accessed hereafter
+        let _inner = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
 
         // Skip the destructor (because it would attempt to detach the thread)
         crate::mem::forget(self);
@@ -253,13 +263,16 @@ pub fn join(mut self) {
 
 impl Drop for Thread {
     fn drop(&mut self) {
+        // Safety: `ThreadInner` is alive at this point
+        let inner = unsafe { self.p_inner.as_ref() };
+
         // Detach the thread.
-        match self.inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
+        match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
             LIFECYCLE_INIT => {
                 // [INIT → DETACHED]
                 // When the time comes, the child will figure out that no
                 // one will ever join it.
-                // The ownership of `self.inner` is moved to the child thread.
+                // The ownership of `*p_inner` is moved to the child thread.
                 // However, the release ordering is not necessary because we
                 // made no memory access needing synchronization since the call
                 // to `acre_tsk`.
@@ -278,10 +291,9 @@ fn drop(&mut self) {
                 // delete by entering the `FINISHED` state.
                 unsafe { terminate_and_delete_task(self.task) };
 
-                // Wwe are responsible for dropping `inner`.
-                // Safety: The contents of `self.inner` will not be accessed
-                // hereafter
-                unsafe { ManuallyDrop::drop(&mut self.inner) };
+                // Wwe are responsible for dropping `*p_inner`.
+                // Safety: The contents of `*p_inner` will not be accessed hereafter
+                let _ = unsafe { Box::from_raw(self.p_inner.as_ptr()) };
             }
             _ => unsafe { hint::unreachable_unchecked() },
         }
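As an aside, the ownership handoff that the diff's `Box::into_raw`/`Box::from_raw` pairing relies on can be summarized with the following hedged sketch; `Inner`, `new_state`, `detach`, and the lifecycle constants below are simplified stand-ins for illustration, not the module's actual definitions:

    use std::ptr::NonNull;
    use std::sync::atomic::{AtomicUsize, Ordering};

    // Simplified stand-ins for the lifecycle states used by the real module.
    const LIFECYCLE_INIT: usize = 0;
    const LIFECYCLE_FINISHED: usize = 1;
    const LIFECYCLE_DETACHED_OR_JOINED: usize = usize::MAX;

    struct Inner {
        lifecycle: AtomicUsize,
    }

    // Parent side: allocate the shared state and keep only a raw pointer,
    // mirroring how the diff stores `p_inner` and exposes its address as `exinf`.
    fn new_state() -> NonNull<Inner> {
        let inner = Box::new(Inner { lifecycle: AtomicUsize::new(LIFECYCLE_INIT) });
        // Safety: `Box::into_raw` never returns null.
        unsafe { NonNull::new_unchecked(Box::into_raw(inner)) }
    }

    // Detach (the parent's `Drop` path): the side that performs its final
    // transition second sees the other's state and frees `*p_inner`.
    fn detach(p_inner: NonNull<Inner>) {
        // Safety: nothing has freed the shared state yet at this point.
        let inner = unsafe { p_inner.as_ref() };
        match inner.lifecycle.swap(LIFECYCLE_DETACHED_OR_JOINED, Ordering::Acquire) {
            // Child still running: it now owns `*p_inner` and will free it.
            LIFECYCLE_INIT => {}
            // Child already finished: reconstruct the Box from the pointer
            // originally produced by `Box::into_raw` and drop it here.
            LIFECYCLE_FINISHED => drop(unsafe { Box::from_raw(p_inner.as_ptr()) }),
            _ => unreachable!(),
        }
    }

Whichever side performs its lifecycle transition second observes the other's final state in the swap result and becomes the unique owner that reconstructs the `Box`, so the allocation is freed exactly once and every `Box::from_raw` receives a pointer that came from `Box::into_raw`.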