From 52ca256d7be99dafa81c531bf1fc6ec2e2a508b9 Mon Sep 17 00:00:00 2001
From: Ben Blum
Date: Tue, 2 Jul 2013 21:15:34 -0400
Subject: [PATCH] Add KillHandle and implement exit code propagation to replace join_latch

---
 src/libstd/rt/kill.rs       | 128 ++++++++++++++++++++++++++++++++++++
 src/libstd/rt/mod.rs        |   3 +
 src/libstd/unstable/sync.rs |   6 +-
 3 files changed, 134 insertions(+), 3 deletions(-)
 create mode 100644 src/libstd/rt/kill.rs

diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
new file mode 100644
index 00000000000..afd2d3b5a1a
--- /dev/null
+++ b/src/libstd/rt/kill.rs
@@ -0,0 +1,128 @@
+// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Task death: asynchronous killing, linked failure, exit code propagation.
+
+use cell::Cell;
+use option::{Option, Some, None};
+use prelude::*;
+use unstable::sync::{UnsafeAtomicRcBox, LittleLock};
+use util;
+
+// FIXME(#7544)(bblum): think about the cache efficiency of this
+struct KillHandleInner {
+    // ((more fields to be added in a future commit))
+
+    // Shared state between task and children for exit code propagation. These
+    // are here so we can re-use the kill handle to implement watched children
+    // tasks. Using a separate ARClike would introduce extra atomic adds/subs
+    // into common spawn paths, so this is just for speed.
+
+    // Locklessly accessed; protected by the enclosing refcount's barriers.
+    any_child_failed: bool,
+    // A lazy list, consuming which may unwrap() many child tombstones.
+    child_tombstones: Option<~fn() -> bool>,
+    // Protects multiple children simultaneously creating tombstones.
+    graveyard_lock: LittleLock,
+}
+
+/// State shared between tasks used for task killing during linked failure.
+#[deriving(Clone)]
+pub struct KillHandle(UnsafeAtomicRcBox<KillHandleInner>);
+
+impl KillHandle {
+    pub fn new() -> KillHandle {
+        KillHandle(UnsafeAtomicRcBox::new(KillHandleInner {
+            // Linked failure fields
+            // ((none yet))
+            // Exit code propagation fields
+            any_child_failed: false,
+            child_tombstones: None,
+            graveyard_lock: LittleLock(),
+        }))
+    }
+
+    pub fn notify_immediate_failure(&mut self) {
+        // A benign data race may happen here if there are failing sibling
+        // tasks that were also spawned-watched. The refcount's write barriers
+        // in UnsafeAtomicRcBox ensure that this write will be seen by the
+        // unwrapper/destructor, whichever task may unwrap it.
+        unsafe { (*self.get()).any_child_failed = true; }
+    }
+
+    // For use when a task does not need to collect its children's exit
+    // statuses, but the task has a parent which might want them.
+    pub fn reparent_children_to(self, parent: &mut KillHandle) {
+        // Optimistic path: If another child of the parent's already failed,
+        // we don't need to worry about any of this.
+        if unsafe { (*parent.get()).any_child_failed } {
+            return;
+        }
+
+        // Try to see if all our children are gone already.
+        match unsafe { self.try_unwrap() } {
+            // Couldn't unwrap; children still alive. Reparent entire handle as
+            // our own tombstone, to be unwrapped later.
+            Left(this) => {
+                let this = Cell::new(this); // :(
+                do add_lazy_tombstone(parent) |other_tombstones| {
+                    let this = Cell::new(this.take()); // :(
+                    let others = Cell::new(other_tombstones); // :(
+                    || {
+                        // Prefer to check tombstones that were there first,
+                        // being "more fair" at the expense of tail-recursion.
+                        others.take().map_consume_default(true, |f| f()) && {
+                            let mut inner = unsafe { this.take().unwrap() };
+                            (!inner.any_child_failed) &&
+                            inner.child_tombstones.take_map_default(true, |f| f())
+                        }
+                    }
+                }
+            }
+            // Whether or not all children exited, one or more already failed.
+            Right(KillHandleInner { any_child_failed: true, _ }) => {
+                parent.notify_immediate_failure();
+            }
+            // All children exited, but some left behind tombstones that we
+            // don't want to wait on now. Give them to our parent.
+            Right(KillHandleInner { any_child_failed: false,
+                                    child_tombstones: Some(f), _ }) => {
+                let f = Cell::new(f); // :(
+                do add_lazy_tombstone(parent) |other_tombstones| {
+                    let f = Cell::new(f.take()); // :(
+                    let others = Cell::new(other_tombstones); // :(
+                    || {
+                        // Prefer fairness to tail-recursion, as in above case.
+                        others.take().map_consume_default(true, |f| f()) &&
+                        f.take()()
+                    }
+                }
+            }
+            // All children exited, none failed. Nothing to do!
+            Right(KillHandleInner { any_child_failed: false,
+                                    child_tombstones: None, _ }) => { }
+        }
+
+        // NB: Takes a pthread mutex -- 'blk' not allowed to reschedule.
+        fn add_lazy_tombstone(parent: &mut KillHandle,
+                              blk: &fn(Option<~fn() -> bool>) -> ~fn() -> bool) {
+
+            let inner: &mut KillHandleInner = unsafe { &mut *parent.get() };
+            unsafe {
+                do inner.graveyard_lock.lock {
+                    // Update the current "head node" of the lazy list.
+                    inner.child_tombstones =
+                        Some(blk(util::replace(&mut inner.child_tombstones, None)));
+                }
+            }
+        }
+    }
+}
+
diff --git a/src/libstd/rt/mod.rs b/src/libstd/rt/mod.rs
index 51f4737ef85..1c3411b6247 100644
--- a/src/libstd/rt/mod.rs
+++ b/src/libstd/rt/mod.rs
@@ -83,6 +83,9 @@ pub mod global_heap;
 /// Implementations of language-critical runtime features like @.
 pub mod task;
 
+/// Facilities related to task failure, killing, and death.
+mod kill;
+
 /// The coroutine task scheduler, built on the `io` event loop.
 mod sched;
 
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index 2af914cf458..b208ffd8f91 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -225,9 +225,9 @@ impl<T> Drop for UnsafeAtomicRcBox<T> {
 /****************************************************************************/
 
 #[allow(non_camel_case_types)] // runtime type
-pub type rust_little_lock = *libc::c_void;
+type rust_little_lock = *libc::c_void;
 
-struct LittleLock {
+pub struct LittleLock {
     l: rust_little_lock,
 }
 
@@ -239,7 +239,7 @@ impl Drop for LittleLock {
     }
 }
 
-fn LittleLock() -> LittleLock {
+pub fn LittleLock() -> LittleLock {
     unsafe {
         LittleLock {
             l: rust_create_little_lock()
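
The exit-code propagation scheme in kill.rs is small but subtle: each task shares an any_child_failed flag plus a lazy list of "tombstone" closures with its children, and reparent_children_to either flips the parent's flag immediately or chains its unconsumed tombstones onto the parent's list so they are only evaluated when someone finally collects the exit status. The sketch below restates that idea in present-day Rust; it is a simplified model under stated assumptions, not the patch's code. The Handle and ExitState names are invented for illustration, and Arc<Mutex<...>> plus Box<dyn FnOnce() -> bool> stand in for UnsafeAtomicRcBox, LittleLock, and the patch's ~fn() -> bool closures.

    use std::sync::{Arc, Mutex};

    // Hypothetical, simplified stand-in for KillHandleInner's exit-propagation state.
    struct ExitState {
        // Set when any watched child fails (cf. any_child_failed in the patch).
        any_child_failed: bool,
        // Lazy list of "tombstones": deferred checks reporting whether
        // already-exited children all succeeded (cf. child_tombstones).
        child_tombstones: Option<Box<dyn FnOnce() -> bool + Send>>,
    }

    #[derive(Clone)]
    struct Handle(Arc<Mutex<ExitState>>);

    impl Handle {
        fn new() -> Handle {
            Handle(Arc::new(Mutex::new(ExitState {
                any_child_failed: false,
                child_tombstones: None,
            })))
        }

        // Cf. notify_immediate_failure: a failing child just flips the flag.
        fn notify_immediate_failure(&self) {
            self.0.lock().unwrap().any_child_failed = true;
        }

        // Cf. add_lazy_tombstone: prepend a deferred check onto the lazy list,
        // chaining it behind whatever tombstones were already there.
        fn add_lazy_tombstone(&self, check: Box<dyn FnOnce() -> bool + Send>) {
            let mut inner = self.0.lock().unwrap();
            let others = inner.child_tombstones.take();
            inner.child_tombstones = Some(Box::new(move || {
                // Evaluate older tombstones first ("more fair", as in the patch),
                // then the one just added.
                others.map_or(true, |f| f()) && check()
            }));
        }

        // Consume the lazy list when the exit status is finally collected.
        fn collect_success(&self) -> bool {
            let (failed, tombstones) = {
                let mut inner = self.0.lock().unwrap();
                (inner.any_child_failed, inner.child_tombstones.take())
            };
            // Same short-circuit as the patch: if a child already failed,
            // the tombstones need not be evaluated at all.
            !failed && tombstones.map_or(true, |f| f())
        }
    }

    fn main() {
        let parent = Handle::new();

        // A child that exited successfully leaves a tombstone reporting `true`;
        // a failed child leaves one reporting `false`.
        parent.add_lazy_tombstone(Box::new(|| true));
        parent.add_lazy_tombstone(Box::new(|| false));

        // The deferred checks only run here, when the status is collected.
        println!("all children succeeded: {}", parent.collect_success()); // false

        // A sibling that fails outright flips the flag instead.
        let other_parent = Handle::new();
        other_parent.notify_immediate_failure();
        assert!(!other_parent.collect_success());
    }

As in the patch, the newest tombstone wraps the older list and evaluates the older entries first, trading tail-recursion for fairness; nothing runs until collect_success consumes the chain, so successful children never make their parent block at spawn or exit time.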
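notify_immediate_failure writes any_child_failed without taking graveyard_lock, relying on the reference-count barriers of UnsafeAtomicRcBox to publish the store to whichever task eventually unwraps the box; the race between failing siblings is benign because every writer stores the same value. A rough modern-Rust equivalent (an assumption for illustration, not part of the patch) would use an AtomicBool whose store is published by the join:

    use std::sync::Arc;
    use std::sync::atomic::{AtomicBool, Ordering};
    use std::thread;

    fn main() {
        // Hypothetical stand-in for the locklessly written any_child_failed flag.
        let any_child_failed = Arc::new(AtomicBool::new(false));

        // Several failing "children" may race to set the flag; the race is benign
        // because every writer stores the same value.
        let flag = Arc::clone(&any_child_failed);
        let child = thread::spawn(move || flag.store(true, Ordering::Relaxed));

        // join() synchronizes with the child, so the store is visible afterwards,
        // much as the refcount barriers publish the write to the unwrapper here.
        child.join().unwrap();
        assert!(any_child_failed.load(Ordering::Relaxed));
        println!("any_child_failed = {}", any_child_failed.load(Ordering::Relaxed));
    }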