Add 'do atomically { .. }' for exclusives
commit ae094a7adc (parent 9103e43909)
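In short: this change adds task::atomically, a stronger form of unkillable that also forbids yielding to the task scheduler while a closure runs, and wires it into lock_and_signal so that code holding the underlying pthread mutex can never yield. The caller-side pattern looks like the sketch below; it is illustrative only, written in the 2012-era dialect this diff uses, and critical_section is a made-up helper rather than part of the commit.

    // Sketch only. Inside `atomically` the task can neither be killed nor
    // yield; calling yield() within the block fails the task (see the tests
    // added near the end of this diff).
    unsafe fn critical_section(f: fn()) {
        do atomically {
            f()   // must not yield or block on the task scheduler
        }
    }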
@@ -9,6 +9,8 @@ export refcount;
 export log_str;
 export lock_and_signal, condition, methods;
 
+import task::atomically;
+
 enum type_desc = {
     size: uint,
     align: uint
@@ -105,13 +107,17 @@ impl methods for lock_and_signal {
     unsafe fn lock<T>(f: fn() -> T) -> T {
         rustrt::rust_lock_cond_lock(self.lock);
         let _r = unlock(self.lock);
-        f()
+        do atomically {
+            f()
+        }
     }
 
     unsafe fn lock_cond<T>(f: fn(condition) -> T) -> T {
         rustrt::rust_lock_cond_lock(self.lock);
         let _r = unlock(self.lock);
-        f(condition_(self.lock))
+        do atomically {
+            f(condition_(self.lock))
+        }
     }
 }
 
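Both lock and lock_cond take the raw lock via rustrt::rust_lock_cond_lock and now run the caller's closure inside atomically. The reasoning (hedged, since the commit itself only states it in the doc comment added further down): the lock is a plain pthread mutex rather than a scheduler-aware one, so a task that yielded while holding it could leave other tasks on the same scheduler thread blocked on the mutex with no way for the holder to run again; failing the task on such a yield is the safer behaviour. A hypothetical caller, with with_lock and do_work invented for illustration:

    // Hypothetical caller of the API above; not part of the commit.
    unsafe fn with_lock(l: lock_and_signal, do_work: fn()) {
        do l.lock {
            do_work()   // runs with the pthread mutex held, yields forbidden
        }
    }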
@@ -60,6 +60,7 @@ export yield;
 export failing;
 export get_task;
 export unkillable;
+export atomically;
 
 export local_data_key;
 export local_data_pop;
@@ -683,16 +684,36 @@ fn get_task() -> task {
  */
 unsafe fn unkillable(f: fn()) {
     class allow_failure {
-        let i: (); // since a class must have at least one field
-        new(_i: ()) { self.i = (); }
-        drop { rustrt::rust_task_allow_kill(); }
+        let t: *rust_task;
+        new(t: *rust_task) { self.t = t; }
+        drop { rustrt::rust_task_allow_kill(self.t); }
     }
 
-    let _allow_failure = allow_failure(());
-    rustrt::rust_task_inhibit_kill();
+    let t = rustrt::rust_get_task();
+    let _allow_failure = allow_failure(t);
+    rustrt::rust_task_inhibit_kill(t);
     f();
 }
 
+/**
+ * A stronger version of unkillable that also inhibits scheduling operations.
+ * For use with exclusive ARCs, which use pthread mutexes directly.
+ */
+unsafe fn atomically<U>(f: fn() -> U) -> U {
+    class defer_interrupts {
+        let t: *rust_task;
+        new(t: *rust_task) { self.t = t; }
+        drop {
+            rustrt::rust_task_allow_yield(self.t);
+            rustrt::rust_task_allow_kill(self.t);
+        }
+    }
+    let t = rustrt::rust_get_task();
+    let _interrupts = defer_interrupts(t);
+    rustrt::rust_task_inhibit_kill(t);
+    rustrt::rust_task_inhibit_yield(t);
+    f()
+}
+
 /****************************************************************************
  * Internal
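atomically follows the same RAII shape as the reworked unkillable: defer_interrupts is a one-field class whose destructor re-allows yields and kills, so the counters are restored even when f() fails. Because disallow_kill and disallow_yield are counters rather than flags (see the rust_task changes below), nesting the regions is harmless; only an actual yield while disallow_yield is non-zero fails the task. A small sketch, with nested_ok invented for illustration:

    // Nesting only bumps and later restores the counters; it does not fail.
    // test_atomically_nested (below) fails because of its yield(), not
    // because of the nesting itself.
    unsafe fn nested_ok() {
        do atomically {
            do atomically {
                // still atomic; no yield here, so no failure
            }
            // outer region still active: disallow_yield went 2 -> 1
        }
    }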
@@ -1235,8 +1256,10 @@ extern mod rustrt {
 
     fn rust_task_is_unwinding(task: *rust_task) -> bool;
     fn rust_osmain_sched_id() -> sched_id;
-    fn rust_task_inhibit_kill();
-    fn rust_task_allow_kill();
+    fn rust_task_inhibit_kill(t: *rust_task);
+    fn rust_task_allow_kill(t: *rust_task);
+    fn rust_task_inhibit_yield(t: *rust_task);
+    fn rust_task_allow_yield(t: *rust_task);
     fn rust_task_kill_other(task: *rust_task);
     fn rust_task_kill_all(task: *rust_task);
 
@@ -1759,6 +1782,21 @@ fn test_unkillable_nested() {
     po.recv();
 }
 
+#[test] #[should_fail] #[ignore(cfg(windows))]
+fn test_atomically() {
+    unsafe { do atomically { yield(); } }
+}
+
+#[test]
+fn test_atomically2() {
+    unsafe { do atomically { } } yield(); // shouldn't fail
+}
+
+#[test] #[should_fail] #[ignore(cfg(windows))]
+fn test_atomically_nested() {
+    unsafe { do atomically { do atomically { } yield(); } }
+}
+
 #[test]
 fn test_child_doesnt_ref_parent() {
     // If the child refcounts the parent task, this will stack overflow when
@@ -854,17 +854,25 @@ rust_global_env_chan_ptr() {
 }
 
 extern "C" void
-rust_task_inhibit_kill() {
-    rust_task *task = rust_get_current_task();
+rust_task_inhibit_kill(rust_task *task) {
     task->inhibit_kill();
 }
 
 extern "C" void
-rust_task_allow_kill() {
-    rust_task *task = rust_get_current_task();
+rust_task_allow_kill(rust_task *task) {
     task->allow_kill();
 }
 
+extern "C" void
+rust_task_inhibit_yield(rust_task *task) {
+    task->inhibit_yield();
+}
+
+extern "C" void
+rust_task_allow_yield(rust_task *task) {
+    task->allow_yield();
+}
+
 extern "C" void
 rust_task_kill_other(rust_task *task) { /* Used for linked failure */
     task->kill();
@@ -39,6 +39,7 @@ rust_task::rust_task(rust_sched_loop *sched_loop, rust_task_state state,
     killed(false),
     reentered_rust_stack(false),
     disallow_kill(0),
+    disallow_yield(0),
     c_stack(NULL),
     next_c_sp(0),
     next_rust_sp(0)
@@ -234,9 +235,18 @@ rust_task::must_fail_from_being_killed_inner() {
     return killed && !reentered_rust_stack && disallow_kill == 0;
 }
 
+void rust_task_yield_fail(rust_task *task) {
+    LOG_ERR(task, task, "task %" PRIxPTR " yielded in an atomic section",
+            task);
+    task->fail();
+}
+
 // Only run this on the rust stack
 void
 rust_task::yield(bool *killed) {
+    if (disallow_yield > 0) {
+        call_on_c_stack(this, (void *)rust_task_yield_fail);
+    }
     // FIXME (#2875): clean this up
     if (must_fail_from_being_killed()) {
         {
@@ -672,6 +682,17 @@ rust_task::allow_kill() {
     disallow_kill--;
 }
 
+void rust_task::inhibit_yield() {
+    scoped_lock with(lifecycle_lock);
+    disallow_yield++;
+}
+
+void rust_task::allow_yield() {
+    scoped_lock with(lifecycle_lock);
+    assert(disallow_yield > 0 && "Illegal allow_yield(): already yieldable!");
+    disallow_yield--;
+}
+
 void *
 rust_task::wait_event(bool *killed) {
     scoped_lock with(lifecycle_lock);
@@ -185,6 +185,7 @@ private:
     // Indicates that we've called back into Rust from C
     bool reentered_rust_stack;
     unsigned long disallow_kill;
+    unsigned long disallow_yield;
 
     // The stack used for running C code, borrowed from the scheduler thread
    stk_seg *c_stack;
@@ -318,6 +319,8 @@ public:
 
     void inhibit_kill();
     void allow_kill();
+    void inhibit_yield();
+    void allow_yield();
 };
 
 // FIXME (#2697): It would be really nice to be able to get rid of this.
@@ -183,6 +183,8 @@ rust_port_drop
 rust_port_task
 rust_task_inhibit_kill
 rust_task_allow_kill
+rust_task_inhibit_yield
+rust_task_allow_yield
 rust_task_kill_other
 rust_task_kill_all
 rust_create_cond_lock