Change yield() and wait_event() to be MUST_CHECK and return the killed flag. (closes #2875)

Ben Blum 2012-07-24 18:22:44 -04:00
parent ac9df5841c
commit 695ab09899
6 changed files with 38 additions and 26 deletions
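The change is a calling-convention fix: instead of reporting the killed flag through an out-parameter that callers can silently ignore, yield() and wait_event() now return it, and the return value is annotated so the compiler warns when it is dropped. Below is a minimal standalone C++ sketch of that before/after shape; it is illustrative only, and Task, demo_yield_old, and demo_yield_new are hypothetical names, not code from this commit.

// Sketch of the out-parameter -> checked-return-value change.
// Task and the demo_yield_* functions are made-up illustration names.
#include <cstdio>

#define MUST_CHECK __attribute__((warn_unused_result))

struct Task {
    bool killed_flag = false;
};

// Old style: killed is reported through an out-parameter; nothing forces
// the caller to look at it.
void demo_yield_old(Task *task, bool *killed) {
    *killed = task->killed_flag;
}

// New style: the flag is the return value, and warn_unused_result makes
// the compiler complain if a call site silently discards it.
MUST_CHECK bool demo_yield_new(Task *task) {
    return task->killed_flag;
}

int main() {
    Task t;
    bool killed = demo_yield_new(&t);   // OK: result is consumed
    // demo_yield_new(&t);              // would trigger -Wunused-result
    if (killed) {
        std::puts("task was killed; propagate failure");
    }
    return 0;
}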

View File

@@ -69,7 +69,7 @@ fn packet<T: send>() -> *packet<T> unsafe {
     #[rust_stack]
     fn task_clear_event_reject(task: *rust_task);
-    fn task_wait_event(this: *rust_task, killed: &mut bool) -> *libc::c_void;
+    fn task_wait_event(this: *rust_task, killed: &mut *libc::c_void) -> bool;
     fn task_signal_event(target: *rust_task, event: *libc::c_void);
 }
@@ -80,13 +80,13 @@ unsafe fn uniquify<T>(x: *T) -> ~T {
 }
 fn wait_event(this: *rust_task) -> *libc::c_void {
-    let mut killed = false;
-    let res = rustrt::task_wait_event(this, &mut killed);
+    let mut event = ptr::null();
+    let killed = rustrt::task_wait_event(this, &mut event);
     if killed && !task::failing() {
         fail ~"killed"
     }
-    res
+    event
 }
 fn swap_state_acq(&dst: state, src: state) -> state {

View File

@@ -515,8 +515,7 @@ fn yield() {
     //! Yield control to the task scheduler
     let task_ = rustrt::rust_get_task();
-    let mut killed = false;
-    rustrt::rust_task_yield(task_, killed);
+    let killed = rustrt::rust_task_yield(task_);
     if killed && !failing() {
         fail ~"killed";
     }
@@ -1104,7 +1103,7 @@ unsafe fn local_data_modify<T: owned>(
 extern mod rustrt {
     #[rust_stack]
-    fn rust_task_yield(task: *rust_task, &killed: bool);
+    fn rust_task_yield(task: *rust_task) -> bool;
     fn rust_get_sched_id() -> sched_id;
     fn rust_new_sched(num_threads: libc::uintptr_t) -> sched_id;

View File

@@ -722,9 +722,9 @@ rust_port_id_send(rust_port_id target_port_id, void *sptr) {
 // This is called by an intrinsic on the Rust stack and must run
 // entirely in the red zone. Do not call on the C stack.
-extern "C" CDECL void
+extern "C" CDECL MUST_CHECK bool
 rust_task_yield(rust_task *task, bool *killed) {
-    task->yield(killed);
+    return task->yield();
 }
 extern "C" CDECL void
@@ -920,8 +920,8 @@ rust_wait_cond_lock(rust_cond_lock *lock) {
     lock->waiting = task;
     task->block(lock, "waiting for signal");
     lock->lock.unlock();
-    bool killed = false;
-    task->yield(&killed);
+    bool killed = task->yield();
     assert(!killed && "unimplemented");
     lock->lock.lock();
 }
@@ -960,12 +960,12 @@ task_clear_event_reject(rust_task *task) {
 // Waits on an event, returning the pointer to the event that unblocked this
 // task.
-extern "C" void *
-task_wait_event(rust_task *task, bool *killed) {
-    // FIXME #2890: we should assert that the passed in task is the currently
+extern "C" MUST_CHECK bool
+task_wait_event(rust_task *task, void **result) {
+    // Maybe (if not too slow) assert that the passed in task is the currently
     // running task. We wouldn't want to wait some other task.
-    return task->wait_event(killed);
+    return task->wait_event(result);
 }
 extern "C" void
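On the event path the new shape carries two results: the event pointer still comes back, but through a void** out-parameter, while the return value is the killed flag the caller is obliged to check. Here is a small self-contained sketch of consuming that shape; FakeRuntime and wait_for_event are hypothetical stand-ins, not the runtime's API.

// Sketch of consuming a call that returns the killed flag and writes the
// event pointer through an out-parameter, mirroring the new task_wait_event.
#include <cassert>
#include <cstdio>

#define MUST_CHECK __attribute__((warn_unused_result))

struct FakeRuntime {
    void *pending_event = nullptr;
    bool task_killed = false;
};

MUST_CHECK bool wait_for_event(FakeRuntime *rt, void **result) {
    *result = rt->pending_event;   // the event that unblocked the task
    return rt->task_killed;        // the flag the caller must check
}

int main() {
    static int payload = 42;
    FakeRuntime rt;
    rt.pending_event = &payload;

    void *event = nullptr;
    bool killed = wait_for_event(&rt, &event);
    if (killed) {
        std::puts("killed while blocked; fail instead of using the event");
        return 1;
    }
    assert(event == &payload);
    std::printf("woke up with event %p\n", event);
    return 0;
}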

View File

@@ -85,6 +85,8 @@ extern "C" int check_claims;
 } \
 }
+#define MUST_CHECK __attribute__((warn_unused_result))
 #define PTR "0x%" PRIxPTR
 // This accounts for logging buffers.
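MUST_CHECK is a thin wrapper over GCC/Clang's warn_unused_result attribute: any call whose result is discarded draws a -Wunused-result warning. A tiny illustration follows; poll_killed is a made-up name used only for demonstration.

// Demonstration of what the MUST_CHECK macro expands to. Compiling this with
// g++ or clang++ warns on the marked line because the result is discarded.
#define MUST_CHECK __attribute__((warn_unused_result))

MUST_CHECK bool poll_killed() {
    return false;
}

int main() {
    poll_killed();                 // warning: ignoring return value (-Wunused-result)
    bool killed = poll_killed();   // fine: result is consumed
    return killed ? 1 : 0;
}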

View File

@@ -242,26 +242,30 @@ void rust_task_yield_fail(rust_task *task) {
 }
 // Only run this on the rust stack
-void
-rust_task::yield(bool *killed) {
+MUST_CHECK bool rust_task::yield() {
+    bool killed = false;
     if (disallow_yield > 0) {
         call_on_c_stack(this, (void *)rust_task_yield_fail);
     }
-    // FIXME (#2875): clean this up
+    // This check is largely superfluous; it's the one after the context swap
+    // that really matters. This one allows us to assert a useful invariant.
     if (must_fail_from_being_killed()) {
         {
             scoped_lock with(lifecycle_lock);
             assert(!(state == task_state_blocked));
         }
-        *killed = true;
+        killed = true;
     }
     // Return to the scheduler.
     ctx.next->swap(ctx);
     if (must_fail_from_being_killed()) {
-        *killed = true;
+        killed = true;
     }
+    return killed;
 }
 void
@@ -687,19 +691,24 @@ void rust_task::allow_yield() {
     disallow_yield--;
 }
-void *
-rust_task::wait_event(bool *killed) {
+MUST_CHECK bool rust_task::wait_event(void **result) {
+    bool killed = false;
     scoped_lock with(lifecycle_lock);
     if(!event_reject) {
         block_inner(&event_cond, "waiting on event");
         lifecycle_lock.unlock();
-        yield(killed);
+        killed = yield();
         lifecycle_lock.lock();
+    } else if (must_fail_from_being_killed_inner()) {
+        // If the deschedule was rejected, yield won't do our killed check for
+        // us. For thoroughness, do it here. FIXME (#524)
+        killed = true;
     }
     event_reject = false;
-    return event;
+    *result = event;
+    return killed;
 }
 void

View File

@@ -259,7 +259,8 @@ public:
     void backtrace();
     // Yields control to the scheduler. Called from the Rust stack
-    void yield(bool *killed);
+    // Returns TRUE if the task was killed and needs to fail.
+    MUST_CHECK bool yield();
     // Fail this task (assuming caller-on-stack is different task).
     void kill();
@@ -311,7 +312,8 @@ public:
         this->event_reject = false;
     }
-    void *wait_event(bool *killed);
+    // Returns TRUE if the task was killed and needs to fail.
+    MUST_CHECK bool wait_event(void **result);
     void signal_event(void *event);
     void cleanup_after_turn();