Change yield() and wait_event() to be MUST_CHECK and return the killed flag. (closes #2875)
commit 695ab09899
parent ac9df5841c
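The API change in a nutshell: instead of writing the killed flag through a bool* out-parameter that a caller could silently ignore, yield() and wait_event() now hand the flag back as a MUST_CHECK (warn_unused_result) return value, so the compiler nags any caller that drops it. Roughly, the pattern looks like the standalone sketch below; the DemoTask type and its field are invented for illustration and are not the real rust_task class.

#include <cstdio>

#define MUST_CHECK __attribute__((warn_unused_result))

struct DemoTask {
    bool was_killed;   // stands in for must_fail_from_being_killed()

    // Old shape: the flag travels through an out-parameter; nothing stops a
    // caller from never reading it.
    void yield_old(bool *killed) {
        if (was_killed) { *killed = true; }
    }

    // New shape: the flag is the return value, and warn_unused_result makes
    // the compiler warn about any caller that ignores it.
    MUST_CHECK bool yield_new() {
        return was_killed;
    }
};

int main() {
    DemoTask t = { true };
    bool killed = t.yield_new();   // must be bound and checked
    if (killed) {
        std::printf("task was killed; propagate the failure\n");
    }
    return 0;
}

Every caller in the diff below now has to make that decision explicitly, even when the answer is just assert(!killed && "unimplemented").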
@@ -69,7 +69,7 @@ fn packet<T: send>() -> *packet<T> unsafe {
     #[rust_stack]
     fn task_clear_event_reject(task: *rust_task);
 
-    fn task_wait_event(this: *rust_task, killed: &mut bool) -> *libc::c_void;
+    fn task_wait_event(this: *rust_task, killed: &mut *libc::c_void) -> bool;
     fn task_signal_event(target: *rust_task, event: *libc::c_void);
 }
 
@@ -80,13 +80,13 @@ unsafe fn uniquify<T>(x: *T) -> ~T {
 }
 
 fn wait_event(this: *rust_task) -> *libc::c_void {
-    let mut killed = false;
+    let mut event = ptr::null();
 
-    let res = rustrt::task_wait_event(this, &mut killed);
+    let killed = rustrt::task_wait_event(this, &mut event);
     if killed && !task::failing() {
         fail ~"killed"
     }
-    res
+    event
 }
 
 fn swap_state_acq(&dst: state, src: state) -> state {
@@ -515,8 +515,7 @@ fn yield() {
     //! Yield control to the task scheduler
 
     let task_ = rustrt::rust_get_task();
-    let mut killed = false;
-    rustrt::rust_task_yield(task_, killed);
+    let killed = rustrt::rust_task_yield(task_);
     if killed && !failing() {
         fail ~"killed";
     }
@@ -1104,7 +1103,7 @@ unsafe fn local_data_modify<T: owned>(
 
 extern mod rustrt {
     #[rust_stack]
-    fn rust_task_yield(task: *rust_task, &killed: bool);
+    fn rust_task_yield(task: *rust_task) -> bool;
 
     fn rust_get_sched_id() -> sched_id;
     fn rust_new_sched(num_threads: libc::uintptr_t) -> sched_id;
@@ -722,9 +722,9 @@ rust_port_id_send(rust_port_id target_port_id, void *sptr) {
 
 // This is called by an intrinsic on the Rust stack and must run
 // entirely in the red zone. Do not call on the C stack.
-extern "C" CDECL void
-rust_task_yield(rust_task *task, bool *killed) {
-    task->yield(killed);
+extern "C" CDECL MUST_CHECK bool
+rust_task_yield(rust_task *task) {
+    return task->yield();
 }
 
 extern "C" CDECL void
@@ -920,8 +920,8 @@ rust_wait_cond_lock(rust_cond_lock *lock) {
     lock->waiting = task;
     task->block(lock, "waiting for signal");
     lock->lock.unlock();
-    bool killed = false;
-    task->yield(&killed);
+    bool killed = task->yield();
+    assert(!killed && "unimplemented");
     lock->lock.lock();
 }
 
@@ -960,12 +960,12 @@ task_clear_event_reject(rust_task *task) {
 
 // Waits on an event, returning the pointer to the event that unblocked this
 // task.
-extern "C" void *
-task_wait_event(rust_task *task, bool *killed) {
-    // FIXME #2890: we should assert that the passed in task is the currently
+extern "C" MUST_CHECK bool
+task_wait_event(rust_task *task, void **result) {
+    // Maybe (if not too slow) assert that the passed in task is the currently
     // running task. We wouldn't want to wait some other task.
 
-    return task->wait_event(killed);
+    return task->wait_event(result);
 }
 
 extern "C" void
@@ -85,6 +85,8 @@ extern "C" int check_claims;
     } \
 }
 
+#define MUST_CHECK __attribute__((warn_unused_result))
+
 #define PTR "0x%" PRIxPTR
 
 // This accounts for logging buffers.
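The MUST_CHECK macro added above is a thin alias for GCC/Clang's warn_unused_result attribute. As a rough standalone illustration (hypothetical function, not part of the runtime sources), discarding the annotated return value draws a -Wunused-result diagnostic, while binding and checking it compiles quietly:

__attribute__((warn_unused_result)) bool poll_killed() { return false; }

int main() {
    poll_killed();                 // warning: ignoring return value (-Wunused-result)
    bool killed = poll_killed();   // fine: the result is bound and checked
    if (killed) { return 1; }      // fail path
    return 0;
}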
@@ -242,26 +242,30 @@ void rust_task_yield_fail(rust_task *task) {
 }
 
 // Only run this on the rust stack
-void
-rust_task::yield(bool *killed) {
+MUST_CHECK bool rust_task::yield() {
+    bool killed = false;
+
     if (disallow_yield > 0) {
         call_on_c_stack(this, (void *)rust_task_yield_fail);
     }
-    // FIXME (#2875): clean this up
+    // This check is largely superfluous; it's the one after the context swap
+    // that really matters. This one allows us to assert a useful invariant.
     if (must_fail_from_being_killed()) {
         {
             scoped_lock with(lifecycle_lock);
             assert(!(state == task_state_blocked));
         }
-        *killed = true;
+        killed = true;
     }
 
     // Return to the scheduler.
     ctx.next->swap(ctx);
 
     if (must_fail_from_being_killed()) {
-        *killed = true;
+        killed = true;
     }
+
+    return killed;
 }
 
 void
@@ -687,19 +691,24 @@ void rust_task::allow_yield() {
     disallow_yield--;
 }
 
-void *
-rust_task::wait_event(bool *killed) {
+MUST_CHECK bool rust_task::wait_event(void **result) {
+    bool killed = false;
     scoped_lock with(lifecycle_lock);
 
     if(!event_reject) {
         block_inner(&event_cond, "waiting on event");
         lifecycle_lock.unlock();
-        yield(killed);
+        killed = yield();
         lifecycle_lock.lock();
+    } else if (must_fail_from_being_killed_inner()) {
+        // If the deschedule was rejected, yield won't do our killed check for
+        // us. For thoroughness, do it here. FIXME (#524)
+        killed = true;
     }
 
     event_reject = false;
-    return event;
+    *result = event;
+    return killed;
 }
 
 void
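wait_event() now has two outputs: the killed flag rides in the must-check return value, and the event pointer comes back through a void** out-parameter. A minimal standalone sketch of that calling convention, with invented names rather than the real rust_task::wait_event:

#include <cstdio>

#define MUST_CHECK __attribute__((warn_unused_result))

static void *pending_event = nullptr;   // stands in for rust_task::event
static bool task_killed = false;        // stands in for the kill signal

// Flag in the return value, payload through the out-pointer.
MUST_CHECK bool demo_wait_event(void **result) {
    *result = pending_event;
    return task_killed;
}

int main() {
    void *event = nullptr;
    bool killed = demo_wait_event(&event);   // return value must be consumed
    if (killed) {
        std::printf("killed while blocked; fail instead of using the event\n");
        return 1;
    }
    std::printf("woke up with event %p\n", event);
    return 0;
}

On the Rust side this is mirrored by wait_event() in the first hunks, which seeds event with ptr::null(), passes &mut event, and fails with ~"killed" when the returned flag is set.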
@@ -259,7 +259,8 @@ public:
     void backtrace();
 
     // Yields control to the scheduler. Called from the Rust stack
-    void yield(bool *killed);
+    // Returns TRUE if the task was killed and needs to fail.
+    MUST_CHECK bool yield();
 
     // Fail this task (assuming caller-on-stack is different task).
     void kill();
@@ -311,7 +312,8 @@ public:
         this->event_reject = false;
     }
 
-    void *wait_event(bool *killed);
+    // Returns TRUE if the task was killed and needs to fail.
+    MUST_CHECK bool wait_event(void **result);
     void signal_event(void *event);
 
     void cleanup_after_turn();