diff --git a/src/libcore/rt/mod.rs b/src/libcore/rt/mod.rs
index b1227af5f4c..ba61b49d14a 100644
--- a/src/libcore/rt/mod.rs
+++ b/src/libcore/rt/mod.rs
@@ -88,3 +88,75 @@ pub fn start(main: *u8, _argc: int, _argv: **c_char, _crate_map: *u8) -> int {
         fn rust_call_nullary_fn(f: *u8);
     }
 }
+
+/// Possible contexts in which Rust code may be executing.
+/// Different runtime services are available depending on context.
+#[deriving(Eq)]
+pub enum RuntimeContext {
+    // Only default services, e.g. exchange heap
+    GlobalContext,
+    // The scheduler may be accessed
+    SchedulerContext,
+    // Full task services, e.g. local heap, unwinding
+    TaskContext,
+    // Running in an old-style task
+    OldTaskContext
+}
+
+pub fn context() -> RuntimeContext {
+
+    use task::rt::rust_task;
+    use self::sched::Scheduler;
+
+    // XXX: Hitting TLS twice to check if the scheduler exists
+    // then to check for the task is not good for perf
+    if unsafe { rust_try_get_task().is_not_null() } {
+        return OldTaskContext;
+    } else {
+        if Scheduler::have_local() {
+            let context = ::cell::empty_cell();
+            do Scheduler::borrow_local |sched| {
+                if sched.in_task_context() {
+                    context.put_back(TaskContext);
+                } else {
+                    context.put_back(SchedulerContext);
+                }
+            }
+            return context.take();
+        } else {
+            return GlobalContext;
+        }
+    }
+
+    pub extern {
+        #[rust_stack]
+        fn rust_try_get_task() -> *rust_task;
+    }
+}
+
+#[test]
+fn test_context() {
+    use unstable::run_in_bare_thread;
+    use self::sched::{Scheduler, Task};
+    use self::uvio::UvEventLoop;
+    use cell::Cell;
+
+    assert!(context() == OldTaskContext);
+    do run_in_bare_thread {
+        assert!(context() == GlobalContext);
+        let mut sched = ~UvEventLoop::new_scheduler();
+        let task = ~do Task::new(&mut sched.stack_pool) {
+            assert!(context() == TaskContext);
+            let sched = Scheduler::take_local();
+            do sched.deschedule_running_task_and_then() |task| {
+                assert!(context() == SchedulerContext);
+                let task = Cell(task);
+                do Scheduler::borrow_local |sched| {
+                    sched.task_queue.push_back(task.take());
+                }
+            }
+        };
+        sched.task_queue.push_back(task);
+        sched.run();
+    }
+}
diff --git a/src/libcore/rt/sched/local.rs b/src/libcore/rt/sched/local.rs
index d8001011114..0eb97ee67ec 100644
--- a/src/libcore/rt/sched/local.rs
+++ b/src/libcore/rt/sched/local.rs
@@ -10,6 +10,7 @@
 
 //! Access to the thread-local Scheduler
 
+use prelude::*;
 use ptr::mut_null;
 use libc::c_void;
 use cast::transmute;
@@ -39,6 +40,16 @@ pub fn take() -> ~Scheduler {
     }
 }
 
+/// Check whether there is a thread-local Scheduler attached to the running thread
+pub fn exists() -> bool {
+    unsafe {
+        match maybe_tls_key() {
+            Some(key) => tls::get(key).is_not_null(),
+            None => false
+        }
+    }
+}
+
 /// Borrow a mutable reference to the thread-local Scheduler
 /// # Safety Note
 /// Because this leaves the Scheduler in thread-local storage it is possible
@@ -60,10 +71,31 @@ pub unsafe fn borrow() -> &mut Scheduler {
 }
 
 fn tls_key() -> tls::Key {
+    maybe_tls_key().get()
+}
+
+fn maybe_tls_key() -> Option<tls::Key> {
     unsafe {
         let key: *mut c_void = rust_get_sched_tls_key();
         let key: &mut tls::Key = transmute(key);
-        return *key;
+        let key = *key;
+        // Check that the key has been initialized.
+
+        // NB: This is a little racy because, while the key is
+        // initialized under a mutex and it's assumed to be initialized
+        // in the Scheduler ctor by any thread that needs to use it,
+        // we are not accessing the key under a mutex. Threads that
+        // are not using the new Scheduler but still *want to check*
+        // whether they are running under a new Scheduler may see a 0
+        // value here that is in the process of being initialized in
+        // another thread. I think this is fine since the only action
+        // they could take if it was initialized would be to check the
+        // thread-local value and see that it's not set.
+        if key != 0 {
+            return Some(key);
+        } else {
+            return None;
+        }
     }
 }
diff --git a/src/libcore/rt/sched/mod.rs b/src/libcore/rt/sched/mod.rs
index f157e6a80e0..1141ea480c9 100644
--- a/src/libcore/rt/sched/mod.rs
+++ b/src/libcore/rt/sched/mod.rs
@@ -133,6 +133,11 @@ pub impl Scheduler {
         local::take()
     }
 
+    /// Just check whether there is a local scheduler
+    fn have_local() -> bool {
+        local::exists()
+    }
+
     // * Scheduler-context operations
 
     fn resume_task_from_queue(~self) -> bool {
diff --git a/src/rt/rust_builtin.cpp b/src/rt/rust_builtin.cpp
index 475c030e8f2..a0db6f64f69 100644
--- a/src/rt/rust_builtin.cpp
+++ b/src/rt/rust_builtin.cpp
@@ -539,6 +539,11 @@ rust_get_task() {
     return rust_get_current_task();
 }
 
+extern "C" rust_task *
+rust_try_get_task() {
+    return rust_try_get_current_task();
+}
+
 extern "C" CDECL stk_seg *
 rust_get_stack_segment() {
     return rust_get_current_task()->stk;
diff --git a/src/rt/rustrt.def.in b/src/rt/rustrt.def.in
index f63e3f53a7c..5a556ed2107 100644
--- a/src/rt/rustrt.def.in
+++ b/src/rt/rustrt.def.in
@@ -47,6 +47,7 @@ rust_env_pairs
 rust_task_yield
 rust_task_is_unwinding
 rust_get_task
+rust_try_get_task
 rust_get_stack_segment
 rust_log_str
 start_task
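Usage note (not part of the patch): because RuntimeContext derives Eq, callers can compare the value returned by context() directly, or match on it to gate code that needs particular runtime services. A minimal sketch under that assumption follows; in_any_task_context is a hypothetical helper invented here for illustration and is not added by this diff:

    /// Hypothetical helper (illustration only, not in this patch):
    /// report whether full task services (local heap, unwinding) are
    /// available, i.e. whether we are running inside either kind of task.
    fn in_any_task_context() -> bool {
        match context() {
            TaskContext | OldTaskContext => true,
            SchedulerContext | GlobalContext => false
        }
    }

A caller that requires task services could then assert!(in_any_task_context()) before proceeding, mirroring the context assertions used in test_context above.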