From bc7cee7bbf816be7a712c06a93015dc3c6fd5611 Mon Sep 17 00:00:00 2001
From: Ben Blum <bblum@andrew.cmu.edu>
Date: Tue, 30 Jul 2013 21:38:44 -0400
Subject: [PATCH] Move atomically to unstable::sync, and document what it
 actually does. Close #7872.

---
 src/libstd/rt/kill.rs              |  2 +-
 src/libstd/task/mod.rs             | 53 ------------------------------
 src/libstd/unstable/dynamic_lib.rs |  8 ++---
 src/libstd/unstable/mod.rs         |  4 +--
 src/libstd/unstable/sync.rs        | 52 +++++++++++++++++++++++++++--
 5 files changed, 57 insertions(+), 62 deletions(-)

diff --git a/src/libstd/rt/kill.rs b/src/libstd/rt/kill.rs
index e691bf51ea5..c2571f171a1 100644
--- a/src/libstd/rt/kill.rs
+++ b/src/libstd/rt/kill.rs
@@ -84,7 +84,7 @@ pub struct Death {
     on_exit:         Option<~fn(bool)>,
     // nesting level counter for task::unkillable calls (0 == killable).
     unkillable:      int,
-    // nesting level counter for task::atomically calls (0 == can yield).
+    // nesting level counter for unstable::sync::atomically calls (0 == can yield).
     wont_sleep:      int,
     // A "spare" handle to the kill flag inside the kill handle. Used during
     // blocking/waking as an optimization to avoid two xadds on the refcount.
diff --git a/src/libstd/task/mod.rs b/src/libstd/task/mod.rs
index 29a0dc9e337..aff4bc12039 100644
--- a/src/libstd/task/mod.rs
+++ b/src/libstd/task/mod.rs
@@ -655,44 +655,6 @@ pub unsafe fn rekillable<U>(f: &fn() -> U) -> U {
     }
 }
 
-/**
- * A stronger version of unkillable that also inhibits scheduling operations.
- * For use with exclusive Arcs, which use pthread mutexes directly.
- */
-pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
-    use rt::task::Task;
-
-    match context() {
-        OldTaskContext => {
-            let t = rt::rust_get_task();
-            do (|| {
-                rt::rust_task_inhibit_kill(t);
-                rt::rust_task_inhibit_yield(t);
-                f()
-            }).finally {
-                rt::rust_task_allow_yield(t);
-                rt::rust_task_allow_kill(t);
-            }
-        }
-        TaskContext => {
-            let t = Local::unsafe_borrow::<Task>();
-            do (|| {
-                // It's important to inhibit kill after inhibiting yield, because
-                // inhibit-kill might fail if we were already killed, and the
-                // inhibit-yield must happen to match the finally's allow-yield.
-                (*t).death.inhibit_yield();
-                (*t).death.inhibit_kill((*t).unwinder.unwinding);
-                f()
-            }).finally {
-                (*t).death.allow_kill((*t).unwinder.unwinding);
-                (*t).death.allow_yield();
-            }
-        }
-        // FIXME(#3095): As in unkillable().
-        _ => f()
-    }
-}
-
 #[test] #[should_fail] #[ignore(cfg(windows))]
 fn test_cant_dup_task_builder() {
     let mut builder = task();
@@ -1177,21 +1139,6 @@ fn test_unkillable_nested() {
     po.recv();
 }
 
-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically() {
-    unsafe { do atomically { yield(); } }
-}
-
-#[test]
-fn test_atomically2() {
-    unsafe { do atomically { } } yield(); // shouldn't fail
-}
-
-#[test] #[should_fail] #[ignore(cfg(windows))]
-fn test_atomically_nested() {
-    unsafe { do atomically { do atomically { } yield(); } }
-}
-
 #[test]
 fn test_child_doesnt_ref_parent() {
     // If the child refcounts the parent task, this will stack overflow when
diff --git a/src/libstd/unstable/dynamic_lib.rs b/src/libstd/unstable/dynamic_lib.rs
index e01f99adc94..ab44520454d 100644
--- a/src/libstd/unstable/dynamic_lib.rs
+++ b/src/libstd/unstable/dynamic_lib.rs
@@ -105,7 +105,7 @@ mod dl {
     use path;
     use ptr;
     use str;
-    use task;
+    use unstable::sync::atomically;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -120,7 +120,7 @@ mod dl {
 
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 let _old_error = dlerror();
 
                 let result = f();
@@ -164,7 +164,7 @@ mod dl {
     use libc;
     use path;
     use ptr;
-    use task;
+    use unstable::sync::atomically;
     use result::*;
 
     pub unsafe fn open_external(filename: &path::Path) -> *libc::c_void {
@@ -181,7 +181,7 @@ mod dl {
 
     pub fn check_for_errors_in<T>(f: &fn()->T) -> Result<T, ~str> {
         unsafe {
-            do task::atomically {
+            do atomically {
                 SetLastError(0);
 
                 let result = f();
diff --git a/src/libstd/unstable/mod.rs b/src/libstd/unstable/mod.rs
index 313567d1248..f721dd47a66 100644
--- a/src/libstd/unstable/mod.rs
+++ b/src/libstd/unstable/mod.rs
@@ -85,7 +85,7 @@ fn test_run_in_bare_thread_exchange() {
 pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
     use os;
     use os::change_dir;
-    use task;
+    use unstable::sync::atomically;
     use unstable::finally::Finally;
 
     unsafe {
@@ -93,7 +93,7 @@ pub fn change_dir_locked(p: &Path, action: &fn()) -> bool {
         // in the `action` callback can cause deadlock. Doing it in
         // `task::atomically` to try to avoid that, but ... I don't know
         // this is all bogus.
-        return do task::atomically {
+        return do atomically {
             rust_take_change_dir_lock();
 
             do (||{
diff --git a/src/libstd/unstable/sync.rs b/src/libstd/unstable/sync.rs
index 4c52d897a72..c52e4739edd 100644
--- a/src/libstd/unstable/sync.rs
+++ b/src/libstd/unstable/sync.rs
@@ -16,7 +16,6 @@ use ptr;
 use option::*;
 use either::{Either, Left, Right};
 use task;
-use task::atomically;
 use unstable::atomics::{AtomicOption,AtomicUint,Acquire,Release,SeqCst};
 use unstable::finally::Finally;
 use ops::Drop;
@@ -271,6 +270,48 @@ impl<T> Drop for UnsafeAtomicRcBox<T>{
 
 /****************************************************************************/
 
+/**
+ * Enables a runtime assertion that no operation in the argument closure shall
+ * use scheduler operations (yield, recv, spawn, etc). This is for use with
+ * pthread mutexes, which may block the entire scheduler thread, rather than
+ * just one task, and is hence prone to deadlocks if mixed with yielding.
+ *
+ * NOTE: THIS DOES NOT PROVIDE LOCKING, or any sort of critical-section
+ * synchronization whatsoever. It only makes sense to use for CPU-local issues.
+ */
+// FIXME(#8140) should not be pub
+pub unsafe fn atomically<U>(f: &fn() -> U) -> U {
+    use rt::task::Task;
+    use task::rt;
+    use rt::local::Local;
+    use rt::{context, OldTaskContext, TaskContext};
+
+    match context() {
+        OldTaskContext => {
+            let t = rt::rust_get_task();
+            do (|| {
+                rt::rust_task_inhibit_kill(t);
+                rt::rust_task_inhibit_yield(t);
+                f()
+            }).finally {
+                rt::rust_task_allow_yield(t);
+                rt::rust_task_allow_kill(t);
+            }
+        }
+        TaskContext => {
+            let t = Local::unsafe_borrow::<Task>();
+            do (|| {
+                (*t).death.inhibit_yield();
+                f()
+            }).finally {
+                (*t).death.allow_yield();
+            }
+        }
+        // FIXME(#3095): As in unkillable().
+        _ => f()
+    }
+}
+
 #[allow(non_camel_case_types)] // runtime type
 type rust_little_lock = *libc::c_void;
 
@@ -395,11 +436,18 @@ mod tests {
     use cell::Cell;
     use comm;
     use option::*;
-    use super::{Exclusive, UnsafeAtomicRcBox};
+    use super::{Exclusive, UnsafeAtomicRcBox, atomically};
     use task;
     use uint;
     use util;
 
+    #[test]
+    fn test_atomically() {
+        // NB. The whole runtime will abort on an 'atomic-sleep' violation,
+        // so we can't really test for the converse behaviour.
+        unsafe { do atomically { } } task::yield(); // oughtn't fail
+    }
+
     #[test]
     fn exclusive_new_arc() {
         unsafe {