aboutsummaryrefslogtreecommitdiff
path: root/ctr-std/src/sync/once.rs
diff options
context:
space:
mode:
authorFenrirWolf <[email protected]>2018-08-19 18:01:18 -0600
committerGitHub <[email protected]>2018-08-19 18:01:18 -0600
commit15cb3c1e91842a68a8e50e1e1a42aefab13cc25e (patch)
treea514fde042ff2a504a03305bfe0894ff8cd8d47e /ctr-std/src/sync/once.rs
parentUpdate for latest nightly 2018-06-09 (#70) (diff)
parentUpdate for nightly-2018-08-18 (diff)
downloadctru-rs-15cb3c1e91842a68a8e50e1e1a42aefab13cc25e.tar.xz
ctru-rs-15cb3c1e91842a68a8e50e1e1a42aefab13cc25e.zip
Merge pull request #73 from FenrirWolf/update-2018-08-18
Update for nightly-2018-08-18
Diffstat (limited to 'ctr-std/src/sync/once.rs')
-rw-r--r--ctr-std/src/sync/once.rs40
1 files changed, 25 insertions, 15 deletions
diff --git a/ctr-std/src/sync/once.rs b/ctr-std/src/sync/once.rs
index 7eb7be2..f6cb8be 100644
--- a/ctr-std/src/sync/once.rs
+++ b/ctr-std/src/sync/once.rs
@@ -31,12 +31,10 @@
// initialization closure panics, the Once enters a "poisoned" state which means
// that all future calls will immediately panic as well.
//
-// So to implement this, one might first reach for a `StaticMutex`, but those
-// unfortunately need to be deallocated (e.g. call `destroy()`) to free memory
-// on all OSes (some of the BSDs allocate memory for mutexes). It also gets a
-// lot harder with poisoning to figure out when the mutex needs to be
-// deallocated because it's not after the closure finishes, but after the first
-// successful closure finishes.
+// So to implement this, one might first reach for a `Mutex`, but those cannot
+// be put into a `static`. It also gets a lot harder with poisoning to figure
+// out when the mutex needs to be deallocated because it's not after the closure
+// finishes, but after the first successful closure finishes.
//
// All in all, this is instead implemented with atomics and lock-free
// operations! Whee! Each `Once` has one word of atomic state, and this state is
@@ -149,9 +147,9 @@ struct Waiter {
// Helper struct used to clean up after a closure call with a `Drop`
// implementation to also run on panic.
-struct Finish {
+struct Finish<'a> {
panicked: bool,
- me: &'static Once,
+ me: &'a Once,
}
impl Once {
@@ -178,6 +176,10 @@ impl Once {
/// happens-before relation between the closure and code executing after the
/// return).
///
+    /// If the given closure recursively invokes `call_once` on the same `Once`
+    /// instance, the exact behavior is not specified: allowed outcomes are
+    /// a panic or a deadlock.
+ ///
/// # Examples
///
/// ```
@@ -218,9 +220,13 @@ impl Once {
///
/// [poison]: struct.Mutex.html#poisoning
#[stable(feature = "rust1", since = "1.0.0")]
- pub fn call_once<F>(&'static self, f: F) where F: FnOnce() {
+ pub fn call_once<F>(&self, f: F) where F: FnOnce() {
// Fast path, just see if we've completed initialization.
- if self.state.load(Ordering::SeqCst) == COMPLETE {
+ // An `Acquire` load is enough because that makes all the initialization
+ // operations visible to us. The cold path uses SeqCst consistently
+ // because the performance difference really does not matter there,
+ // and SeqCst minimizes the chances of something going wrong.
+ if self.state.load(Ordering::Acquire) == COMPLETE {
return
}
@@ -275,9 +281,13 @@ impl Once {
/// INIT.call_once(|| {});
/// ```
#[unstable(feature = "once_poison", issue = "33577")]
- pub fn call_once_force<F>(&'static self, f: F) where F: FnOnce(&OnceState) {
+ pub fn call_once_force<F>(&self, f: F) where F: FnOnce(&OnceState) {
// same as above, just with a different parameter to `call_inner`.
- if self.state.load(Ordering::SeqCst) == COMPLETE {
+ // An `Acquire` load is enough because that makes all the initialization
+ // operations visible to us. The cold path uses SeqCst consistently
+ // because the performance difference really does not matter there,
+ // and SeqCst minimizes the chances of something going wrong.
+ if self.state.load(Ordering::Acquire) == COMPLETE {
return
}
@@ -299,9 +309,9 @@ impl Once {
// currently no way to take an `FnOnce` and call it via virtual dispatch
// without some allocation overhead.
#[cold]
- fn call_inner(&'static self,
+ fn call_inner(&self,
ignore_poisoning: bool,
- init: &mut FnMut(bool)) {
+ init: &mut dyn FnMut(bool)) {
let mut state = self.state.load(Ordering::SeqCst);
'outer: loop {
@@ -390,7 +400,7 @@ impl fmt::Debug for Once {
}
}
-impl Drop for Finish {
+impl<'a> Drop for Finish<'a> {
fn drop(&mut self) {
// Swap out our state with however we finished. We should only ever see
// an old state which was RUNNING.