aboutsummaryrefslogtreecommitdiff
path: root/ctr-std/src/sys/unix/rwlock.rs
diff options
context:
space:
mode:
Diffstat (limited to 'ctr-std/src/sys/unix/rwlock.rs')
-rw-r--r--  ctr-std/src/sys/unix/rwlock.rs  173
1 file changed, 94 insertions, 79 deletions
diff --git a/ctr-std/src/sys/unix/rwlock.rs b/ctr-std/src/sys/unix/rwlock.rs
index 4a802e9..c754d5b 100644
--- a/ctr-std/src/sys/unix/rwlock.rs
+++ b/ctr-std/src/sys/unix/rwlock.rs
@@ -1,4 +1,4 @@
-// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
@@ -8,16 +8,14 @@
// option. This file may not be copied, modified, or distributed
// except according to those terms.
+use libc;
use cell::UnsafeCell;
-use super::mutex::Mutex;
-use super::condvar::Condvar;
+use sync::atomic::{AtomicUsize, Ordering};
-// A simple read-preferring RWLock implementation that I found on wikipedia <.<
pub struct RWLock {
- mutex: Mutex,
- cvar: Condvar,
- reader_count: UnsafeCell<u32>,
- writer_active: UnsafeCell<bool>,
+ inner: UnsafeCell<libc::pthread_rwlock_t>,
+ write_locked: UnsafeCell<bool>,
+ num_readers: AtomicUsize,
}
unsafe impl Send for RWLock {}
@@ -26,102 +24,119 @@ unsafe impl Sync for RWLock {}
impl RWLock {
pub const fn new() -> RWLock {
RWLock {
- mutex: Mutex::new(),
- cvar: Condvar::new(),
- reader_count: UnsafeCell::new(0),
- writer_active: UnsafeCell::new(false),
+ inner: UnsafeCell::new(libc::PTHREAD_RWLOCK_INITIALIZER),
+ write_locked: UnsafeCell::new(false),
+ num_readers: AtomicUsize::new(0),
}
}
-
#[inline]
pub unsafe fn read(&self) {
- self.mutex.lock();
-
- while *self.writer_active.get() {
- self.cvar.wait(&self.mutex);
+ let r = libc::pthread_rwlock_rdlock(self.inner.get());
+
+ // According to the pthread_rwlock_rdlock spec, this function **may**
+ // fail with EDEADLK if a deadlock is detected. On the other hand
+ // pthread mutexes will *never* return EDEADLK if they are initialized
+ // as the "fast" kind (which ours always are). As a result, a deadlock
+ // situation may actually return from the call to pthread_rwlock_rdlock
+ // instead of blocking forever (as mutexes and Windows rwlocks do). Note
+ // that not all unix implementations, however, will return EDEADLK for
+ // their rwlocks.
+ //
+ // We roughly maintain the deadlocking behavior by panicking to ensure
+ // that this lock acquisition does not succeed.
+ //
+ // We also check whether this lock is already write locked. This
+ // is only possible if it was write locked by the current thread and
+ // the implementation allows recursive locking. The POSIX standard
+ // doesn't require recursively locking a rwlock to deadlock, but we can't
+ // allow that because it could lead to aliasing issues.
+ if r == libc::EAGAIN {
+ panic!("rwlock maximum reader count exceeded");
+ } else if r == libc::EDEADLK || *self.write_locked.get() {
+ if r == 0 {
+ self.raw_unlock();
+ }
+ panic!("rwlock read lock would result in deadlock");
+ } else {
+ debug_assert_eq!(r, 0);
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
}
-
- assert!(*self.reader_count.get() != u32::max_value());
- *self.reader_count.get() += 1;
-
- self.mutex.unlock();
}
-
#[inline]
pub unsafe fn try_read(&self) -> bool {
- if !self.mutex.try_lock() {
- return false
- }
-
- while *self.writer_active.get() {
- self.cvar.wait(&self.mutex);
+ let r = libc::pthread_rwlock_tryrdlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() {
+ self.raw_unlock();
+ false
+ } else {
+ self.num_readers.fetch_add(1, Ordering::Relaxed);
+ true
+ }
+ } else {
+ false
}
-
- assert!(*self.reader_count.get() != u32::max_value());
- *self.reader_count.get() += 1;
-
- self.mutex.unlock();
- true
}
-
#[inline]
pub unsafe fn write(&self) {
- self.mutex.lock();
-
- while *self.writer_active.get() || *self.reader_count.get() > 0 {
- self.cvar.wait(&self.mutex);
+ let r = libc::pthread_rwlock_wrlock(self.inner.get());
+ // See comments above for why we check for EDEADLK and write_locked. We
+ // also need to check that num_readers is 0.
+ if r == libc::EDEADLK || *self.write_locked.get() ||
+ self.num_readers.load(Ordering::Relaxed) != 0 {
+ if r == 0 {
+ self.raw_unlock();
+ }
+ panic!("rwlock write lock would result in deadlock");
+ } else {
+ debug_assert_eq!(r, 0);
}
-
- *self.writer_active.get() = true;
-
- self.mutex.unlock();
+ *self.write_locked.get() = true;
}
-
#[inline]
pub unsafe fn try_write(&self) -> bool {
- if !self.mutex.try_lock() {
- return false;
- }
-
- while *self.writer_active.get() || *self.reader_count.get() > 0 {
- self.cvar.wait(&self.mutex);
+ let r = libc::pthread_rwlock_trywrlock(self.inner.get());
+ if r == 0 {
+ if *self.write_locked.get() || self.num_readers.load(Ordering::Relaxed) != 0 {
+ self.raw_unlock();
+ false
+ } else {
+ *self.write_locked.get() = true;
+ true
+ }
+ } else {
+ false
}
-
- *self.writer_active.get() = true;
-
- self.mutex.unlock();
- true
}
-
+ #[inline]
+ unsafe fn raw_unlock(&self) {
+ let r = libc::pthread_rwlock_unlock(self.inner.get());
+ debug_assert_eq!(r, 0);
+ }
#[inline]
pub unsafe fn read_unlock(&self) {
- self.mutex.lock();
-
- *self.reader_count.get() -= 1;
-
- if *self.reader_count.get() == 0 {
- self.cvar.notify_one()
- }
-
- self.mutex.unlock();
+ debug_assert!(!*self.write_locked.get());
+ self.num_readers.fetch_sub(1, Ordering::Relaxed);
+ self.raw_unlock();
}
-
#[inline]
pub unsafe fn write_unlock(&self) {
- self.mutex.lock();
-
- *self.writer_active.get() = false;
-
- self.cvar.notify_all();
-
- self.mutex.unlock();
+ debug_assert_eq!(self.num_readers.load(Ordering::Relaxed), 0);
+ debug_assert!(*self.write_locked.get());
+ *self.write_locked.get() = false;
+ self.raw_unlock();
}
-
#[inline]
pub unsafe fn destroy(&self) {
- self.mutex.destroy();
- self.cvar.destroy();
- *self.reader_count.get() = 0;
- *self.writer_active.get() = false;
+ let r = libc::pthread_rwlock_destroy(self.inner.get());
+ // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
+ // rwlock that was just initialized with
+ // libc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
+ // or pthread_rwlock_init() is called, this behaviour no longer occurs.
+ if cfg!(target_os = "dragonfly") {
+ debug_assert!(r == 0 || r == libc::EINVAL);
+ } else {
+ debug_assert_eq!(r, 0);
+ }
}
}