summaryrefslogtreecommitdiff
path: root/crates/windows-kernel-rs/src/sync
diff options
context:
space:
mode:
authorFuwn <[email protected]>2022-01-03 03:20:12 -0800
committerFuwn <[email protected]>2022-01-03 03:20:12 -0800
commit85db2b507f3f69b32811c54a89d9ac7bbbc46121 (patch)
tree2efd66da452f8a6a2cc6c91584c925f237506ddf /crates/windows-kernel-rs/src/sync
downloaddriver-85db2b507f3f69b32811c54a89d9ac7bbbc46121.tar.xz
driver-85db2b507f3f69b32811c54a89d9ac7bbbc46121.zip
feat(driver): commit primer
Diffstat (limited to 'crates/windows-kernel-rs/src/sync')
-rw-r--r--crates/windows-kernel-rs/src/sync/fast_mutex.rs137
-rw-r--r--crates/windows-kernel-rs/src/sync/mod.rs4
-rw-r--r--crates/windows-kernel-rs/src/sync/push_lock.rs185
3 files changed, 326 insertions, 0 deletions
diff --git a/crates/windows-kernel-rs/src/sync/fast_mutex.rs b/crates/windows-kernel-rs/src/sync/fast_mutex.rs
new file mode 100644
index 0000000..9a82524
--- /dev/null
+++ b/crates/windows-kernel-rs/src/sync/fast_mutex.rs
@@ -0,0 +1,137 @@
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell,
+ ops::{Deref, DerefMut},
+};
+
+use windows_kernel_sys::{
+ base::FAST_MUTEX,
+ ntoskrnl::{
+ ExAcquireFastMutex,
+ ExInitializeFastMutex,
+ ExReleaseFastMutex,
+ ExTryToAcquireFastMutex,
+ },
+};
+
+/// A mutual exclusion primitive useful for protecting shared data.
+///
+/// This mutex will block threads waiting for the lock to become available. The
+/// mutex can also be statically initialized or created via a [`new`]
+/// constructor. Each mutex has a type parameter which represents the data that
+/// it is protecting. The data can only be accessed through the RAII
+/// guards returned from [`lock`] and [`try_lock`], which guarantees that the
+/// data is only ever accessed when the mutex is locked.
+///
+/// [`new`]: FastMutex::new
+/// [`lock`]: FastMutex::lock
+/// [`try_lock`]: FastMutex::try_lock
+pub struct FastMutex<T: ?Sized> {
+ pub(crate) lock: Box<FAST_MUTEX>,
+ pub(crate) data: UnsafeCell<T>,
+}
+
+unsafe impl<T> Send for FastMutex<T> {}
+unsafe impl<T> Sync for FastMutex<T> {}
+
+impl<T> FastMutex<T> {
+ /// Creates a new mutex in an unlocked state ready for use.
+ pub fn new(data: T) -> Self {
+ let mut lock: Box<FAST_MUTEX> = Box::new(unsafe { core::mem::zeroed() });
+
+ unsafe { ExInitializeFastMutex(&mut *lock) };
+
+ Self {
+ lock,
+ data: UnsafeCell::new(data),
+ }
+ }
+
+ /// Consumes this `FastMutex`, returning the underlying data.
+ #[inline]
+ pub fn into_inner(self) -> T {
+ let Self {
+ data, ..
+ } = self;
+ data.into_inner()
+ }
+
+ /// Attempts to acquire this lock.
+ ///
+ /// If the lock could not be acquired at this time, then `None` is returned.
+ /// Otherwise, an RAII guard is returned. The lock will be unlocked when the
+ /// guard is dropped.
+ ///
+ /// This function does not block.
+ #[inline]
+ pub fn try_lock(&mut self) -> Option<FastMutexGuard<T>> {
+ let status = unsafe { ExTryToAcquireFastMutex(&mut *self.lock) } != 0;
+
+ match status {
+ true =>
+ Some(FastMutexGuard {
+ lock: &mut self.lock,
+ data: unsafe { &mut *self.data.get() },
+ }),
+ _ => None,
+ }
+ }
+
+ /// Acquires a mutex, blocking the current thread until it is able to do so.
+ ///
+ /// This function will block the local thread until it is available to acquire
+ /// the mutex. Upon returning, the thread is the only thread with the lock
+ /// held. An RAII guard is returned to allow scoped unlock of the lock. When
+ /// the guard goes out of scope, the mutex will be unlocked.
+ ///
+ /// The underlying function does not allow for recursion. If the thread
+ /// already holds the lock and tries to lock the mutex again, this function
+ /// will return `None` instead.
+ #[inline]
+ pub fn lock(&mut self) -> Option<FastMutexGuard<T>> {
+ unsafe { ExAcquireFastMutex(&mut *self.lock) };
+
+ Some(FastMutexGuard {
+ lock: &mut self.lock,
+ data: unsafe { &mut *self.data.get() },
+ })
+ }
+}
+
+impl<T: ?Sized + Default> Default for FastMutex<T> {
+ fn default() -> Self { Self::new(T::default()) }
+}
+
+impl<T> From<T> for FastMutex<T> {
+ fn from(data: T) -> Self { Self::new(data) }
+}
+
/// An RAII implementation of a "scoped lock" of a mutex. When this structure is
/// dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] and [`DerefMut`] implementations.
///
/// This structure is created by the [`lock`] and [`try_lock`] methods on
/// [`FastMutex`].
///
/// [`lock`]: FastMutex::lock
/// [`try_lock`]: FastMutex::try_lock
pub struct FastMutexGuard<'a, T: 'a + ?Sized> {
  // Exclusive borrow of the owning mutex's FAST_MUTEX; released in Drop.
  // Because it is a `&mut` borrow of the mutex, a second guard cannot
  // coexist through the same reference.
  pub(crate) lock: &'a mut FAST_MUTEX,
  // Exclusive view of the protected data, valid while the lock is held.
  pub(crate) data: &'a mut T,
}
+
impl<'a, T: ?Sized> Drop for FastMutexGuard<'a, T> {
  // Dropping the guard releases the kernel fast mutex.
  // SAFETY: `self.lock` was acquired by the FastMutex method that created
  // this guard and has not been released since.
  fn drop(&mut self) { unsafe { ExReleaseFastMutex(&mut *self.lock) }; }
}
+
+impl<'a, T: ?Sized> Deref for FastMutexGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T { self.data }
+}
+
+impl<'a, T: ?Sized> DerefMut for FastMutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T { self.data }
+}
diff --git a/crates/windows-kernel-rs/src/sync/mod.rs b/crates/windows-kernel-rs/src/sync/mod.rs
new file mode 100644
index 0000000..b024c55
--- /dev/null
+++ b/crates/windows-kernel-rs/src/sync/mod.rs
@@ -0,0 +1,4 @@
// Kernel-backed synchronization primitives.
pub mod fast_mutex;
pub mod push_lock;

// Re-export under std-like names so call sites read as `sync::Mutex` /
// `sync::RwLock`, mirroring `std::sync`.
pub use self::{fast_mutex::FastMutex as Mutex, push_lock::PushLock as RwLock};
diff --git a/crates/windows-kernel-rs/src/sync/push_lock.rs b/crates/windows-kernel-rs/src/sync/push_lock.rs
new file mode 100644
index 0000000..71b016a
--- /dev/null
+++ b/crates/windows-kernel-rs/src/sync/push_lock.rs
@@ -0,0 +1,185 @@
+use alloc::boxed::Box;
+use core::{
+ cell::UnsafeCell,
+ ops::{Deref, DerefMut},
+};
+
+use windows_kernel_sys::{
+ base::EX_PUSH_LOCK,
+ ntoskrnl::{
+ ExAcquirePushLockExclusive,
+ ExAcquirePushLockShared,
+ ExInitializePushLock,
+ ExReleasePushLockExclusive,
+ ExReleasePushLockShared,
+ KeEnterCriticalRegion,
+ KeLeaveCriticalRegion,
+ },
+};
+
+/// A [`PushLock`] is an efficient implementation of a reader-writer lock that
+/// can be stored both in paged and non-paged memory.
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modifications
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// In comparison, a [`FastMutex`] does not distinguish between readers or
+/// writers that acquire the lock, therefore blocking any threads waiting for
+/// the lock to become available. A [`PushLock`] will allow any number of
+/// readers to acquire the lock as long as a writer is not holding the lock.
+///
+/// The priority policy is such that a thread trying to acquire the [`PushLock`]
+/// for exclusive access will be prioritized over threads trying to acquire the
+/// [`PushLock`] for shared access. More specifically, if a thread cannot lock
+/// the [`PushLock`] for exclusive access immediately, it will wait for the
+/// thread(s) that currently holds the lock to release the lock. If another
+/// thread tries to acquire the [`PushLock`] for shared access while a thread is
+/// waiting to acquire the lock for exclusive access, it will yield to the
+/// thread(s) trying to acquire the [`PushLock`] for exclusive access, even in
+/// the event that the [`PushLock`] is acquired for shared access.
+///
+/// [`FastMutex`]: crate::fast_mutex::FastMutex
+pub struct PushLock<T: ?Sized> {
+ pub(crate) lock: Box<EX_PUSH_LOCK>,
+ pub(crate) data: UnsafeCell<T>,
+}
+
+unsafe impl<T> Send for PushLock<T> {}
+unsafe impl<T> Sync for PushLock<T> {}
+
impl<T> PushLock<T> {
  /// Creates new instance of [`PushLock<T>`] that is unlocked.
  pub fn new(data: T) -> Self {
    // An EX_PUSH_LOCK is a single integer word; it is boxed so it has a
    // stable address, then initialized in place by the kernel.
    let mut lock: Box<EX_PUSH_LOCK> = Box::new(0);

    // SAFETY: `lock` points to valid, writable, exclusively-owned storage.
    unsafe { ExInitializePushLock(&mut *lock) };

    Self {
      lock,
      data: UnsafeCell::new(data),
    }
  }

  /// Consumes this [`PushLock`], returning the underlying data.
  ///
  /// Taking `self` by value proves no guard is outstanding, so the cell is
  /// unwrapped without touching the lock.
  #[inline]
  pub fn into_inner(self) -> T {
    let Self {
      data, ..
    } = self;
    data.into_inner()
  }

  /// Locks this [`PushLock`] with shared read access, blocking the current
  /// thread until it can be acquired.
  ///
  /// The calling thread will be blocked until there are no more writers which
  /// hold the lock. There may be other readers currently inside the lock when
  /// this method returns.
  ///
  /// This function will yield to threads waiting to acquire the [`PushLock`]
  /// for exclusive access, even in the event that the [`PushLock`] is
  /// currently held by one or more threads for shared access.
  ///
  /// While the underlying function does allow for recursion, this atomically
  /// increments a shared reader counter. Since dropping the RAII guard
  /// releases the lock by atomically decrementing this shared counter, it
  /// will eventually reach zero once all RAII guards have been dropped.
  ///
  /// NOTE(review): this always returns `Some`; the `Option` cannot signal
  /// failure. Also, the `&mut self` receiver means at most one guard can
  /// exist through a given reference, which prevents concurrent readers via
  /// safe code — confirm whether a `&self` receiver was intended.
  #[inline]
  pub fn read(&mut self) -> Option<PushLockReadGuard<T>> {
    // Acquiring a push lock requires the thread to be inside a critical
    // region (normal kernel APCs disabled); left again when the guard drops.
    unsafe { KeEnterCriticalRegion() };

    // SAFETY: `self.lock` was initialized in `new`; blocks until shared
    // access is granted.
    unsafe { ExAcquirePushLockShared(&mut *self.lock) };

    Some(PushLockReadGuard {
      lock: &mut self.lock,
      // SAFETY: shared access is held; the guard exposes the data as `&T`.
      data: unsafe { &mut *self.data.get() },
    })
  }

  /// Locks this [`PushLock`] with exclusive write access, blocking the current
  /// thread until it can be acquired.
  ///
  /// This function will not return while other writers or other readers
  /// currently have access to the lock.
  ///
  /// Returns an RAII guard which will drop the write access of this
  /// [`PushLock`] when dropped.
  ///
  /// This thread will take priority over any threads that are trying to acquire
  /// the lock for shared access but that do not currently hold the lock for
  /// shared access.
  ///
  /// NOTE(review): always returns `Some` — recursive exclusive acquisition is
  /// not detected here and is illegal for push locks; verify callers never
  /// re-enter.
  #[inline]
  pub fn write(&mut self) -> Option<PushLockWriteGuard<T>> {
    // Enter a critical region for the duration of the lock hold; exited in
    // the guard's Drop.
    unsafe { KeEnterCriticalRegion() };

    // SAFETY: `self.lock` is a valid, initialized EX_PUSH_LOCK; blocks
    // until exclusive access is granted.
    unsafe { ExAcquirePushLockExclusive(&mut *self.lock) };

    Some(PushLockWriteGuard {
      lock: &mut self.lock,
      // SAFETY: exclusive access is held, so this unique reference to the
      // data is valid for the guard's lifetime.
      data: unsafe { &mut *self.data.get() },
    })
  }
}
+
/// RAII structure used to release the shared read access of a lock when
/// dropped.
///
/// This structure is created by the [`read`] method on [`PushLock`].
/// (A former reference to `try_read` was removed: no such method exists.)
///
/// [`read`]: PushLock::read
pub struct PushLockReadGuard<'a, T: 'a + ?Sized> {
  // Exclusive borrow of the lock word; the shared hold is released in Drop.
  pub(crate) lock: &'a mut EX_PUSH_LOCK,
  // Shared view of the protected data, valid while the lock is held.
  pub(crate) data: &'a T,
}
+
impl<'a, T: ?Sized> Drop for PushLockReadGuard<'a, T> {
  fn drop(&mut self) {
    // SAFETY: shared access was acquired by `PushLock::read` when this
    // guard was created and has not been released since.
    unsafe { ExReleasePushLockShared(&mut *self.lock) };

    // Leave the critical region entered in `PushLock::read`; must happen
    // after the lock is released, mirroring the acquire order.
    unsafe { KeLeaveCriticalRegion() };
  }
}
+
+impl<'a, T: ?Sized> Deref for PushLockReadGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T { self.data }
+}
+
/// RAII structure used to release the exclusive write access of a lock when
/// dropped.
///
/// This structure is created by the [`write`] method on [`PushLock`].
/// (A former reference to `try_write` was removed: no such method exists.)
///
/// [`write`]: PushLock::write
pub struct PushLockWriteGuard<'a, T: 'a + ?Sized> {
  // Exclusive borrow of the lock word; the exclusive hold is released in Drop.
  pub(crate) lock: &'a mut EX_PUSH_LOCK,
  // Unique view of the protected data, valid while the lock is held.
  pub(crate) data: &'a mut T,
}
+
impl<'a, T: ?Sized> Drop for PushLockWriteGuard<'a, T> {
  fn drop(&mut self) {
    // SAFETY: exclusive access was acquired by `PushLock::write` when this
    // guard was created and has not been released since.
    unsafe { ExReleasePushLockExclusive(&mut *self.lock) };

    // Leave the critical region entered in `PushLock::write`, after the
    // lock release.
    unsafe { KeLeaveCriticalRegion() };
  }
}
+
+impl<'a, T: ?Sized> Deref for PushLockWriteGuard<'a, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T { self.data }
+}
+
+impl<'a, T: ?Sized> DerefMut for PushLockWriteGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T { self.data }
+}