summaryrefslogtreecommitdiff
path: root/crates/windows-kernel-rs/src/mdl.rs
diff options
context:
space:
mode:
authorFuwn <[email protected]>2022-01-03 03:20:12 -0800
committerFuwn <[email protected]>2022-01-03 03:20:12 -0800
commit85db2b507f3f69b32811c54a89d9ac7bbbc46121 (patch)
tree2efd66da452f8a6a2cc6c91584c925f237506ddf /crates/windows-kernel-rs/src/mdl.rs
downloaddriver-85db2b507f3f69b32811c54a89d9ac7bbbc46121.tar.xz
driver-85db2b507f3f69b32811c54a89d9ac7bbbc46121.zip
feat(driver): commit primer
Diffstat (limited to 'crates/windows-kernel-rs/src/mdl.rs')
-rw-r--r--crates/windows-kernel-rs/src/mdl.rs117
1 file changed, 117 insertions, 0 deletions
diff --git a/crates/windows-kernel-rs/src/mdl.rs b/crates/windows-kernel-rs/src/mdl.rs
new file mode 100644
index 0000000..7d002f0
--- /dev/null
+++ b/crates/windows-kernel-rs/src/mdl.rs
@@ -0,0 +1,117 @@
+use crate::{error::Error, memory::MemoryCaching};
+
/// Processor access mode for a mapping request, mirroring the kernel's
/// `MODE` enumeration (`KernelMode` / `UserMode`).
///
/// `#[repr(i32)]` with discriminants taken from the generated bindings so
/// the value can be passed straight through to the FFI layer with `as _`.
#[repr(i32)]
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AccessMode {
  KernelMode = windows_kernel_sys::base::_MODE::KernelMode,
  UserMode = windows_kernel_sys::base::_MODE::UserMode,
}
+
/// Owning wrapper around a raw Windows Memory Descriptor List (`MDL`).
///
/// The wrapped pointer is allocated in `new` via `IoAllocateMdl` and
/// released via `IoFreeMdl` in this type's `Drop` impl.
pub struct MemoryDescriptorList {
  // Raw MDL allocated by `IoAllocateMdl`; never null after construction.
  raw: *mut windows_kernel_sys::base::MDL,
}
+
// SAFETY: NOTE(review): the wrapped `*mut MDL` is only touched through the
// kernel MDL APIs used below. This asserts those calls are safe from any
// thread and concurrently through `&self` — TODO confirm against the WDK
// threading requirements for MDL routines before relying on `Sync`.
unsafe impl Send for MemoryDescriptorList {}
unsafe impl Sync for MemoryDescriptorList {}
+
+impl MemoryDescriptorList {
+ pub fn new(addr: *mut core::ffi::c_void, size: usize) -> Result<Self, Error> {
+ use windows_kernel_sys::ntoskrnl::IoAllocateMdl;
+
+ let raw = unsafe {
+ IoAllocateMdl(
+ addr,
+ size as _,
+ false as _,
+ false as _,
+ core::ptr::null_mut(),
+ )
+ };
+
+ if raw.is_null() {
+ return Err(Error::INSUFFICIENT_RESOURCES);
+ }
+
+ Ok(Self {
+ raw,
+ })
+ }
+
+ pub fn build_for_non_paged_pool(&mut self) {
+ use windows_kernel_sys::ntoskrnl::MmBuildMdlForNonPagedPool;
+
+ unsafe {
+ MmBuildMdlForNonPagedPool(self.raw);
+ }
+ }
+
+ pub fn map_locked_pages(
+ self,
+ access: AccessMode,
+ caching: MemoryCaching,
+ desired_addr: Option<*mut core::ffi::c_void>,
+ ) -> Result<LockedMapping, Error> {
+ use windows_kernel_sys::ntoskrnl::MmMapLockedPagesSpecifyCache;
+
+ let ptr = unsafe {
+ MmMapLockedPagesSpecifyCache(
+ self.raw,
+ access as _,
+ caching as _,
+ desired_addr.unwrap_or(core::ptr::null_mut()),
+ false as _,
+ 0,
+ )
+ };
+
+ Ok(LockedMapping {
+ raw: self.raw,
+ ptr,
+ })
+ }
+}
+
impl Drop for MemoryDescriptorList {
  /// Releases the MDL allocation via `IoFreeMdl`.
  fn drop(&mut self) {
    use windows_kernel_sys::ntoskrnl::IoFreeMdl;

    // SAFETY: `self.raw` was returned non-null by `IoAllocateMdl` in `new`
    // and has not been freed through this handle before.
    unsafe {
      IoFreeMdl(self.raw);
    }
  }
}
+
/// A mapped view of the pages described by an MDL, produced by
/// `MemoryDescriptorList::map_locked_pages`.
///
/// Owns both the MDL and the mapping; `Drop` unmaps the pages and frees
/// the MDL.
pub struct LockedMapping {
  // The underlying MDL (ownership transferred from MemoryDescriptorList).
  raw: *mut windows_kernel_sys::base::MDL,
  // Base virtual address returned by `MmMapLockedPagesSpecifyCache`.
  ptr: *mut core::ffi::c_void,
}
+
// SAFETY: NOTE(review): asserts the mapped region and MDL may be accessed
// from any thread and shared behind `&self` — TODO confirm the mapping's
// access-mode/thread constraints (user-mode mappings are process-specific).
unsafe impl Send for LockedMapping {}
unsafe impl Sync for LockedMapping {}
+
+impl LockedMapping {
+ pub fn ptr(&self) -> *mut core::ffi::c_void { self.ptr }
+
+ pub fn unlock(self) -> MemoryDescriptorList {
+ use windows_kernel_sys::ntoskrnl::MmUnmapLockedPages;
+
+ unsafe {
+ MmUnmapLockedPages(self.ptr, self.raw);
+ }
+
+ MemoryDescriptorList {
+ raw: self.raw
+ }
+ }
+}
+
impl Drop for LockedMapping {
  /// Unmaps the pages, then frees the MDL.
  fn drop(&mut self) {
    use windows_kernel_sys::ntoskrnl::{IoFreeMdl, MmUnmapLockedPages};

    // SAFETY: `ptr` is the address returned by MmMapLockedPagesSpecifyCache
    // for `raw`, and neither has been released through this handle before.
    // Order matters: unmap the view before freeing the MDL that backs it.
    unsafe {
      MmUnmapLockedPages(self.ptr, self.raw);
      IoFreeMdl(self.raw);
    }
  }
}