aboutsummaryrefslogtreecommitdiff
path: root/src/zencore/memory/mallocstomp.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/zencore/memory/mallocstomp.cpp')
-rw-r--r--src/zencore/memory/mallocstomp.cpp283
1 files changed, 283 insertions, 0 deletions
diff --git a/src/zencore/memory/mallocstomp.cpp b/src/zencore/memory/mallocstomp.cpp
new file mode 100644
index 000000000..db9e1535e
--- /dev/null
+++ b/src/zencore/memory/mallocstomp.cpp
@@ -0,0 +1,283 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+#include <zencore/memory/mallocstomp.h>
+
+#if ZEN_WITH_MALLOC_STOMP
+
+# include <zencore/memory/align.h>
+# include <zencore/xxhash.h>
+
+# if ZEN_PLATFORM_LINUX
+# include <sys/mman.h>
+# endif
+
+# if ZEN_PLATFORM_WINDOWS
+# include <zencore/windows.h>
+# endif
+
+# if ZEN_PLATFORM_WINDOWS
+// MallocStomp can keep virtual address range reserved after memory block is freed, while releasing the physical memory.
+// This dramatically increases accuracy of use-after-free detection, but consumes significant amount of memory for the OS page table.
+// Virtual memory limit for a process on Win10 is 128 TB, which means we can afford to keep virtual memory reserved for a very long time.
+// Running Infiltrator demo consumes ~700MB of virtual address space per second.
+# define MALLOC_STOMP_KEEP_VIRTUAL_MEMORY 1
+# else
+# define MALLOC_STOMP_KEEP_VIRTUAL_MEMORY 0
+# endif
+
+// 64-bit ABIs on x86_64 expect a 16-byte alignment
+# define STOMPALIGNMENT 16U
+
+namespace zen {
+
// Per-allocation header stored immediately before every pointer handed out by
// FMallocStomp. Free()/GetAllocationSize() read it back at [ptr - 1]. The
// Sentinel field lets Free() detect that this header was trampled (underrun).
struct FMallocStomp::FAllocationData
{
	/** Pointer to the full allocation. Needed so the OS knows what to free. */
	void* FullAllocationPointer;
	/** Full size of the allocation including the extra page. */
	size_t FullSize;
	/** Size of the allocation requested. */
	size_t Size;
	/** Sentinel used to check for underrun. */
	size_t Sentinel;

	/** Calculate the expected sentinel value for this allocation data.
	 *  Hashes every field preceding Sentinel (offsetof stops exactly there)
	 *  with XXH3-128 and folds the first sizeof(size_t) bytes of the digest
	 *  into the return value, so any corruption of the header changes it. */
	size_t CalculateSentinel() const
	{
		XXH3_128 Xxh = XXH3_128::HashMemory(this, offsetof(FAllocationData, Sentinel));

		size_t Hash;
		memcpy(&Hash, Xxh.Hash, sizeof(Hash));

		return Hash;
	}
};
+
// Page size is currently hard-coded to 4 KiB (see inline TODO: should be
// queried from the OS). bUseUnderrunMode selects where the guard page goes:
// before the block (catch underruns) or after it (catch overruns, the default).
FMallocStomp::FMallocStomp(const bool InUseUnderrunMode) : PageSize(4096 /* TODO: make dynamic */), bUseUnderrunMode(InUseUnderrunMode)
{
}
+
+void*
+FMallocStomp::Malloc(size_t Size, uint32_t Alignment)
+{
+ void* Result = TryMalloc(Size, Alignment);
+
+ if (Result == nullptr)
+ {
+ OutOfMemory(Size, Alignment);
+ }
+
+ return Result;
+}
+
+void*
+FMallocStomp::TryMalloc(size_t Size, uint32_t Alignment)
+{
+ if (Size == 0U)
+ {
+ Size = 1U;
+ }
+
+ Alignment = Max<uint32_t>(Alignment, STOMPALIGNMENT);
+
+ constexpr static size_t AllocationDataSize = sizeof(FAllocationData);
+
+ const size_t AlignedSize = Alignment ? ((Size + Alignment - 1) & -(int32_t)Alignment) : Size;
+ const size_t AlignmentSize = Alignment > PageSize ? Alignment - PageSize : 0;
+ const size_t AllocFullPageSize = (AlignedSize + AlignmentSize + AllocationDataSize + PageSize - 1) & ~(PageSize - 1);
+ const size_t TotalAllocationSize = AllocFullPageSize + PageSize;
+
+# if ZEN_PLATFORM_LINUX || ZEN_PLATFORM_MAC
+ void* FullAllocationPointer = mmap(nullptr, TotalAllocationSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
+# elif ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
+ // Allocate virtual address space from current block using linear allocation strategy.
+ // If there is not enough space, try to allocate new block from OS. Report OOM if block allocation fails.
+ void* FullAllocationPointer = nullptr;
+
+ {
+ RwLock::ExclusiveLockScope _(Lock);
+
+ if (VirtualAddressCursor + TotalAllocationSize <= VirtualAddressMax)
+ {
+ FullAllocationPointer = (void*)(VirtualAddressCursor);
+ }
+ else
+ {
+ const size_t ReserveSize = Max(VirtualAddressBlockSize, TotalAllocationSize);
+
+ // Reserve a new block of virtual address space that will be linearly sub-allocated
+ // We intentionally don't keep track of reserved blocks, as we never need to explicitly release them.
+ FullAllocationPointer = VirtualAlloc(nullptr, ReserveSize, MEM_RESERVE, PAGE_NOACCESS);
+
+ VirtualAddressCursor = uintptr_t(FullAllocationPointer);
+ VirtualAddressMax = VirtualAddressCursor + ReserveSize;
+ }
+
+ VirtualAddressCursor += TotalAllocationSize;
+ }
+# else
+ void* FullAllocationPointer = FPlatformMemory::BinnedAllocFromOS(TotalAllocationSize);
+# endif // PLATFORM_UNIX || PLATFORM_MAC
+
+ if (!FullAllocationPointer)
+ {
+ return nullptr;
+ }
+
+ void* ReturnedPointer = nullptr;
+
+ ZEN_ASSERT_SLOW(IsAligned(FullAllocationPointer, PageSize));
+
+ if (bUseUnderrunMode)
+ {
+ ReturnedPointer = Align((uint8_t*)FullAllocationPointer + PageSize + AllocationDataSize, Alignment);
+ void* AllocDataPointerStart = static_cast<FAllocationData*>(ReturnedPointer) - 1;
+ ZEN_ASSERT_SLOW(AllocDataPointerStart >= FullAllocationPointer);
+
+# if ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
+ // Commit physical pages to the used range, leaving the first page unmapped.
+ void* CommittedMemory = VirtualAlloc(AllocDataPointerStart, AllocationDataSize + AlignedSize, MEM_COMMIT, PAGE_READWRITE);
+ if (!CommittedMemory)
+ {
+ // Failed to allocate and commit physical memory pages.
+ return nullptr;
+ }
+ ZEN_ASSERT(CommittedMemory == AlignDown(AllocDataPointerStart, PageSize));
+# else
+ // Page protect the first page, this will cause the exception in case there is an underrun.
+ FPlatformMemory::PageProtect((uint8*)AlignDown(AllocDataPointerStart, PageSize) - PageSize, PageSize, false, false);
+# endif
+ } //-V773
+ else
+ {
+ ReturnedPointer = AlignDown((uint8_t*)FullAllocationPointer + AllocFullPageSize - AlignedSize, Alignment);
+ void* ReturnedPointerEnd = (uint8_t*)ReturnedPointer + AlignedSize;
+ ZEN_ASSERT_SLOW(IsAligned(ReturnedPointerEnd, PageSize));
+
+ void* AllocDataPointerStart = static_cast<FAllocationData*>(ReturnedPointer) - 1;
+ ZEN_ASSERT_SLOW(AllocDataPointerStart >= FullAllocationPointer);
+
+# if ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
+ // Commit physical pages to the used range, leaving the last page unmapped.
+ void* CommitPointerStart = AlignDown(AllocDataPointerStart, PageSize);
+ void* CommittedMemory = VirtualAlloc(CommitPointerStart,
+ size_t((uint8_t*)ReturnedPointerEnd - (uint8_t*)CommitPointerStart),
+ MEM_COMMIT,
+ PAGE_READWRITE);
+ if (!CommittedMemory)
+ {
+ // Failed to allocate and commit physical memory pages.
+ return nullptr;
+ }
+ ZEN_ASSERT(CommittedMemory == CommitPointerStart);
+# else
+ // Page protect the last page, this will cause the exception in case there is an overrun.
+ FPlatformMemory::PageProtect(ReturnedPointerEnd, PageSize, false, false);
+# endif
+ } //-V773
+
+ ZEN_ASSERT_SLOW(IsAligned(FullAllocationPointer, PageSize));
+ ZEN_ASSERT_SLOW(IsAligned(TotalAllocationSize, PageSize));
+ ZEN_ASSERT_SLOW(IsAligned(ReturnedPointer, Alignment));
+ ZEN_ASSERT_SLOW((uint8_t*)ReturnedPointer + AlignedSize <= (uint8_t*)FullAllocationPointer + TotalAllocationSize);
+
+ FAllocationData& AllocationData = static_cast<FAllocationData*>(ReturnedPointer)[-1];
+ AllocationData = {FullAllocationPointer, TotalAllocationSize, AlignedSize, 0};
+ AllocationData.Sentinel = AllocationData.CalculateSentinel();
+
+ return ReturnedPointer;
+}
+
+void*
+FMallocStomp::Realloc(void* InPtr, size_t NewSize, uint32_t Alignment)
+{
+ void* Result = TryRealloc(InPtr, NewSize, Alignment);
+
+ if (Result == nullptr && NewSize)
+ {
+ OutOfMemory(NewSize, Alignment);
+ }
+
+ return Result;
+}
+
+void*
+FMallocStomp::TryRealloc(void* InPtr, size_t NewSize, uint32_t Alignment)
+{
+ if (NewSize == 0U)
+ {
+ Free(InPtr);
+ return nullptr;
+ }
+
+ void* ReturnPtr = nullptr;
+
+ if (InPtr != nullptr)
+ {
+ ReturnPtr = TryMalloc(NewSize, Alignment);
+
+ if (ReturnPtr != nullptr)
+ {
+ FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(reinterpret_cast<uint8_t*>(InPtr) - sizeof(FAllocationData));
+ memcpy(ReturnPtr, InPtr, Min(AllocDataPtr->Size, NewSize));
+ Free(InPtr);
+ }
+ }
+ else
+ {
+ ReturnPtr = TryMalloc(NewSize, Alignment);
+ }
+
+ return ReturnPtr;
+}
+
// Release a block previously returned by TryMalloc. Reads the FAllocationData
// header stored immediately before InPtr, validates its sentinel, then returns
// the full OS allocation. nullptr is a no-op.
void
FMallocStomp::Free(void* InPtr)
{
	if (InPtr == nullptr)
	{
		return;
	}

	// The header lives directly before the user pointer (see TryMalloc).
	FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(InPtr);
	AllocDataPtr--;

	// Check the sentinel to verify that the allocation data is intact.
	if (AllocDataPtr->Sentinel != AllocDataPtr->CalculateSentinel())
	{
		// There was a memory underrun related to this allocation.
		ZEN_DEBUG_BREAK();
	}

#	if ZEN_PLATFORM_LINUX || ZEN_PLATFORM_MAC
	munmap(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize);
#	elif ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
	// Unmap physical memory, but keep virtual address range reserved to catch use-after-free errors.

	VirtualFree(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize, MEM_DECOMMIT);

#	else
	FPlatformMemory::BinnedFreeToOS(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize);
#	endif // PLATFORM_UNIX || PLATFORM_MAC
}
+
+bool
+FMallocStomp::GetAllocationSize(void* Original, size_t& SizeOut)
+{
+ if (Original == nullptr)
+ {
+ SizeOut = 0U;
+ }
+ else
+ {
+ FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(Original);
+ AllocDataPtr--;
+ SizeOut = AllocDataPtr->Size;
+ }
+
+ return true;
+}
+
+} // namespace zen
+
+#endif // WITH_MALLOC_STOMP