1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
|
// Copyright Epic Games, Inc. All Rights Reserved.
#include <zencore/memory/mallocstomp.h>
#if ZEN_WITH_MALLOC_STOMP
# include <zencore/memory/align.h>
# include <zencore/xxhash.h>
# if ZEN_PLATFORM_LINUX
# include <sys/mman.h>
# endif
# if ZEN_PLATFORM_WINDOWS
# include <zencore/windows.h>
# endif
# if ZEN_PLATFORM_WINDOWS
// MallocStomp can keep virtual address range reserved after memory block is freed, while releasing the physical memory.
// This dramatically increases accuracy of use-after-free detection, but consumes significant amount of memory for the OS page table.
// Virtual memory limit for a process on Win10 is 128 TB, which means we can afford to keep virtual memory reserved for a very long time.
// Running Infiltrator demo consumes ~700MB of virtual address space per second.
# define MALLOC_STOMP_KEEP_VIRTUAL_MEMORY 1
# else
# define MALLOC_STOMP_KEEP_VIRTUAL_MEMORY 0
# endif
// 64-bit ABIs on x86_64 expect a 16-byte alignment
# define STOMPALIGNMENT 16U
namespace zen {
/**
 * Per-allocation bookkeeping header, stored immediately before the pointer handed
 * back to the caller. Free() reads it back to find the underlying OS allocation and
 * to validate the sentinel (a hash of the preceding fields) against corruption.
 */
struct FMallocStomp::FAllocationData
{
/** Pointer to the full allocation. Needed so the OS knows what to free. */
void* FullAllocationPointer;
/** Full size of the allocation including the extra page. */
size_t FullSize;
/** Size of the allocation requested. */
size_t Size;
/** Sentinel used to check for underrun. */
size_t Sentinel;
/**
 * Calculate the expected sentinel value for this allocation data.
 * Hashes every field that precedes Sentinel (offsetof stops right before it), so any
 * write into the header — e.g. a buffer underrun — changes the result. Only the first
 * sizeof(size_t) bytes of the 128-bit hash are kept.
 * NOTE: relies on Sentinel being the last member; keep the field order intact.
 */
size_t CalculateSentinel() const
{
XXH3_128 Xxh = XXH3_128::HashMemory(this, offsetof(FAllocationData, Sentinel));
size_t Hash;
memcpy(&Hash, Xxh.Hash, sizeof(Hash));
return Hash;
}
};
/**
 * Construct the stomp allocator.
 *
 * @param InUseUnderrunMode If true, the guard (inaccessible) page is placed before each
 *        allocation to trap buffer underruns; if false, it is placed after the
 *        allocation to trap overruns (see TryMalloc).
 *
 * NOTE(review): PageSize is hard-coded to 4096 (see TODO) — presumably the OS page
 * size on all supported platforms; confirm before enabling on large-page systems.
 */
FMallocStomp::FMallocStomp(const bool InUseUnderrunMode) : PageSize(4096 /* TODO: make dynamic */), bUseUnderrunMode(InUseUnderrunMode)
{
}
/**
 * Allocate Size bytes at the requested Alignment.
 * Unlike TryMalloc, a failed allocation is reported through OutOfMemory()
 * rather than silently returning nullptr.
 */
void*
FMallocStomp::Malloc(size_t Size, uint32_t Alignment)
{
	void* Ptr = TryMalloc(Size, Alignment);
	if (Ptr != nullptr)
	{
		return Ptr;
	}
	OutOfMemory(Size, Alignment);
	return Ptr;
}
/**
 * Attempt to allocate Size bytes at the given Alignment; returns nullptr on failure.
 *
 * The block is carved out of whole OS pages with one adjacent inaccessible guard page.
 * In underrun mode the guard page precedes the user block (trapping writes before the
 * start); otherwise the user block is pushed against the end of the mapped range so
 * the guard page immediately follows it (trapping overruns). An FAllocationData
 * header, sealed with a sentinel, is written directly before the returned pointer.
 */
void*
FMallocStomp::TryMalloc(size_t Size, uint32_t Alignment)
{
	// A zero-byte request still gets a unique, guarded address.
	if (Size == 0U)
	{
		Size = 1U;
	}
	// Enforce the minimum ABI alignment (16 bytes on x86_64).
	Alignment = Max<uint32_t>(Alignment, STOMPALIGNMENT);
	constexpr static size_t AllocationDataSize = sizeof(FAllocationData);
	// Request size rounded up to the alignment (Alignment is always non-zero here).
	const size_t AlignedSize = Alignment ? ((Size + Alignment - 1) & -(int32_t)Alignment) : Size;
	// Extra slack needed when the alignment exceeds one page.
	const size_t AlignmentSize = Alignment > PageSize ? Alignment - PageSize : 0;
	// Whole pages covering header + data + alignment slack, plus one guard page.
	const size_t AllocFullPageSize = (AlignedSize + AlignmentSize + AllocationDataSize + PageSize - 1) & ~(PageSize - 1);
	const size_t TotalAllocationSize = AllocFullPageSize + PageSize;
# if ZEN_PLATFORM_LINUX || ZEN_PLATFORM_MAC
	void* FullAllocationPointer = mmap(nullptr, TotalAllocationSize, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
	if (FullAllocationPointer == MAP_FAILED)
	{
		// FIX: mmap signals failure with MAP_FAILED ((void*)-1), not nullptr; normalize
		// so the shared failure check below actually detects it.
		FullAllocationPointer = nullptr;
	}
# elif ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
	// Allocate virtual address space from current block using linear allocation strategy.
	// If there is not enough space, try to allocate new block from OS. Report OOM if block allocation fails.
	void* FullAllocationPointer = nullptr;
	{
		RwLock::ExclusiveLockScope _(Lock);
		if (VirtualAddressCursor + TotalAllocationSize <= VirtualAddressMax)
		{
			FullAllocationPointer = (void*)(VirtualAddressCursor);
		}
		else
		{
			const size_t ReserveSize = Max(VirtualAddressBlockSize, TotalAllocationSize);
			// Reserve a new block of virtual address space that will be linearly sub-allocated.
			// We intentionally don't keep track of reserved blocks, as we never need to explicitly release them.
			FullAllocationPointer = VirtualAlloc(nullptr, ReserveSize, MEM_RESERVE, PAGE_NOACCESS);
			if (FullAllocationPointer == nullptr)
			{
				// FIX: bail out before rebasing the cursor. Previously a failed reserve
				// set VirtualAddressCursor to 0 and advanced it, corrupting the linear
				// allocator state for every subsequent allocation.
				return nullptr;
			}
			VirtualAddressCursor = uintptr_t(FullAllocationPointer);
			VirtualAddressMax = VirtualAddressCursor + ReserveSize;
		}
		VirtualAddressCursor += TotalAllocationSize;
	}
# else
	void* FullAllocationPointer = FPlatformMemory::BinnedAllocFromOS(TotalAllocationSize);
# endif // PLATFORM_UNIX || PLATFORM_MAC
	if (!FullAllocationPointer)
	{
		return nullptr;
	}
	void* ReturnedPointer = nullptr;
	ZEN_ASSERT_SLOW(IsAligned(FullAllocationPointer, PageSize));
	if (bUseUnderrunMode)
	{
		// User block starts just past the first (guard) page, with the header packed
		// immediately before the returned pointer.
		ReturnedPointer = Align((uint8_t*)FullAllocationPointer + PageSize + AllocationDataSize, Alignment);
		void* AllocDataPointerStart = static_cast<FAllocationData*>(ReturnedPointer) - 1;
		ZEN_ASSERT_SLOW(AllocDataPointerStart >= FullAllocationPointer);
# if ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
		// Commit physical pages to the used range, leaving the first page unmapped.
		void* CommittedMemory = VirtualAlloc(AllocDataPointerStart, AllocationDataSize + AlignedSize, MEM_COMMIT, PAGE_READWRITE);
		if (!CommittedMemory)
		{
			// Failed to allocate and commit physical memory pages.
			return nullptr;
		}
		ZEN_ASSERT(CommittedMemory == AlignDown(AllocDataPointerStart, PageSize));
# else
		// Page protect the first page, this will cause the exception in case there is an underrun.
		FPlatformMemory::PageProtect((uint8*)AlignDown(AllocDataPointerStart, PageSize) - PageSize, PageSize, false, false);
# endif
	} //-V773
	else
	{
		// Push the user block against the end of the mapped range so the inaccessible
		// page immediately follows it.
		ReturnedPointer = AlignDown((uint8_t*)FullAllocationPointer + AllocFullPageSize - AlignedSize, Alignment);
		void* ReturnedPointerEnd = (uint8_t*)ReturnedPointer + AlignedSize;
		ZEN_ASSERT_SLOW(IsAligned(ReturnedPointerEnd, PageSize));
		void* AllocDataPointerStart = static_cast<FAllocationData*>(ReturnedPointer) - 1;
		ZEN_ASSERT_SLOW(AllocDataPointerStart >= FullAllocationPointer);
# if ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
		// Commit physical pages to the used range, leaving the last page unmapped.
		void* CommitPointerStart = AlignDown(AllocDataPointerStart, PageSize);
		void* CommittedMemory = VirtualAlloc(CommitPointerStart,
											 size_t((uint8_t*)ReturnedPointerEnd - (uint8_t*)CommitPointerStart),
											 MEM_COMMIT,
											 PAGE_READWRITE);
		if (!CommittedMemory)
		{
			// Failed to allocate and commit physical memory pages.
			return nullptr;
		}
		ZEN_ASSERT(CommittedMemory == CommitPointerStart);
# else
		// Page protect the last page, this will cause the exception in case there is an overrun.
		FPlatformMemory::PageProtect(ReturnedPointerEnd, PageSize, false, false);
# endif
	} //-V773
	ZEN_ASSERT_SLOW(IsAligned(FullAllocationPointer, PageSize));
	ZEN_ASSERT_SLOW(IsAligned(TotalAllocationSize, PageSize));
	ZEN_ASSERT_SLOW(IsAligned(ReturnedPointer, Alignment));
	ZEN_ASSERT_SLOW((uint8_t*)ReturnedPointer + AlignedSize <= (uint8_t*)FullAllocationPointer + TotalAllocationSize);
	// Fill in the header, then seal it with a sentinel so Free() can detect underruns.
	// (Sentinel is zeroed first; CalculateSentinel only hashes the fields before it.)
	FAllocationData& AllocationData = static_cast<FAllocationData*>(ReturnedPointer)[-1];
	AllocationData = {FullAllocationPointer, TotalAllocationSize, AlignedSize, 0};
	AllocationData.Sentinel = AllocationData.CalculateSentinel();
	return ReturnedPointer;
}
/**
 * Reallocate a block to NewSize bytes at the requested Alignment.
 * A non-zero request that cannot be satisfied is reported through OutOfMemory();
 * a zero-size request frees the block and returns nullptr without reporting.
 */
void*
FMallocStomp::Realloc(void* InPtr, size_t NewSize, uint32_t Alignment)
{
	if (void* NewPtr = TryRealloc(InPtr, NewSize, Alignment))
	{
		return NewPtr;
	}
	if (NewSize != 0)
	{
		OutOfMemory(NewSize, Alignment);
	}
	return nullptr;
}
void*
FMallocStomp::TryRealloc(void* InPtr, size_t NewSize, uint32_t Alignment)
{
if (NewSize == 0U)
{
Free(InPtr);
return nullptr;
}
void* ReturnPtr = nullptr;
if (InPtr != nullptr)
{
ReturnPtr = TryMalloc(NewSize, Alignment);
if (ReturnPtr != nullptr)
{
FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(reinterpret_cast<uint8_t*>(InPtr) - sizeof(FAllocationData));
memcpy(ReturnPtr, InPtr, Min(AllocDataPtr->Size, NewSize));
Free(InPtr);
}
}
else
{
ReturnPtr = TryMalloc(NewSize, Alignment);
}
return ReturnPtr;
}
/**
 * Release a block previously returned by TryMalloc. nullptr is accepted and ignored.
 *
 * Before freeing, the header sentinel is re-derived and compared: a mismatch means the
 * bookkeeping header was overwritten (an underrun into the header) and triggers a
 * debug break. On Windows with MALLOC_STOMP_KEEP_VIRTUAL_MEMORY the physical pages are
 * decommitted but the virtual range stays reserved so use-after-free faults.
 */
void
FMallocStomp::Free(void* InPtr)
{
if (InPtr == nullptr)
{
return;
}
// The FAllocationData header is stored immediately before the user pointer.
FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(InPtr);
AllocDataPtr--;
// Check the sentinel to verify that the allocation data is intact.
if (AllocDataPtr->Sentinel != AllocDataPtr->CalculateSentinel())
{
// There was a memory underrun related to this allocation.
ZEN_DEBUG_BREAK();
}
# if ZEN_PLATFORM_LINUX || ZEN_PLATFORM_MAC
munmap(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize);
# elif ZEN_PLATFORM_WINDOWS && MALLOC_STOMP_KEEP_VIRTUAL_MEMORY
// Unmap physical memory, but keep virtual address range reserved to catch use-after-free errors.
VirtualFree(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize, MEM_DECOMMIT);
# else
FPlatformMemory::BinnedFreeToOS(AllocDataPtr->FullAllocationPointer, AllocDataPtr->FullSize);
# endif // PLATFORM_UNIX || PLATFORM_MAC
}
bool
FMallocStomp::GetAllocationSize(void* Original, size_t& SizeOut)
{
if (Original == nullptr)
{
SizeOut = 0U;
}
else
{
FAllocationData* AllocDataPtr = reinterpret_cast<FAllocationData*>(Original);
AllocDataPtr--;
SizeOut = AllocDataPtr->Size;
}
return true;
}
} // namespace zen
#endif // WITH_MALLOC_STOMP
|