aboutsummaryrefslogtreecommitdiff
path: root/src/zencore/memory.cpp
diff options
context:
space:
mode:
author	Stefan Boberg <[email protected]>	2023-05-02 10:01:47 +0200
committer	GitHub <[email protected]>	2023-05-02 10:01:47 +0200
commit	075d17f8ada47e990fe94606c3d21df409223465 (patch)
tree	e50549b766a2f3c354798a54ff73404217b4c9af	/src/zencore/memory.cpp
parent	fix: bundle shouldn't append content zip to zen (diff)
download	zen-075d17f8ada47e990fe94606c3d21df409223465.tar.xz
	zen-075d17f8ada47e990fe94606c3d21df409223465.zip
moved source directories into `/src` (#264)
* moved source directories into `/src` * updated bundle.lua for new `src` path * moved some docs, icon * removed old test trees
Diffstat (limited to 'src/zencore/memory.cpp')
-rw-r--r--	src/zencore/memory.cpp	211
1 files changed, 211 insertions, 0 deletions
diff --git a/src/zencore/memory.cpp b/src/zencore/memory.cpp
new file mode 100644
index 000000000..1f148cede
--- /dev/null
+++ b/src/zencore/memory.cpp
@@ -0,0 +1,211 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+#include <zencore/intmath.h>
+#include <zencore/memory.h>
+#include <zencore/testing.h>
+#include <zencore/zencore.h>
+
+#if ZEN_PLATFORM_WINDOWS
+# include <malloc.h>
+ZEN_THIRD_PARTY_INCLUDES_START
+# include <mimalloc.h>
+ZEN_THIRD_PARTY_INCLUDES_END
+#else
+# include <cstdlib>
+#endif
+
+namespace zen {
+
+//////////////////////////////////////////////////////////////////////////
+
// Platform-specific backend for all aligned allocations in this file.
//
// Size      - requested byte count (may be rounded up internally).
// Alignment - requested alignment; assumed to be a power of two.
//
// Returns a pointer that must be released with AlignedFreeImpl().
static void*
AlignedAllocImpl(size_t Size, size_t Alignment)
{
#if ZEN_PLATFORM_WINDOWS
#	if ZEN_USE_MIMALLOC && 0 /* this path is not functional */
	return mi_aligned_alloc(Alignment, Size);
#	else
	return _aligned_malloc(Size, Alignment);
#	endif
#else
	// std::aligned_alloc() requires the size to be a multiple of the
	// alignment, and some platforms return null when that requirement is
	// violated. Round up (power-of-two mask trick) before allocating.
	const size_t RoundedSize = (Size + Alignment - 1) & ~(Alignment - 1);
	return std::aligned_alloc(Alignment, RoundedSize);
#endif
}
+
// Releases memory previously obtained from AlignedAllocImpl().
// Passing nullptr is an explicit no-op.
void
AlignedFreeImpl(void* ptr)
{
	// Mirror free()'s tolerance of null so callers don't need to check.
	if (ptr == nullptr)
	{
		return;
	}

#if ZEN_PLATFORM_WINDOWS
#	if ZEN_USE_MIMALLOC && 0 /* this path is not functional */
	return mi_free(ptr);
#	else
	_aligned_free(ptr);
#	endif
#else
	std::free(ptr);
#endif
}
+
+//////////////////////////////////////////////////////////////////////////
+
+MemoryArena::MemoryArena()
+{
+}
+
+MemoryArena::~MemoryArena()
+{
+}
+
+void*
+MemoryArena::Alloc(size_t Size, size_t Alignment)
+{
+ return AlignedAllocImpl(Size, Alignment);
+}
+
+void
+MemoryArena::Free(void* ptr)
+{
+ AlignedFreeImpl(ptr);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
+void*
+Memory::Alloc(size_t Size, size_t Alignment)
+{
+ return AlignedAllocImpl(Size, Alignment);
+}
+
+void
+Memory::Free(void* ptr)
+{
+ AlignedFreeImpl(ptr);
+}
+
+//////////////////////////////////////////////////////////////////////////
+
// Constructs a linear allocator that carves allocations out of chunks
// obtained from Memory::Alloc.
//
// ChunkSize      - granularity (in bytes) used when acquiring new chunks;
//                  oversize requests are rounded up to a multiple of this.
// ChunkAlignment - alignment passed to Memory::Alloc for each chunk.
ChunkingLinearAllocator::ChunkingLinearAllocator(uint64_t ChunkSize, uint64_t ChunkAlignment)
: m_ChunkSize(ChunkSize)
, m_ChunkAlignment(ChunkAlignment)
{
}
+
// Frees every chunk via Reset(); all pointers previously returned by
// Alloc() are invalidated.
ChunkingLinearAllocator::~ChunkingLinearAllocator()
{
	Reset();
}
+
+void
+ChunkingLinearAllocator::Reset()
+{
+ for (void* ChunkEntry : m_ChunkList)
+ {
+ Memory::Free(ChunkEntry);
+ }
+ m_ChunkList.clear();
+
+ m_ChunkCursor = nullptr;
+ m_ChunkBytesRemain = 0;
+}
+
+void*
+ChunkingLinearAllocator::Alloc(size_t Size, size_t Alignment)
+{
+ ZEN_ASSERT_SLOW(zen::IsPow2(Alignment));
+
+ // This could be improved in a bunch of ways
+ //
+ // * We pessimistically allocate memory even though there may be enough memory available for a single allocation due to the way we take
+ // alignment into account below
+ // * The block allocation size could be chosen to minimize slack for the case when multiple oversize allocations are made rather than
+ // minimizing the number of chunks
+ // * ...
+
+ const uint64_t AllocationSize = zen::RoundUp(Size, Alignment);
+
+ if (m_ChunkBytesRemain < (AllocationSize + Alignment - 1))
+ {
+ const uint64_t ChunkSize = zen::RoundUp(zen::Max(m_ChunkSize, Size), m_ChunkSize);
+ void* ChunkPtr = Memory::Alloc(ChunkSize, m_ChunkAlignment);
+ m_ChunkCursor = reinterpret_cast<uint8_t*>(ChunkPtr);
+ m_ChunkBytesRemain = ChunkSize;
+ m_ChunkList.push_back(ChunkPtr);
+ }
+
+ const uint64_t AlignFixup = (Alignment - reinterpret_cast<uintptr_t>(m_ChunkCursor)) & (Alignment - 1);
+ void* ReturnPtr = m_ChunkCursor + AlignFixup;
+ const uint64_t Delta = AlignFixup + AllocationSize;
+
+ ZEN_ASSERT_SLOW(m_ChunkBytesRemain >= Delta);
+
+ m_ChunkCursor += Delta;
+ m_ChunkBytesRemain -= Delta;
+
+ ZEN_ASSERT_SLOW(IsPointerAligned(ReturnPtr, Alignment));
+
+ return ReturnPtr;
+}
+
+//////////////////////////////////////////////////////////////////////////
+//
+// Unit tests
+//
+
+#if ZEN_WITH_TESTS
+
+TEST_CASE("ChunkingLinearAllocator")
+{
+ ChunkingLinearAllocator Allocator(4096);
+
+ void* p1 = Allocator.Alloc(1, 1);
+ void* p2 = Allocator.Alloc(1, 1);
+
+ CHECK(p1 != p2);
+
+ void* p3 = Allocator.Alloc(1, 4);
+ CHECK(IsPointerAligned(p3, 4));
+
+ void* p3_2 = Allocator.Alloc(1, 4);
+ CHECK(IsPointerAligned(p3_2, 4));
+
+ void* p4 = Allocator.Alloc(1, 8);
+ CHECK(IsPointerAligned(p4, 8));
+
+ for (int i = 0; i < 100; ++i)
+ {
+ void* p0 = Allocator.Alloc(64);
+ ZEN_UNUSED(p0);
+ }
+}
+
+TEST_CASE("MemoryView")
+{
+ {
+ uint8_t Array1[16] = {};
+ MemoryView View1 = MakeMemoryView(Array1);
+ CHECK(View1.GetSize() == 16);
+ }
+
+ {
+ uint32_t Array2[16] = {};
+ MemoryView View2 = MakeMemoryView(Array2);
+ CHECK(View2.GetSize() == 64);
+ }
+
+ CHECK(MakeMemoryView<float>({1.0f, 1.2f}).GetSize() == 8);
+}
+
// Intentionally empty. NOTE(review): presumably referenced from another
// translation unit so the linker pulls in this object file and the
// TEST_CASEs above get registered -- confirm against the test harness.
void
memory_forcelink()
{
}
+
+#endif
+
+} // namespace zen