aboutsummaryrefslogtreecommitdiff
path: root/zencore/include
diff options
context:
space:
mode:
authorStefan Boberg <[email protected]>2021-05-24 13:26:27 +0200
committerStefan Boberg <[email protected]>2021-05-24 13:26:27 +0200
commit429b6eb2ffcbdd0b30d0058d01cc19570e3e6b4b (patch)
treeeeddf5905ca45802483b07717de83ab90ba07606 /zencore/include
parentAdded functionality to SharedBuffer/UniqueBuffer to support CompositeBuffer i... (diff)
downloadzen-429b6eb2ffcbdd0b30d0058d01cc19570e3e6b4b.tar.xz
zen-429b6eb2ffcbdd0b30d0058d01cc19570e3e6b4b.zip
Initial implementation of CompositeBuffer
A CompositeBuffer is a non-contiguous buffer composed of zero or more immutable shared buffers
Diffstat (limited to 'zencore/include')
-rw-r--r--zencore/include/zencore/compositebuffer.h127
1 file changed, 127 insertions, 0 deletions
diff --git a/zencore/include/zencore/compositebuffer.h b/zencore/include/zencore/compositebuffer.h
new file mode 100644
index 000000000..7b2bbf48f
--- /dev/null
+++ b/zencore/include/zencore/compositebuffer.h
@@ -0,0 +1,127 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+#pragma once
+
+#include <zencore/sharedbuffer.h>
+#include <zencore/zencore.h>
+
+#include <functional>
+#include <span>
+#include <vector>
+
+namespace zen {
+
+/**
+ * CompositeBuffer is a non-contiguous buffer composed of zero or more immutable shared buffers.
+ *
+ * A composite buffer is most efficient when its segments are consumed as they are, but it can be
+ * flattened into a contiguous buffer, when necessary, by calling Flatten(). Ownership of segment
+ * buffers is not changed on construction, but if ownership of segments is required then that can
+ * be guaranteed by calling MakeOwned().
+ */
+
+class CompositeBuffer
+{
+public:
+ /**
+ * Construct a composite buffer by concatenating the buffers. Does not enforce ownership.
+ *
+ * Buffer parameters may be SharedBuffer, CompositeBuffer, or std::vector<SharedBuffer>.
+ */
+ template<typename... BufferTypes>
+ inline explicit CompositeBuffer(BufferTypes&&... Buffers)
+ {
+ if constexpr (sizeof...(Buffers) > 0)
+ {
+ m_Segments.reserve((GetBufferCount(std::forward<BufferTypes>(Buffers)) + ...));
+ (AppendBuffers(std::forward<BufferTypes>(Buffers)), ...);
+ std::erase_if(m_Segments, [](const SharedBuffer& It) { return It.IsNull(); });
+ }
+ }
+
+ /** Reset this to null. */
+ ZENCORE_API void Reset();
+
+ /** Returns the total size of the composite buffer in bytes. */
+ [[nodiscard]] ZENCORE_API uint64_t GetSize() const;
+
+ /** Returns the segments that the buffer is composed from. */
+ [[nodiscard]] inline std::span<const SharedBuffer> GetSegments() const { return std::span<const SharedBuffer>{m_Segments}; }
+
+ /** Returns true if the composite buffer is not null. */
+ [[nodiscard]] inline explicit operator bool() const { return !IsNull(); }
+
+ /** Returns true if the composite buffer is null. */
+ [[nodiscard]] inline bool IsNull() const { return m_Segments.empty(); }
+
+ /** Returns true if every segment in the composite buffer is owned. */
+ [[nodiscard]] ZENCORE_API bool IsOwned() const;
+
+ /** Returns a copy of the buffer where every segment is owned. */
+ [[nodiscard]] ZENCORE_API CompositeBuffer MakeOwned() const&;
+ [[nodiscard]] ZENCORE_API CompositeBuffer MakeOwned() &&;
+
+ /** Returns the concatenation of the segments into a contiguous buffer. */
+ [[nodiscard]] ZENCORE_API SharedBuffer Flatten() const&;
+ [[nodiscard]] ZENCORE_API SharedBuffer Flatten() &&;
+
+ /** Returns the middle part of the buffer by taking the size starting at the offset. */
+ [[nodiscard]] ZENCORE_API CompositeBuffer Mid(uint64_t Offset, uint64_t Size = ~uint64_t(0)) const;
+
+ /**
+ * Returns a view of the range if contained by one segment, otherwise a view of a copy of the range.
+ *
+ * @note CopyBuffer is reused if large enough, and otherwise allocated when needed.
+ *
+ * @param Offset The byte offset in this buffer that the range starts at.
+ * @param Size The number of bytes in the range to view or copy.
+ * @param CopyBuffer The buffer to write the copy into if a copy is required.
+ */
+ [[nodiscard]] ZENCORE_API MemoryView ViewOrCopyRange(uint64_t Offset, uint64_t Size, UniqueBuffer& CopyBuffer) const;
+
+ /**
+ * Copies a range of the buffer to a contiguous region of memory.
+ *
+ * @param Target The view to copy to. Must be no larger than the data available at the offset.
+ * @param Offset The byte offset in this buffer to start copying from.
+ */
+ ZENCORE_API void CopyTo(MutableMemoryView Target, uint64_t Offset = 0) const;
+
+ /**
+ * Invokes a visitor with a view of each segment that intersects with a range.
+ *
+ * @param Offset The byte offset in this buffer to start visiting from.
+ * @param Size The number of bytes in the range to visit.
+ * @param Visitor The visitor to invoke from zero to GetSegments().Num() times.
+ */
+ ZENCORE_API void IterateRange(uint64_t Offset, uint64_t Size, std::function<void(MemoryView View)> Visitor) const;
+ ZENCORE_API void IterateRange(uint64_t Offset,
+ uint64_t Size,
+ std::function<void(MemoryView View, const SharedBuffer& ViewOuter)> Visitor) const;
+
+ /** A null composite buffer. */
+ static const CompositeBuffer Null;
+
+private:
+ static inline size_t GetBufferCount(const CompositeBuffer& Buffer) { return Buffer.m_Segments.size(); }
+ inline void AppendBuffers(const CompositeBuffer& Buffer)
+ {
+ m_Segments.insert(m_Segments.end(), begin(Buffer.m_Segments), end(Buffer.m_Segments));
+ }
+ inline void AppendBuffers(CompositeBuffer&& Buffer)
+ {
+ // TODO: this operates just like the by-reference version above
+ m_Segments.insert(m_Segments.end(), begin(Buffer.m_Segments), end(Buffer.m_Segments));
+ }
+
+ static inline size_t GetBufferCount(const SharedBuffer&) { return 1; }
+ inline void AppendBuffers(const SharedBuffer& Buffer) { m_Segments.push_back(Buffer); }
+ inline void AppendBuffers(SharedBuffer&& Buffer) { m_Segments.push_back(std::move(Buffer)); }
+
+private:
+ std::vector<SharedBuffer> m_Segments;
+};
+
+void compositebuffer_forcelink(); // internal
+
+} // namespace zen