aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2022-09-30 15:13:45 +0200
committerGitHub <[email protected]>2022-09-30 06:13:45 -0700
commitbcc85c7e5cbcf0737f5180e3d601e63b480e8949 (patch)
tree134d702e98ec63ff06e6d08345a4d021812d33b3
parentUse bucket/key to get inline value in upstream for chunks without a chunkid (... (diff)
downloadzen-bcc85c7e5cbcf0737f5180e3d601e63b480e8949.tar.xz
zen-bcc85c7e5cbcf0737f5180e3d601e63b480e8949.zip
Reduce buffer creation in ParsePackageMessage (#175)
* Don't call CreateBuffer for attachment data that we only read and do not keep * changelog * don't read oplog attachments into memory just to do a redundant store of them
-rw-r--r--CHANGELOG.md1
-rw-r--r--zencore/include/zencore/stream.h1
-rw-r--r--zenhttp/httpshared.cpp49
-rw-r--r--zenserver/projectstore.cpp14
4 files changed, 46 insertions, 19 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 79d125d10..33dd37e08 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@
- Bugfix: CompactBinary: Fixed LoadCompactBinary to gracefully handle read failures and sizes larger than the archive. From https://p4-swarm.epicgames.net/changes/21983905
- Improvement: Logging: don't do formatting of messages the will not be logged
- Improvement: Logging: Timing and upstream source information in upstream logging when debug level logging is enabled
+- Improvement: Reduce buffer creation and copying in ParsePackageMessage
## v0.1.5
- Bugfix: Don't fail entire request if GetCacheValue from Horde fails for a single value
diff --git a/zencore/include/zencore/stream.h b/zencore/include/zencore/stream.h
index efff2c541..ec303e1f8 100644
--- a/zencore/include/zencore/stream.h
+++ b/zencore/include/zencore/stream.h
@@ -77,6 +77,7 @@ public:
inline uint64_t Size() const { return m_BufferSize; }
inline uint64_t GetSize() const { return Size(); }
inline uint64_t CurrentOffset() const { return m_Offset; }
+ inline void Skip(size_t ByteCount) { m_Offset += ByteCount; };
private:
const uint8_t* m_BufferBase;
diff --git a/zenhttp/httpshared.cpp b/zenhttp/httpshared.cpp
index 2b96f32fc..f9aa3af82 100644
--- a/zenhttp/httpshared.cpp
+++ b/zenhttp/httpshared.cpp
@@ -253,14 +253,11 @@ ParsePackageMessage(IoBuffer Payload, std::function<IoBuffer(const IoHash&, uint
for (uint32_t i = 0; i < ChunkCount; ++i)
{
- const CbAttachmentEntry& Entry = AttachmentEntries[i];
- const uint64_t AttachmentSize = Entry.PayloadSize;
- IoBuffer AttachmentBuffer = CreateBuffer(Entry.AttachmentHash, AttachmentSize);
+ const CbAttachmentEntry& Entry = AttachmentEntries[i];
+ const uint64_t AttachmentSize = Entry.PayloadSize;
- ZEN_ASSERT(AttachmentBuffer);
- ZEN_ASSERT(AttachmentBuffer.Size() == AttachmentSize);
-
- Reader.Read(AttachmentBuffer.MutableData(), AttachmentSize);
+ const IoBuffer AttachmentBuffer(Payload, Reader.CurrentOffset(), AttachmentSize);
+ Reader.Skip(AttachmentSize);
if (Entry.Flags & CbAttachmentEntry::kIsLocalRef)
{
@@ -279,13 +276,20 @@ ParsePackageMessage(IoBuffer Payload, std::function<IoBuffer(const IoHash&, uint
IoBufferBuilder::MakeFromFile(Path, AttachRefHdr->PayloadByteOffset, AttachRefHdr->PayloadByteSize))
{
CompressedBuffer CompBuf(CompressedBuffer::FromCompressed(SharedBuffer(ChunkReference)));
- CbAttachment Attachment(std::move(CompBuf));
+ if (!CompBuf)
+ {
+ throw std::runtime_error(fmt::format("invalid format for chunk #{} at '{}' (offset {}, size {})",
+ i,
+ PathToUtf8(Path),
+ AttachRefHdr->PayloadByteOffset,
+ AttachRefHdr->PayloadByteSize));
+ }
+ CbAttachment Attachment(std::move(CompBuf));
Package.AddAttachment(Attachment);
}
else
{
// Unable to open chunk reference
-
throw std::runtime_error(fmt::format("unable to resolve chunk #{} at '{}' (offset {}, size {})",
i,
PathToUtf8(Path),
@@ -295,12 +299,15 @@ ParsePackageMessage(IoBuffer Payload, std::function<IoBuffer(const IoHash&, uint
}
else if (Entry.Flags & CbAttachmentEntry::kIsCompressed)
{
- CompressedBuffer CompBuf(CompressedBuffer::FromCompressed(SharedBuffer(AttachmentBuffer)));
-
if (Entry.Flags & CbAttachmentEntry::kIsObject)
{
if (i == 0)
{
+ CompressedBuffer CompBuf(CompressedBuffer::FromCompressed(SharedBuffer(AttachmentBuffer)));
+ if (!CompBuf)
+ {
+ throw std::runtime_error(fmt::format("invalid format for chunk #{} expected compressed buffer for CbObject", i));
+ }
// First payload is always a compact binary object
Package.SetObject(LoadCompactBinaryObject(std::move(CompBuf)));
}
@@ -311,6 +318,18 @@ ParsePackageMessage(IoBuffer Payload, std::function<IoBuffer(const IoHash&, uint
}
else
{
+ // Make a copy of the buffer so attachments don't reference the entire payload
+ IoBuffer AttachmentBufferCopy = CreateBuffer(Entry.AttachmentHash, AttachmentSize);
+ ZEN_ASSERT(AttachmentBufferCopy);
+ ZEN_ASSERT(AttachmentBufferCopy.Size() == AttachmentSize);
+ AttachmentBufferCopy.GetMutableView().CopyFrom(AttachmentBuffer.GetView());
+
+ CompressedBuffer CompBuf(CompressedBuffer::FromCompressed(SharedBuffer(AttachmentBufferCopy)));
+ if (!CompBuf)
+ {
+ throw std::runtime_error(fmt::format("invalid format for chunk #{} expected compressed buffer for attachment", i));
+ }
+
CbAttachment Attachment(std::move(CompBuf));
Package.AddAttachment(Attachment);
}
@@ -330,7 +349,13 @@ ParsePackageMessage(IoBuffer Payload, std::function<IoBuffer(const IoHash&, uint
}
else
{
- CbAttachment Attachment(SharedBuffer{AttachmentBuffer});
+ // Make a copy of the buffer so attachments don't reference the entire payload
+ IoBuffer AttachmentBufferCopy = CreateBuffer(Entry.AttachmentHash, AttachmentSize);
+ ZEN_ASSERT(AttachmentBufferCopy);
+ ZEN_ASSERT(AttachmentBufferCopy.Size() == AttachmentSize);
+ AttachmentBufferCopy.GetMutableView().CopyFrom(AttachmentBuffer.GetView());
+
+ CbAttachment Attachment(SharedBuffer{AttachmentBufferCopy});
Package.AddAttachment(Attachment);
}
}
diff --git a/zenserver/projectstore.cpp b/zenserver/projectstore.cpp
index 93276f029..19978c3e9 100644
--- a/zenserver/projectstore.cpp
+++ b/zenserver/projectstore.cpp
@@ -1544,8 +1544,13 @@ HttpProjectService::HttpProjectService(CidStore& Store, ProjectStore* Projects)
std::vector<IoHash> MissingChunks;
CbPackage::AttachmentResolver Resolver = [&](const IoHash& Hash) -> SharedBuffer {
- IoHash AttachmentId;
+ if (m_CidStore.ContainsChunk(Hash))
+ {
+ // Return null attachment as we already have it, no point in reading it and storing it again
+ return {};
+ }
+ IoHash AttachmentId;
if (IsUsingSalt)
{
IoHash AttachmentSpec[]{SaltHash, Hash};
@@ -1557,12 +1562,7 @@ HttpProjectService::HttpProjectService(CidStore& Store, ProjectStore* Projects)
}
std::filesystem::path AttachmentPath = Oplog.TempPath() / AttachmentId.ToHexString();
-
- if (IoBuffer CompressedData = m_CidStore.FindChunkByCid(Hash))
- {
- return SharedBuffer(std::move(CompressedData));
- }
- else if (IoBuffer Data = IoBufferBuilder::MakeFromTemporaryFile(AttachmentPath))
+ if (IoBuffer Data = IoBufferBuilder::MakeFromTemporaryFile(AttachmentPath))
{
return SharedBuffer(std::move(Data));
}