diff options
| author | Dan Engelbrecht <[email protected]> | 2025-09-04 13:17:25 +0200 |
|---|---|---|
| committer | GitHub Enterprise <[email protected]> | 2025-09-04 13:17:25 +0200 |
| commit | 9f575bd416e1f7afbd11d4b221074f34bb89605c (patch) | |
| tree | 07c87ccdbc01cdaf13015f46dddfaa71fa791d5b /src/zenserver/projectstore/projectstore.cpp | |
| parent | oplog memory usage reduction (#482) (diff) | |
| download | zen-9f575bd416e1f7afbd11d4b221074f34bb89605c.tar.xz zen-9f575bd416e1f7afbd11d4b221074f34bb89605c.zip | |
add validation of compact binary payloads before reading them (#483)
* add validation of compact binary payloads before reading them
Diffstat (limited to 'src/zenserver/projectstore/projectstore.cpp')
| -rw-r--r-- | src/zenserver/projectstore/projectstore.cpp | 138 |
1 file changed, 73 insertions(+), 65 deletions(-)
diff --git a/src/zenserver/projectstore/projectstore.cpp b/src/zenserver/projectstore/projectstore.cpp index 56b64cee4..5277d689f 100644 --- a/src/zenserver/projectstore/projectstore.cpp +++ b/src/zenserver/projectstore/projectstore.cpp @@ -5439,67 +5439,70 @@ ProjectStore::WriteOplog(const std::string_view ProjectId, const std::string_vie } Project->TouchOplog(OplogId); - CbObject ContainerObject = LoadCompactBinaryObject(Payload); - if (!ContainerObject) - { - return {HttpResponseCode::BadRequest, "Invalid payload format"}; - } - - CidStore& ChunkStore = m_CidStore; - RwLock AttachmentsLock; - tsl::robin_set<IoHash, IoHash::Hasher> Attachments; - - auto HasAttachment = [&ChunkStore](const IoHash& RawHash) { return ChunkStore.ContainsChunk(RawHash); }; - auto OnNeedBlock = [&AttachmentsLock, &Attachments](const IoHash& BlockHash, const std::vector<IoHash>&& ChunkHashes) { - RwLock::ExclusiveLockScope _(AttachmentsLock); - if (BlockHash != IoHash::Zero) - { - Attachments.insert(BlockHash); - } - else - { - Attachments.insert(ChunkHashes.begin(), ChunkHashes.end()); - } - }; - auto OnNeedAttachment = [&AttachmentsLock, &Attachments](const IoHash& RawHash) { - RwLock::ExclusiveLockScope _(AttachmentsLock); - Attachments.insert(RawHash); - }; + CbValidateError ValidateResult; + if (CbObject ContainerObject = ValidateAndReadCompactBinaryObject(std::move(Payload), ValidateResult); + ValidateResult == CbValidateError::None && ContainerObject) + { + CidStore& ChunkStore = m_CidStore; + RwLock AttachmentsLock; + tsl::robin_set<IoHash, IoHash::Hasher> Attachments; + + auto HasAttachment = [&ChunkStore](const IoHash& RawHash) { return ChunkStore.ContainsChunk(RawHash); }; + auto OnNeedBlock = [&AttachmentsLock, &Attachments](const IoHash& BlockHash, const std::vector<IoHash>&& ChunkHashes) { + RwLock::ExclusiveLockScope _(AttachmentsLock); + if (BlockHash != IoHash::Zero) + { + Attachments.insert(BlockHash); + } + else + { + Attachments.insert(ChunkHashes.begin(), 
ChunkHashes.end()); + } + }; + auto OnNeedAttachment = [&AttachmentsLock, &Attachments](const IoHash& RawHash) { + RwLock::ExclusiveLockScope _(AttachmentsLock); + Attachments.insert(RawHash); + }; - auto OnChunkedAttachment = [](const ChunkedInfo&) {}; + auto OnChunkedAttachment = [](const ChunkedInfo&) {}; - auto OnReferencedAttachments = [&Oplog](std::span<IoHash> RawHashes) { Oplog->CaptureAddedAttachments(RawHashes); }; - // Make sure we retain any attachments we download before writing the oplog - Oplog->EnableUpdateCapture(); - auto _ = MakeGuard([&Oplog]() { Oplog->DisableUpdateCapture(); }); + auto OnReferencedAttachments = [&Oplog](std::span<IoHash> RawHashes) { Oplog->CaptureAddedAttachments(RawHashes); }; + // Make sure we retain any attachments we download before writing the oplog + Oplog->EnableUpdateCapture(); + auto _ = MakeGuard([&Oplog]() { Oplog->DisableUpdateCapture(); }); - RemoteProjectStore::Result RemoteResult = SaveOplogContainer(*Oplog, - ContainerObject, - OnReferencedAttachments, - HasAttachment, - OnNeedBlock, - OnNeedAttachment, - OnChunkedAttachment, - nullptr); + RemoteProjectStore::Result RemoteResult = SaveOplogContainer(*Oplog, + ContainerObject, + OnReferencedAttachments, + HasAttachment, + OnNeedBlock, + OnNeedAttachment, + OnChunkedAttachment, + nullptr); - if (RemoteResult.ErrorCode) - { - return ConvertResult(RemoteResult); - } + if (RemoteResult.ErrorCode) + { + return ConvertResult(RemoteResult); + } - CbObjectWriter Cbo(1 + 1 + 5 + Attachments.size() * (1 + sizeof(IoHash::Hash)) + 1); - Cbo.BeginArray("need"); - { - for (const IoHash& Hash : Attachments) + CbObjectWriter Cbo(1 + 1 + 5 + Attachments.size() * (1 + sizeof(IoHash::Hash)) + 1); + Cbo.BeginArray("need"); { - ZEN_DEBUG("Need attachment {}", Hash); - Cbo << Hash; + for (const IoHash& Hash : Attachments) + { + ZEN_DEBUG("Need attachment {}", Hash); + Cbo << Hash; + } } - } - Cbo.EndArray(); // "need" + Cbo.EndArray(); // "need" - OutResponse = Cbo.Save(); - return 
{HttpResponseCode::OK, {}}; + OutResponse = Cbo.Save(); + return {HttpResponseCode::OK, {}}; + } + else + { + return {HttpResponseCode::BadRequest, fmt::format("Invalid payload format ('{}')", ToString(ValidateResult))}; + } } std::pair<HttpResponseCode, std::string> @@ -5604,15 +5607,19 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq, } break; case HttpContentType::kCbObject: - Cb = LoadCompactBinaryObject(Payload); - if (!Cb) { - HttpReq.WriteResponse(HttpResponseCode::BadRequest, - HttpContentType::kText, - "Content format not supported, expected compact binary format"); - return false; + CbValidateError ValidateResult; + if (Cb = ValidateAndReadCompactBinaryObject(std::move(Payload), ValidateResult); + ValidateResult != CbValidateError::None || !Cb) + { + HttpReq.WriteResponse( + HttpResponseCode::BadRequest, + HttpContentType::kText, + fmt::format("Content format not supported, expected compact binary format ('{}')", ToString(ValidateResult))); + return false; + } + break; } - break; case HttpContentType::kCbPackage: try { @@ -5917,8 +5924,8 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq, ResponseObj.EndArray(); } - // Ops that have moved chunks to a compressed buffer for storage in m_CidStore have been rewritten with references to the new - // chunk(s). Make sure we add the chunks to m_CidStore, and do it after we update the oplog so GC doesn't think we have + // Ops that have moved chunks to a compressed buffer for storage in m_CidStore have been rewritten with references to the + // new chunk(s). Make sure we add the chunks to m_CidStore, and do it after we update the oplog so GC doesn't think we have // unreferenced chunks. 
for (auto It : AddedChunks) { @@ -7768,7 +7775,8 @@ TEST_CASE("project.store.gc.prep") Project1->DeleteOplog("oplog1"sv); } - // Caution - putting breakpoints and stepping through this part of the test likely makes it fails due to expiry time of pending chunks + // Caution - putting breakpoints and stepping through this part of the test likely makes it fails due to expiry time of pending + // chunks { Ref<ProjectStore::Project> Project1 = ProjectStore.OpenProject("proj1"sv); Ref<ProjectStore::Oplog> Oplog = Project1->NewOplog("oplog1"sv, Project1OplogPath); @@ -7790,8 +7798,8 @@ TEST_CASE("project.store.gc.prep") } Sleep(200); - // This pass they should also be retained since our age retention has kept them alive and they will now be picked up and the retention - // cleared + // This pass they should also be retained since our age retention has kept them alive and they will now be picked up and the + // retention cleared { GcSettings Settings = {.CacheExpireTime = GcClock::Now(), .ProjectStoreExpireTime = GcClock::Now(), |