aboutsummaryrefslogtreecommitdiff
path: root/src/zenserver/projectstore/projectstore.cpp
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-08-11 13:55:19 +0200
committerGitHub <[email protected]>2023-08-11 13:55:19 +0200
commit26717f50c658ea2cf745a4e0042735d3fa21f214 (patch)
tree26ea44cc893e6a4a2b041c8ca55fc7aae47d1076 /src/zenserver/projectstore/projectstore.cpp
parentAdd `response.text` to output in log when jupiter request fails (#354) (diff)
downloadzen-26717f50c658ea2cf745a4e0042735d3fa21f214.tar.xz
zen-26717f50c658ea2cf745a4e0042735d3fa21f214.zip
Make sure we always write "data" attachment hash for snapshotted oplog entries (#355)
* Make sure we always write "data" attachment hash for snapshotted oplog entries * Make sure to add chunk mappings for files moved to attachment in snapshot operation * fix inverted timeout for expiration (we don't want time expiry in these cases) * increase timeout for jupiter oplog in project to 3 min * changelog
Diffstat (limited to 'src/zenserver/projectstore/projectstore.cpp')
-rw-r--r--src/zenserver/projectstore/projectstore.cpp32
1 files changed, 25 insertions, 7 deletions
diff --git a/src/zenserver/projectstore/projectstore.cpp b/src/zenserver/projectstore/projectstore.cpp
index 64f0244d5..4865f048b 100644
--- a/src/zenserver/projectstore/projectstore.cpp
+++ b/src/zenserver/projectstore/projectstore.cpp
@@ -769,6 +769,16 @@ ProjectStore::Oplog::GetOpByIndex(int Index)
}
void
+ProjectStore::Oplog::AddChunkMappings(const std::unordered_map<Oid, IoHash, Oid::Hasher>& ChunkMappings)
+{
+ RwLock::ExclusiveLockScope OplogLock(m_OplogLock);
+ for (const auto& It : ChunkMappings)
+ {
+ AddChunkMapping(OplogLock, It.first, It.second);
+ }
+}
+
+void
ProjectStore::Oplog::AddFileMapping(const RwLock::ExclusiveLockScope&,
Oid FileId,
IoHash Hash,
@@ -1337,7 +1347,7 @@ ProjectStore::Project::ScrubStorage(ScrubContext& Ctx)
OpenOplog(OpLogId);
}
IterateOplogs([&](const Oplog& Ops) {
- if (!IsExpired(GcClock::TimePoint::max(), Ops))
+ if (!IsExpired(GcClock::TimePoint::min(), Ops))
{
Ops.ScrubStorage(Ctx);
}
@@ -1559,7 +1569,7 @@ ProjectStore::ScrubStorage(ScrubContext& Ctx)
for (auto& Kv : m_Projects)
{
- if (Kv.second->IsExpired(GcClock::TimePoint::max()))
+ if (Kv.second->IsExpired(GcClock::TimePoint::min()))
{
continue;
}
@@ -2462,7 +2472,8 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq,
uint64_t TotalBytes = 0;
uint64_t TotalFiles = 0;
- std::vector<CbObject> NewOps;
+ std::vector<CbObject> NewOps;
+ std::unordered_map<Oid, IoHash, Oid::Hasher> NewChunkMappings;
Oplog->IterateOplog([&](CbObject Op) {
bool OpRewritten = false;
@@ -2499,6 +2510,7 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq,
{
// Read file contents into memory, compress and store in CidStore
+ Oid ChunkId = View["id"sv].AsObjectId();
IoBuffer FileIoBuffer = DataFile.ReadAll();
CompressedBuffer Compressed = CompressedBuffer::Compress(SharedBuffer(FileIoBuffer));
const IoHash RawHash = Compressed.DecodeRawHash();
@@ -2516,12 +2528,11 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq,
}
// Rewrite file array entry with new data reference
-
- CbObject RewrittenOp = RewriteCbObject(View, [&](CbObjectWriter& Writer, CbFieldView Field) -> bool {
+ CbObjectWriter Writer;
+ RewriteCbObject(Writer, View, [&](CbObjectWriter&, CbFieldView Field) -> bool {
if (Field.GetName() == "data"sv)
{
- Writer.AddBinaryAttachment("data"sv, RawHash);
-
+ // omit this field as we will write it explicitly ourselves
return true;
}
else if (Field.GetName() == "serverpath"sv)
@@ -2532,9 +2543,13 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq,
return false;
});
+ Writer.AddBinaryAttachment("data"sv, RawHash);
+ CbObject RewrittenOp = Writer.Save();
Cbo.AddObject(std::move(RewrittenOp));
CopyField = false;
+
+ NewChunkMappings.insert_or_assign(ChunkId, RawHash);
}
}
}
@@ -2571,6 +2586,9 @@ ProjectStore::Rpc(HttpServerRequest& HttpReq,
OpCount++;
});
+ // Make sure we have references to our attachments
+ Oplog->AddChunkMappings(NewChunkMappings);
+
CbObjectWriter ResponseObj;
// Persist rewritten oplog entries