aboutsummaryrefslogtreecommitdiff
path: root/src/zenstore
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2025-12-15 13:20:21 +0100
committerGitHub Enterprise <[email protected]>2025-12-15 13:20:21 +0100
commita715d3ab7701e6257730a73c62567052d21c9771 (patch)
tree1f6b1de9c7cf11ec1403187d77d74a3b1af52a39 /src/zenstore
parentshow download source data (#689) (diff)
downloadzen-a715d3ab7701e6257730a73c62567052d21c9771.tar.xz
zen-a715d3ab7701e6257730a73c62567052d21c9771.zip
oplog download size (#690)
- Bugfix: Upload of oplogs could reference multiple blocks for the same chunk, causing redundant downloads of blocks.
- Improvement: Use the improved block reuse selection function from zen builds upload in zen oplog-export to reduce oplog download size.
Diffstat (limited to 'src/zenstore')
-rw-r--r--src/zenstore/cas.cpp18
1 file changed, 9 insertions, 9 deletions
diff --git a/src/zenstore/cas.cpp b/src/zenstore/cas.cpp
index 49d24c21e..ed017988f 100644
--- a/src/zenstore/cas.cpp
+++ b/src/zenstore/cas.cpp
@@ -267,17 +267,17 @@ CasImpl::InsertChunk(IoBuffer Chunk, const IoHash& ChunkHash, InsertMode Mode)
}
static void
-GetCompactCasResults(CasContainerStrategy& Strategy,
- std::span<IoBuffer> Data,
- std::span<IoHash> ChunkHashes,
- std::span<size_t> Indexes,
- std::vector<CasStore::InsertResult> Results)
+GetCompactCasResults(CasContainerStrategy& Strategy,
+ std::span<IoBuffer> Data,
+ std::span<IoHash> ChunkHashes,
+ std::span<size_t> Indexes,
+ std::vector<CasStore::InsertResult>& OutResults)
{
const size_t Count = Indexes.size();
if (Count == 1)
{
const size_t Index = Indexes[0];
- Results[Index] = Strategy.InsertChunk(Data[Index], ChunkHashes[Index]);
+ OutResults[Index] = Strategy.InsertChunk(Data[Index], ChunkHashes[Index]);
return;
}
std::vector<IoBuffer> Chunks;
@@ -290,12 +290,12 @@ GetCompactCasResults(CasContainerStrategy& Strategy,
Hashes.push_back(ChunkHashes[Index]);
}
- Strategy.InsertChunks(Chunks, Hashes);
+ std::vector<CasStore::InsertResult> Results = Strategy.InsertChunks(Chunks, Hashes);
for (size_t Offset = 0; Offset < Count; Offset++)
{
- size_t Index = Indexes[Offset];
- Results[Index] = Results[Offset];
+ size_t Index = Indexes[Offset];
+ OutResults[Index] = Results[Offset];
}
};