about summary refs log tree commit diff
path: root/zenstore/compactcas.cpp
diff options
context:
space:
mode:
author    Stefan Boberg <[email protected]>  2022-06-16 15:42:17 +0200
committer Stefan Boberg <[email protected]>  2022-06-16 15:42:17 +0200
commit   b8797a647406d31ebfd137a9ae07819ccf332a10 (patch)
tree     b57dcb1443c817577e1c9f8e10a35837e1d85389 /zenstore/compactcas.cpp
parent   asio: added some context to error reporting (diff)
download zen-b8797a647406d31ebfd137a9ae07819ccf332a10.tar.xz
         zen-b8797a647406d31ebfd137a9ae07819ccf332a10.zip
merged from main
Diffstat (limited to 'zenstore/compactcas.cpp')
-rw-r--r-- zenstore/compactcas.cpp | 68
1 file changed, 38 insertions(+), 30 deletions(-)
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp
index 65f959a0e..5aed02e7f 100644
--- a/zenstore/compactcas.cpp
+++ b/zenstore/compactcas.cpp
@@ -315,11 +315,13 @@ CasContainerStrategy::Flush()
void
CasContainerStrategy::Scrub(ScrubContext& Ctx)
{
- RwLock::SharedLockScope _(m_LocationMapLock);
-
- uint64_t TotalChunkCount = m_LocationMap.size();
+ std::vector<IoHash> BadKeys;
std::vector<BlockStoreLocation> ChunkLocations;
std::vector<IoHash> ChunkIndexToChunkHash;
+
+ RwLock::SharedLockScope _(m_LocationMapLock);
+
+ uint64_t TotalChunkCount = m_LocationMap.size();
ChunkLocations.reserve(TotalChunkCount);
ChunkIndexToChunkHash.reserve(TotalChunkCount);
{
@@ -328,37 +330,45 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx)
const IoHash& ChunkHash = Entry.first;
const BlockStoreDiskLocation& DiskLocation = Entry.second;
BlockStoreLocation Location = DiskLocation.Get(m_PayloadAlignment);
- size_t ChunkIndex = ChunkLocations.size();
ChunkLocations.push_back(Location);
- ChunkIndexToChunkHash[ChunkIndex] = ChunkHash;
+ ChunkIndexToChunkHash.push_back(ChunkHash);
}
}
- std::vector<IoHash> BadKeys;
+ const auto ValidateSmallChunk = [&](size_t ChunkIndex, const void* Data, uint64_t Size) {
+ const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex];
+ if (!Data)
+ {
+ // ChunkLocation out of range of stored blocks
+ BadKeys.push_back(Hash);
+ return;
+ }
+ const IoHash ComputedHash = IoHash::HashBuffer(Data, Size);
+ if (ComputedHash != Hash)
+ {
+ // Hash mismatch
+ BadKeys.push_back(Hash);
+ return;
+ }
+ };
- m_BlockStore.IterateChunks(
- ChunkLocations,
- [&](size_t ChunkIndex, const void* Data, uint64_t Size) {
- const IoHash ComputedHash = IoHash::HashBuffer(Data, Size);
- const IoHash& ExpectedHash = ChunkIndexToChunkHash[ChunkIndex];
- if (ComputedHash != ExpectedHash)
- {
- // Hash mismatch
- BadKeys.push_back(ExpectedHash);
- }
- },
- [&](size_t ChunkIndex, Ref<BlockStoreFile> BlockFile, uint64_t Offset, uint64_t Size) {
- IoHashStream Hasher;
- BlockFile->StreamByteRange(Offset, Size, [&](const void* Data, uint64_t Size) { Hasher.Append(Data, Size); });
- IoHash ComputedHash = Hasher.GetHash();
- const IoHash& ExpectedHash = ChunkIndexToChunkHash[ChunkIndex];
- if (ComputedHash != ExpectedHash)
- {
- // Hash mismatch
- BadKeys.push_back(ExpectedHash);
- }
- });
+ const auto ValidateLargeChunk = [&](size_t ChunkIndex, BlockStoreFile& File, uint64_t Offset, uint64_t Size) {
+ IoHashStream Hasher;
+ File.StreamByteRange(Offset, Size, [&](const void* Data, uint64_t Size) { Hasher.Append(Data, Size); });
+ IoHash ComputedHash = Hasher.GetHash();
+ const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex];
+ if (ComputedHash != Hash)
+ {
+ // Hash mismatch
+ BadKeys.push_back(Hash);
+ return;
+ }
+ };
+
+ m_BlockStore.IterateChunks(ChunkLocations, ValidateSmallChunk, ValidateLargeChunk);
+
+ _.ReleaseNow();
if (BadKeys.empty())
{
@@ -367,8 +377,6 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx)
ZEN_ERROR("Scrubbing found #{} bad chunks in '{}'", BadKeys.size(), m_Config.RootDirectory / m_ContainerBaseName);
- _.ReleaseNow();
-
if (Ctx.RunRecovery())
{
// Deal with bad chunks by removing them from our lookup map