about summary refs log tree commit diff
path: root/zenstore/compactcas.cpp
diff options
context:
space:
mode:
author    Dan Engelbrecht <[email protected]>  2022-05-01 23:34:20 +0200
committer Dan Engelbrecht <[email protected]>  2022-05-01 23:34:57 +0200
commit   75b1dd112aead7c5246fa84928b9cd96dde49cbc (patch)
tree     98e74af2d1563e88791b149cb5d0d896206705cb /zenstore/compactcas.cpp
parent   reimplement CasContainerStrategy::Scrub (diff)
download zen-75b1dd112aead7c5246fa84928b9cd96dde49cbc.tar.xz
         zen-75b1dd112aead7c5246fa84928b9cd96dde49cbc.zip
respect Ctx.RunRecovery()
Diffstat (limited to 'zenstore/compactcas.cpp')
-rw-r--r--  zenstore/compactcas.cpp  44
1 file changed, 24 insertions(+), 20 deletions(-)
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp
index a6e617474..a79928fba 100644
--- a/zenstore/compactcas.cpp
+++ b/zenstore/compactcas.cpp
@@ -359,7 +359,7 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx)
}
}
- std::vector<IoHash> BadChunks;
+ std::vector<IoHash> BadKeys;
// We do a read sweep through the payloads file and validate
// any entries that are contained within each segment, with
@@ -368,16 +368,16 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx)
m_BlockStore.IterateChunks(
ChunkLocations,
- [&ChunkIndexToChunkHash, &BadChunks](size_t ChunkIndex, const void* Data, uint64_t Size) {
+ [&ChunkIndexToChunkHash, &BadKeys](size_t ChunkIndex, const void* Data, uint64_t Size) {
const IoHash ComputedHash = IoHash::HashBuffer(Data, Size);
const IoHash& ExpectedHash = ChunkIndexToChunkHash[ChunkIndex];
if (ComputedHash != ExpectedHash)
{
// Hash mismatch
- BadChunks.push_back(ExpectedHash);
+ BadKeys.push_back(ExpectedHash);
}
},
- [&ChunkIndexToChunkHash, &BadChunks](size_t ChunkIndex, BasicFile& BlockFile, uint64_t Offset, uint64_t Size) {
+ [&ChunkIndexToChunkHash, &BadKeys](size_t ChunkIndex, BasicFile& BlockFile, uint64_t Offset, uint64_t Size) {
IoHashStream Hasher;
BlockFile.StreamByteRange(Offset, Size, [&](const void* Data, uint64_t Size) { Hasher.Append(Data, Size); });
IoHash ComputedHash = Hasher.GetHash();
@@ -385,42 +385,46 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx)
if (ComputedHash != ExpectedHash)
{
// Hash mismatch
- BadChunks.push_back(ExpectedHash);
+ BadKeys.push_back(ExpectedHash);
}
});
- if (BadChunks.empty())
+ if (BadKeys.empty())
{
return;
}
- ZEN_ERROR("Scrubbing found {} bad chunks in '{}'", BadChunks.size(), m_Config.RootDirectory / m_ContainerBaseName);
+ ZEN_ERROR("Scrubbing found #{} bad chunks in '{}'", BadKeys.size(), m_Config.RootDirectory / m_ContainerBaseName);
_.ReleaseNow();
- // Deal with bad chunks by removing them from our lookup map
- std::vector<CasDiskIndexEntry> LogEntries;
- LogEntries.reserve(BadChunks.size());
+ if (Ctx.RunRecovery())
{
- RwLock::ExclusiveLockScope __(m_LocationMapLock);
- for (const IoHash& ChunkHash : BadChunks)
+ // Deal with bad chunks by removing them from our lookup map
+
+ std::vector<CasDiskIndexEntry> LogEntries;
+ LogEntries.reserve(BadKeys.size());
{
- const auto KeyIt = m_LocationMap.find(ChunkHash);
- if (KeyIt == m_LocationMap.end())
+ RwLock::ExclusiveLockScope __(m_LocationMapLock);
+ for (const IoHash& ChunkHash : BadKeys)
{
- // Might have been GC'd
- continue;
+ const auto KeyIt = m_LocationMap.find(ChunkHash);
+ if (KeyIt == m_LocationMap.end())
+ {
+ // Might have been GC'd
+ continue;
+ }
+ LogEntries.push_back({.Key = KeyIt->first, .Location = KeyIt->second, .Flags = CasDiskIndexEntry::kTombstone});
+ m_LocationMap.erase(KeyIt);
}
- LogEntries.push_back({.Key = KeyIt->first, .Location = KeyIt->second, .Flags = CasDiskIndexEntry::kTombstone});
- m_LocationMap.erase(KeyIt);
}
+ m_CasLog.Append(LogEntries);
}
- m_CasLog.Append(LogEntries);
// Let whomever it concerns know about the bad chunks. This could
// be used to invalidate higher level data structures more efficiently
// than a full validation pass might be able to do
- Ctx.ReportBadCasChunks(BadChunks);
+ Ctx.ReportBadCasChunks(BadKeys);
}
void