diff options
| author | Dan Engelbrecht <[email protected]> | 2022-06-14 15:18:47 -0700 |
|---|---|---|
| committer | GitHub <[email protected]> | 2022-06-14 15:18:47 -0700 |
| commit | 3c6f831b8995b296e0179404e67e1d6e2187065a (patch) | |
| tree | 67297329596bb3fca10a879a0dddfac9c9d8a7af /zenstore/compactcas.cpp | |
| parent | added _WIN32_WINNT define to be consistent with xmake file (diff) | |
| parent | Make sure we don't try to create a ZipFS IoBuffer of zero size (diff) | |
| download | zen-3c6f831b8995b296e0179404e67e1d6e2187065a.tar.xz zen-3c6f831b8995b296e0179404e67e1d6e2187065a.zip | |
Merge pull request #129 from EpicGames/de/better-iterate-chunks (tag: v0.1.4-pre2)
Improved BlockStore::IterateChunks
Diffstat (limited to 'zenstore/compactcas.cpp')
| -rw-r--r-- | zenstore/compactcas.cpp | 68 |
1 file changed, 38 insertions, 30 deletions
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp index 65f959a0e..5aed02e7f 100644 --- a/zenstore/compactcas.cpp +++ b/zenstore/compactcas.cpp @@ -315,11 +315,13 @@ CasContainerStrategy::Flush() void CasContainerStrategy::Scrub(ScrubContext& Ctx) { - RwLock::SharedLockScope _(m_LocationMapLock); - - uint64_t TotalChunkCount = m_LocationMap.size(); + std::vector<IoHash> BadKeys; std::vector<BlockStoreLocation> ChunkLocations; std::vector<IoHash> ChunkIndexToChunkHash; + + RwLock::SharedLockScope _(m_LocationMapLock); + + uint64_t TotalChunkCount = m_LocationMap.size(); ChunkLocations.reserve(TotalChunkCount); ChunkIndexToChunkHash.reserve(TotalChunkCount); { @@ -328,37 +330,45 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx) const IoHash& ChunkHash = Entry.first; const BlockStoreDiskLocation& DiskLocation = Entry.second; BlockStoreLocation Location = DiskLocation.Get(m_PayloadAlignment); - size_t ChunkIndex = ChunkLocations.size(); ChunkLocations.push_back(Location); - ChunkIndexToChunkHash[ChunkIndex] = ChunkHash; + ChunkIndexToChunkHash.push_back(ChunkHash); } } - std::vector<IoHash> BadKeys; + const auto ValidateSmallChunk = [&](size_t ChunkIndex, const void* Data, uint64_t Size) { + const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex]; + if (!Data) + { + // ChunkLocation out of range of stored blocks + BadKeys.push_back(Hash); + return; + } + const IoHash ComputedHash = IoHash::HashBuffer(Data, Size); + if (ComputedHash != Hash) + { + // Hash mismatch + BadKeys.push_back(Hash); + return; + } + }; - m_BlockStore.IterateChunks( - ChunkLocations, - [&](size_t ChunkIndex, const void* Data, uint64_t Size) { - const IoHash ComputedHash = IoHash::HashBuffer(Data, Size); - const IoHash& ExpectedHash = ChunkIndexToChunkHash[ChunkIndex]; - if (ComputedHash != ExpectedHash) - { - // Hash mismatch - BadKeys.push_back(ExpectedHash); - } - }, - [&](size_t ChunkIndex, Ref<BlockStoreFile> BlockFile, uint64_t Offset, uint64_t Size) { - IoHashStream Hasher; - 
BlockFile->StreamByteRange(Offset, Size, [&](const void* Data, uint64_t Size) { Hasher.Append(Data, Size); }); - IoHash ComputedHash = Hasher.GetHash(); - const IoHash& ExpectedHash = ChunkIndexToChunkHash[ChunkIndex]; - if (ComputedHash != ExpectedHash) - { - // Hash mismatch - BadKeys.push_back(ExpectedHash); - } - }); + const auto ValidateLargeChunk = [&](size_t ChunkIndex, BlockStoreFile& File, uint64_t Offset, uint64_t Size) { + IoHashStream Hasher; + File.StreamByteRange(Offset, Size, [&](const void* Data, uint64_t Size) { Hasher.Append(Data, Size); }); + IoHash ComputedHash = Hasher.GetHash(); + const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex]; + if (ComputedHash != Hash) + { + // Hash mismatch + BadKeys.push_back(Hash); + return; + } + }; + + m_BlockStore.IterateChunks(ChunkLocations, ValidateSmallChunk, ValidateLargeChunk); + + _.ReleaseNow(); if (BadKeys.empty()) { @@ -367,8 +377,6 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx) ZEN_ERROR("Scrubbing found #{} bad chunks in '{}'", BadKeys.size(), m_Config.RootDirectory / m_ContainerBaseName); - _.ReleaseNow(); - if (Ctx.RunRecovery()) { // Deal with bad chunks by removing them from our lookup map |