diff options
| author | Dan Engelbrecht <[email protected]> | 2022-03-23 17:20:39 +0100 |
|---|---|---|
| committer | Dan Engelbrecht <[email protected]> | 2022-03-31 11:29:27 +0200 |
| commit | a24ebc13bd3321eaef5757f6b42fd7fa2ebdfcc0 (patch) | |
| tree | 5292d47e59ca5821c13328670fd0952082506b6c /zenstore/compactcas.cpp | |
| parent | typedef for LocationMap (diff) | |
| download | zen-a24ebc13bd3321eaef5757f6b42fd7fa2ebdfcc0.tar.xz zen-a24ebc13bd3321eaef5757f6b42fd7fa2ebdfcc0.zip | |
review feedback
Diffstat (limited to 'zenstore/compactcas.cpp')
| -rw-r--r-- | zenstore/compactcas.cpp | 90 |
1 file changed, 45 insertions, 45 deletions
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp index 4de5635a8..a2656fb0a 100644 --- a/zenstore/compactcas.cpp +++ b/zenstore/compactcas.cpp @@ -66,7 +66,7 @@ namespace { { result.push_back({.Key = MovedEntry.first, .Location = MovedEntry.second}); } - for (const auto& ChunkHash : DeletedChunks) + for (const IoHash& ChunkHash : DeletedChunks) { result.push_back({.Key = ChunkHash, .Flags = CasDiskIndexEntry::kTombstone}); } @@ -127,12 +127,12 @@ CasContainerStrategy::InsertChunk(const void* ChunkData, size_t ChunkSize, const // New entry WriteBlockIndex = m_WriteBlockIndex.load(std::memory_order_acquire); - WriteBlock = m_WriteBlock.lock(); - if (!WriteBlock || (m_CurrentInsertOffset + ChunkSize) > m_MaxBlockSize) + bool IsWriting = m_WriteBlock != nullptr; + if (!IsWriting || (m_CurrentInsertOffset + ChunkSize) > m_MaxBlockSize) { - if (WriteBlock) + if (m_WriteBlock) { - WriteBlock->Flush(); + m_WriteBlock.reset(); } { RwLock::ExclusiveLockScope __(m_LocationMapLock); @@ -140,26 +140,26 @@ CasContainerStrategy::InsertChunk(const void* ChunkData, size_t ChunkSize, const { throw std::runtime_error(fmt::format("unable to allocate a new block in {}", m_ContainerBaseName)); } - WriteBlockIndex += WriteBlock ? 1 : 0; + WriteBlockIndex += IsWriting ? 
1 : 0; while (m_ChunkBlocks.contains(WriteBlockIndex)) { WriteBlockIndex = (WriteBlockIndex + 1) & BlockStoreDiskLocation::MaxBlockIndex; } auto BlockPath = BuildUcasPath(m_BlocksBasePath, WriteBlockIndex); - WriteBlock = std::make_shared<BlockStoreFile>(BlockPath); - m_ChunkBlocks[WriteBlockIndex] = WriteBlock; + m_WriteBlock = std::make_shared<BlockStoreFile>(BlockPath); + m_ChunkBlocks[WriteBlockIndex] = m_WriteBlock; m_WriteBlockIndex.store(WriteBlockIndex, std::memory_order_release); } - m_WriteBlock = WriteBlock; m_CurrentInsertOffset = 0; - WriteBlock->Create(m_MaxBlockSize); + m_WriteBlock->Create(m_MaxBlockSize); } else { - WriteBlock->Open(); + m_WriteBlock->Open(); } InsertOffset = m_CurrentInsertOffset; m_CurrentInsertOffset = RoundUp(InsertOffset + ChunkSize, m_PayloadAlignment); + WriteBlock = m_WriteBlock; } WriteBlock->Write(ChunkData, ChunkSize, InsertOffset); @@ -234,7 +234,7 @@ CasContainerStrategy::Flush() if (m_CurrentInsertOffset > 0) { uint32_t WriteBlockIndex = m_WriteBlockIndex.load(std::memory_order_acquire); - auto WriteBlock = m_WriteBlock.lock(); + auto WriteBlock = m_WriteBlock; WriteBlockIndex++; while (m_ChunkBlocks.contains(WriteBlockIndex)) { @@ -363,7 +363,7 @@ CasContainerStrategy::Scrub(ScrubContext& Ctx) void CasContainerStrategy::UpdateLocations(const std::span<CasDiskIndexEntry>& Entries) { - for (const auto& Entry : Entries) + for (const CasDiskIndexEntry& Entry : Entries) { if (Entry.Flags & CasDiskIndexEntry::kTombstone) { @@ -414,7 +414,7 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) Stopwatch TotalTimer; uint64_t WriteBlockTimeUs = 0; uint64_t ReadBlockTimeUs = 0; - uint64_t TotalChunkCount = {}; + uint64_t TotalChunkCount = 0; uint64_t DeletedSize = 0; uint64_t OldTotalSize = m_TotalSize.load(std::memory_order::relaxed); @@ -452,7 +452,7 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) RwLock::SharedLockScope _l(m_LocationMapLock); Stopwatch Timer; const auto TimerGuard = MakeGuard([&Timer, 
&WriteBlockTimeUs] { WriteBlockTimeUs += Timer.GetElapsedTimeUs(); }); - if (!m_WriteBlock.expired()) + if (m_WriteBlock) { ExcludeBlockIndex = m_WriteBlockIndex.load(std::memory_order_acquire); } @@ -500,7 +500,7 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) DeleteChunks.back().reserve(GuesstimateCountPerBlock); } - uint64_t DeleteCount = {}; + uint64_t DeleteCount = 0; uint64_t NewTotalSize = 0; GcCtx.FilterCas(TotalChunkHashes, [&](const IoHash& ChunkHash, bool Keep) { @@ -538,12 +538,12 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) // Move all chunks in blocks that have chunks removed to new blocks std::shared_ptr<BlockStoreFile> NewBlockFile; - uint64_t WriteOffset = {}; - uint32_t NewBlockIndex = {}; + uint64_t WriteOffset = 0; + uint32_t NewBlockIndex = 0; DeletedChunks.reserve(DeleteCount); std::unordered_map<IoHash, BlockStoreDiskLocation> MovedBlockChunks; - for (auto BlockIndex : BlocksToReWrite) + for (uint32_t BlockIndex : BlocksToReWrite) { const size_t ChunkMapIndex = BlockIndexToChunkMapIndex[BlockIndex]; const auto& KeepMap = KeepChunks[ChunkMapIndex]; @@ -580,7 +580,7 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) MovedChunks.reserve(MovedChunks.size() + KeepMap.size()); std::vector<uint8_t> Chunk; - for (const auto& ChunkHash : KeepMap) + for (const IoHash& ChunkHash : KeepMap) { auto KeyIt = LocationMap.find(ChunkHash); const BlockStoreLocation ChunkLocation = KeyIt->second.Get(m_PayloadAlignment); @@ -692,7 +692,7 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx) OldBlockFile.reset(); } - for (const auto& ChunkHash : DeletedChunks) + for (const IoHash& ChunkHash : DeletedChunks) { DeletedSize += LocationMap[ChunkHash].GetSize(); } @@ -921,7 +921,7 @@ CasContainerStrategy::OpenContainer(bool IsNewStore) { if (std::filesystem::is_regular_file(LegacySobsPath)) { - uint32_t NewBlockIndex = {}; + uint32_t NewBlockIndex = 0; Stopwatch MigrationTimer; uint64_t TotalSize = 0; const auto Guard = 
MakeGuard([this, &MigrationTimer, &NewBlockIndex, &TotalSize] { @@ -1009,10 +1009,10 @@ CasContainerStrategy::OpenContainer(bool IsNewStore) m_CasLog.Open(SlogPath, true); std::unique_ptr<BlockStoreFile> NewBlockFile; - uint64_t WriteOffset = {}; + uint64_t WriteOffset = 0; std::vector<uint8_t> Chunk; - for (const auto& ChunkHash : ChunkHashes) + for (const IoHash& ChunkHash : ChunkHashes) { const auto& Entry = LegacyDiskIndex[ChunkHash]; const LegacyCasDiskLocation& ChunkLocation = Entry.Location; @@ -1088,7 +1088,7 @@ CasContainerStrategy::OpenContainer(bool IsNewStore) std::unordered_set<uint32_t> BlockUsage; for (const auto& Entry : m_LocationMap) { - const auto& Location = Entry.second; + const BlockStoreDiskLocation& Location = Entry.second; m_TotalSize.fetch_add(Location.GetSize()); BlockUsage.insert(Location.GetBlockIndex()); } @@ -1436,14 +1436,14 @@ TEST_CASE("compactcas.gc.compact") uint64_t ChunkSizes[9] = {128, 541, 1023, 781, 218, 37, 4, 997, 5}; std::vector<IoBuffer> Chunks; Chunks.reserve(9); - for (const auto& Size : ChunkSizes) + for (uint64_t Size : ChunkSizes) { Chunks.push_back(CreateChunk(Size)); } std::vector<IoHash> ChunkHashes; ChunkHashes.reserve(9); - for (const auto& Chunk : Chunks) + for (const IoBuffer& Chunk : Chunks) { ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } @@ -1468,7 +1468,7 @@ TEST_CASE("compactcas.gc.compact") CHECK(Cas.HaveChunk(ChunkHashes[7])); CHECK(Cas.HaveChunk(ChunkHashes[8])); - auto InitialSize = Cas.StorageSize().DiskSize; + uint64_t InitialSize = Cas.StorageSize().DiskSize; // Keep first and last { @@ -1655,7 +1655,7 @@ TEST_CASE("compactcas.gc.compact") CHECK(ChunkHashes[7] == IoHash::HashBuffer(Cas.FindChunk(ChunkHashes[7]))); CHECK(ChunkHashes[8] == IoHash::HashBuffer(Cas.FindChunk(ChunkHashes[8]))); - auto FinalSize = Cas.StorageSize().DiskSize; + uint64_t FinalSize = Cas.StorageSize().DiskSize; CHECK(InitialSize == FinalSize); } @@ -1666,14 +1666,14 @@ 
TEST_CASE("compactcas.gc.deleteblockonopen") uint64_t ChunkSizes[20] = {128, 541, 311, 181, 218, 37, 4, 397, 5, 92, 551, 721, 31, 92, 16, 99, 131, 41, 541, 84}; std::vector<IoBuffer> Chunks; Chunks.reserve(20); - for (const auto& Size : ChunkSizes) + for (uint64_t Size : ChunkSizes) { Chunks.push_back(CreateChunk(Size)); } std::vector<IoHash> ChunkHashes; ChunkHashes.reserve(20); - for (const auto& Chunk : Chunks) + for (const IoBuffer& Chunk : Chunks) { ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } @@ -1735,14 +1735,14 @@ TEST_CASE("compactcas.gc.handleopeniobuffer") uint64_t ChunkSizes[20] = {128, 541, 311, 181, 218, 37, 4, 397, 5, 92, 551, 721, 31, 92, 16, 99, 131, 41, 541, 84}; std::vector<IoBuffer> Chunks; Chunks.reserve(20); - for (const auto& Size : ChunkSizes) + for (const uint64_t& Size : ChunkSizes) { Chunks.push_back(CreateChunk(Size)); } std::vector<IoHash> ChunkHashes; ChunkHashes.reserve(20); - for (const auto& Chunk : Chunks) + for (const IoBuffer& Chunk : Chunks) { ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } @@ -1760,7 +1760,7 @@ TEST_CASE("compactcas.gc.handleopeniobuffer") CHECK(Cas.InsertChunk(Chunks[i], ChunkHashes[i]).New); } - auto RetainChunk = Cas.FindChunk(ChunkHashes[5]); + IoBuffer RetainChunk = Cas.FindChunk(ChunkHashes[5]); Cas.Flush(); // GC everything @@ -1785,7 +1785,7 @@ TEST_CASE("compactcas.legacyconversion") size_t SingleBlockSize = 0; std::vector<IoBuffer> Chunks; Chunks.reserve(ChunkCount); - for (const auto& Size : ChunkSizes) + for (uint64_t Size : ChunkSizes) { Chunks.push_back(CreateChunk(Size)); SingleBlockSize += Size; @@ -1793,7 +1793,7 @@ TEST_CASE("compactcas.legacyconversion") std::vector<IoHash> ChunkHashes; ChunkHashes.reserve(ChunkCount); - for (const auto& Chunk : Chunks) + for (const IoBuffer& Chunk : Chunks) { ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } @@ -1859,7 +1859,7 @@ TEST_CASE("compactcas.legacyconversion") } 
TCasLogFile<LegacyCasDiskIndexEntry> LegacyCasLog; LegacyCasLog.Open(SlogPath, true); - for (const auto& Entry : LogEntries) + for (const CasDiskIndexEntry& Entry : LogEntries) { BlockStoreLocation Location = Entry.Location.Get(16); LegacyCasDiskLocation LegacyLocation(Location.Offset, Location.Size); @@ -1937,9 +1937,9 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true)) for (int32_t Idx = 0; Idx < kChunkCount; ++Idx) { ThreadPool.ScheduleWork([&Cas, &OldChunkHashes, Idx]() { - auto ChunkHash = OldChunkHashes[Idx]; - auto Chunk = Cas.FindChunk(ChunkHash); - auto Hash = IoHash::HashBuffer(Chunk); + IoHash ChunkHash = OldChunkHashes[Idx]; + IoBuffer Chunk = Cas.FindChunk(ChunkHash); + IoHash Hash = IoHash::HashBuffer(Chunk); CHECK(ChunkHash == Hash); }); } @@ -1975,8 +1975,8 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true)) AddedChunkCount.fetch_add(1); }); ThreadPool.ScheduleWork([&Cas, &ChunkHashesLock, &OldChunkHashes, Idx]() { - IoHash ChunkHash = OldChunkHashes[Idx]; - auto Chunk = Cas.FindChunk(OldChunkHashes[Idx]); + IoHash ChunkHash = OldChunkHashes[Idx]; + IoBuffer Chunk = Cas.FindChunk(OldChunkHashes[Idx]); if (Chunk) { CHECK(ChunkHash == IoHash::HashBuffer(Chunk)); @@ -1992,7 +1992,7 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true)) AddedHashes.swap(NewChunkHashes); } // Need to be careful since we might GC blocks we don't know outside of RwLock::ExclusiveLockScope - for (auto ChunkHash : AddedHashes) + for (const IoHash& ChunkHash : AddedHashes) { if (Cas.HaveChunk(ChunkHash)) { @@ -2036,7 +2036,7 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true)) AddedHashes.swap(NewChunkHashes); } // Need to be careful since we might GC blocks we don't know outside of RwLock::ExclusiveLockScope - for (auto ChunkHash : AddedHashes) + for (const IoHash& ChunkHash : AddedHashes) { if (Cas.HaveChunk(ChunkHash)) { @@ -2064,7 +2064,7 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true)) } } { - 
for (const auto& ChunkHash : GcChunkHashes) + for (const IoHash& ChunkHash : GcChunkHashes) { ThreadPool.ScheduleWork([&Cas, ChunkHash]() { CHECK(Cas.HaveChunk(ChunkHash)); |