diff options
| author | Dan Engelbrecht <[email protected]> | 2023-11-06 15:55:39 +0100 |
|---|---|---|
| committer | GitHub <[email protected]> | 2023-11-06 15:55:39 +0100 |
| commit | 5295c9618cbae2bb937e188c072f66a77d793eb5 (patch) | |
| tree | 6aeca701b20af5745eb7924c901c9708c098199b /src/zenstore/blockstore.cpp | |
| parent | statsd for cas (#511) (diff) | |
| download | zen-5295c9618cbae2bb937e188c072f66a77d793eb5.tar.xz zen-5295c9618cbae2bb937e188c072f66a77d793eb5.zip | |
gc v2 tests (#512)
* set MaxBlockCount at init
* properly calculate total size
* basic blockstore compact blocks test
* correct detection of block swap
* Use one implementation for CreateRandomBlob
* reduce some data sets to increase speed of tests
* reduce test time
* rename BlockStoreCompactState::AddBlock -> BlockStoreCompactState::IncludeBlock
Diffstat (limited to 'src/zenstore/blockstore.cpp')
| -rw-r--r-- | src/zenstore/blockstore.cpp | 335 |
1 file changed, 306 insertions, 29 deletions
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp index 837185201..30a659784 100644 --- a/src/zenstore/blockstore.cpp +++ b/src/zenstore/blockstore.cpp @@ -173,6 +173,7 @@ BlockStore::Initialize(const std::filesystem::path& BlocksBasePath, uint64_t Max m_TotalSize = 0; m_BlocksBasePath = BlocksBasePath; m_MaxBlockSize = MaxBlockSize; + m_MaxBlockCount = MaxBlockCount; if (std::filesystem::is_directory(m_BlocksBasePath)) { @@ -322,9 +323,10 @@ BlockStore::WriteChunk(const void* Data, uint64_t Size, uint64_t Alignment, cons RwLock::ExclusiveLockScope InsertLock(m_InsertLock); - uint32_t WriteBlockIndex = m_WriteBlockIndex.load(std::memory_order_acquire); - bool IsWriting = !!m_WriteBlock; - if (!IsWriting || (m_CurrentInsertOffset + Size) > m_MaxBlockSize) + uint32_t WriteBlockIndex = m_WriteBlockIndex.load(std::memory_order_acquire); + bool IsWriting = !!m_WriteBlock; + uint64_t AlignedInsertOffset = RoundUp(m_CurrentInsertOffset, Alignment); + if (!IsWriting || (AlignedInsertOffset + Size) > m_MaxBlockSize) { if (m_WriteBlock) { @@ -347,18 +349,18 @@ BlockStore::WriteChunk(const void* Data, uint64_t Size, uint64_t Alignment, cons m_WriteBlock = NewBlockFile; m_WriteBlockIndex.store(WriteBlockIndex, std::memory_order_release); m_CurrentInsertOffset = 0; + AlignedInsertOffset = 0; } - uint64_t InsertOffset = m_CurrentInsertOffset; - m_CurrentInsertOffset = RoundUp(InsertOffset + Size, Alignment); - uint64_t AlignedWriteSize = m_CurrentInsertOffset - InsertOffset; - Ref<BlockStoreFile> WriteBlock = m_WriteBlock; + uint64_t AlignedWriteSize = AlignedInsertOffset - m_CurrentInsertOffset + Size; + m_CurrentInsertOffset = AlignedInsertOffset + Size; + Ref<BlockStoreFile> WriteBlock = m_WriteBlock; m_ActiveWriteBlocks.push_back(WriteBlockIndex); InsertLock.ReleaseNow(); - WriteBlock->Write(Data, Size, InsertOffset); + WriteBlock->Write(Data, Size, AlignedInsertOffset); m_TotalSize.fetch_add(AlignedWriteSize, std::memory_order::relaxed); - 
Callback({.BlockIndex = WriteBlockIndex, .Offset = InsertOffset, .Size = Size}); + Callback({.BlockIndex = WriteBlockIndex, .Offset = AlignedInsertOffset, .Size = Size}); { RwLock::ExclusiveLockScope _(m_InsertLock); @@ -1003,12 +1005,15 @@ BlockStore::CompactBlocks(const BlockStoreCompactState& CompactState, CompactState.IterateBlocks( [&](uint32_t BlockIndex, const std::vector<size_t>& KeepChunkIndexes, const std::vector<BlockStoreLocation>& ChunkLocations) { - ZEN_ASSERT(BlockIndex != m_WriteBlockIndex.load()); - Ref<BlockStoreFile> OldBlockFile; { RwLock::SharedLockScope _(m_InsertLock); - auto It = m_ChunkBlocks.find(BlockIndex); + if ((BlockIndex == m_WriteBlockIndex.load()) && m_WriteBlock) + { + // You are trying to collect the currently writing block, Report error? + return; + } + auto It = m_ChunkBlocks.find(BlockIndex); if (It == m_ChunkBlocks.end()) { // This block has unknown, we can't move anything. Report error? @@ -1320,21 +1325,6 @@ namespace blockstore::impl { return Result; }; - static IoBuffer CreateChunk(uint64_t Size) - { - static std::random_device rd; - static std::mt19937 g(rd()); - - std::vector<uint8_t> Values; - Values.resize(Size); - for (size_t Idx = 0; Idx < Size; ++Idx) - { - Values[Idx] = static_cast<uint8_t>(Idx); - } - std::shuffle(Values.begin(), Values.end(), g); - - return IoBufferBuilder::MakeCloneFromMemory(Values.data(), Values.size()); - } } // namespace blockstore::impl TEST_CASE("blockstore.chunks") @@ -1546,7 +1536,7 @@ TEST_CASE("blockstore.reclaim.space") ChunkHashes.reserve(ChunkCount); for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) { - IoBuffer Chunk = CreateChunk(57 + ChunkIndex); + IoBuffer Chunk = CreateRandomBlob(57 + ChunkIndex); Store.WriteChunk(Chunk.Data(), Chunk.Size(), Alignment, [&](const BlockStoreLocation& L) { ChunkLocations.push_back(L); }); ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); @@ -1665,7 +1655,7 @@ TEST_CASE("blockstore.thread.read.write") 
ChunkHashes.reserve(ChunkCount); for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) { - IoBuffer Chunk = CreateChunk(57 + ChunkIndex / 2); + IoBuffer Chunk = CreateRandomBlob(57 + ChunkIndex / 2); Chunks.push_back(Chunk); ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } @@ -1730,6 +1720,293 @@ TEST_CASE("blockstore.thread.read.write") } } +TEST_CASE("blockstore.compact.blocks") +{ + using namespace blockstore::impl; + + ScopedTemporaryDirectory TempDir; + auto RootDirectory = TempDir.Path(); + + BlockStore Store; + Store.Initialize(RootDirectory / "store", 1088, 1024); + + constexpr size_t ChunkCount = 200; + constexpr size_t Alignment = 8; + std::vector<IoBuffer> Chunks; + std::vector<IoHash> ChunkHashes; + Chunks.reserve(ChunkCount); + ChunkHashes.reserve(ChunkCount); + for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) + { + IoBuffer Chunk = CreateRandomBlob(57 + ChunkIndex / 2); + Chunks.push_back(Chunk); + ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); + } + + std::vector<BlockStoreLocation> ChunkLocations; + ChunkLocations.resize(ChunkCount); + + for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) + { + IoBuffer& Chunk = Chunks[ChunkIndex]; + Store.WriteChunk(Chunk.Data(), Chunk.Size(), Alignment, [&](const BlockStoreLocation& L) { ChunkLocations[ChunkIndex] = L; }); + } + + SUBCASE("touch nothing") + { + uint64_t PreSize = Store.TotalSize(); + CHECK(PreSize > 0); + BlockStoreCompactState State; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray&, uint64_t) { CHECK(false); }, + []() { + CHECK(false); + return 0; + }); + CHECK_EQ(PreSize, Store.TotalSize()); + } + SUBCASE("keep nothing") + { + Store.Flush(true); + + uint64_t PreSize = Store.TotalSize(); + CHECK(PreSize > 0); + BlockStoreCompactState State; + for (const BlockStoreLocation& Location : ChunkLocations) + { + State.IncludeBlock(Location.BlockIndex); + } + uint64_t 
RemovedSize = 0; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) { + RemovedSize += Removed; + CHECK(Moved.empty()); + }, + []() { return 0; }); + CHECK_EQ(RemovedSize, PreSize); + CHECK_EQ(0u, Store.TotalSize()); + } + SUBCASE("keep current write block") + { + uint64_t PreSize = Store.TotalSize(); + BlockStoreCompactState State; + BlockStore::ReclaimSnapshotState SnapshotState = Store.GetReclaimSnapshotState(); + for (const BlockStoreLocation& Location : ChunkLocations) + { + if (SnapshotState.m_ActiveWriteBlocks.contains(Location.BlockIndex)) + { + continue; + } + State.IncludeBlock(Location.BlockIndex); + } + uint64_t RemovedSize = 0; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) { + RemovedSize += Removed; + CHECK(Moved.empty()); + }, + []() { return 0; }); + CHECK_EQ(Store.TotalSize() + RemovedSize, PreSize); + CHECK_LE(Store.TotalSize(), 1088); + CHECK_GT(Store.TotalSize(), 0); + } + SUBCASE("keep everthing") + { + Store.Flush(true); + + uint64_t PreSize = Store.TotalSize(); + BlockStoreCompactState State; + BlockStore::ReclaimSnapshotState SnapshotState = Store.GetReclaimSnapshotState(); + for (const BlockStoreLocation& Location : ChunkLocations) + { + State.AddKeepLocation(Location); + } + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray&, uint64_t) { CHECK(false); }, + []() { + CHECK(false); + return 0; + }); + CHECK_EQ(Store.TotalSize(), PreSize); + } + SUBCASE("drop first block") + { + uint64_t PreSize = Store.TotalSize(); + BlockStoreCompactState State; + BlockStore::ReclaimSnapshotState SnapshotState = Store.GetReclaimSnapshotState(); + + CHECK(!SnapshotState.m_ActiveWriteBlocks.contains(0)); + State.IncludeBlock(0); + + uint64_t FirstBlockSize = 0; + for (const BlockStoreLocation& Location : ChunkLocations) + { + if (Location.BlockIndex == 0) + { + FirstBlockSize = 
Max<uint64_t>(FirstBlockSize, Location.Offset + Location.Size); + } + } + + uint64_t RemovedSize = 0; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) { + CHECK(Moved.empty()); + RemovedSize += Removed; + }, + []() { + CHECK(false); + return 0; + }); + CHECK_EQ(FirstBlockSize, RemovedSize); + CHECK_EQ(Store.TotalSize(), PreSize - FirstBlockSize); + } + SUBCASE("compact first block") + { + uint64_t PreSize = Store.TotalSize(); + BlockStoreCompactState State; + BlockStore::ReclaimSnapshotState SnapshotState = Store.GetReclaimSnapshotState(); + + CHECK(!SnapshotState.m_ActiveWriteBlocks.contains(0)); + State.IncludeBlock(0); + + uint64_t SkipChunkCount = 2; + std::vector<BlockStoreLocation> DroppedLocations(ChunkLocations.begin(), ChunkLocations.begin() + 2); + for (auto It = ChunkLocations.begin() + 2; It != ChunkLocations.end(); It++) + { + const BlockStoreLocation& Location = *It; + if (Location.BlockIndex != 0) + { + continue; + } + State.AddKeepLocation(Location); + } + uint64_t RemovedSize = 0; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) { + for (const auto& Move : Moved) + { + const BlockStoreLocation& OldLocation = State.GetLocation(Move.first); + CHECK(OldLocation.BlockIndex == 0); // Only move from block 0 + CHECK(std::find(DroppedLocations.begin(), DroppedLocations.end(), OldLocation) == DroppedLocations.end()); + auto It = std::find(ChunkLocations.begin(), ChunkLocations.end(), OldLocation); + CHECK(It != ChunkLocations.end()); + (*It) = Move.second; + } + RemovedSize += Removed; + }, + []() { + CHECK(false); + return 0; + }); + + SkipChunkCount = 2; + + for (size_t Index = 0; Index < ChunkLocations.size(); Index++) + { + const BlockStoreLocation& Location = ChunkLocations[Index]; + if (Location.BlockIndex == 0 && SkipChunkCount > 0) + { + CHECK(!Store.TryGetChunk(Location)); + continue; + } + IoBuffer Buffer = 
Store.TryGetChunk(Location); + CHECK(Buffer); + IoHash RawHash = IoHash::HashBuffer(Buffer.Data(), Buffer.Size()); + CHECK_EQ(ChunkHashes[Index], RawHash); + } + CHECK_LT(Store.TotalSize(), PreSize); + } + SUBCASE("compact every other item") + { + uint64_t PreSize = Store.TotalSize(); + BlockStoreCompactState State; + BlockStore::ReclaimSnapshotState SnapshotState = Store.GetReclaimSnapshotState(); + bool SkipFlag = false; + + for (const BlockStoreLocation& Location : ChunkLocations) + { + if (SnapshotState.m_ActiveWriteBlocks.contains(Location.BlockIndex)) + { + continue; + } + if (SkipFlag) + { + State.IncludeBlock(Location.BlockIndex); + SkipFlag = false; + continue; + } + SkipFlag = true; + } + + SkipFlag = false; + std::vector<BlockStoreLocation> DroppedLocations; + for (const BlockStoreLocation& Location : ChunkLocations) + { + if (SnapshotState.m_ActiveWriteBlocks.contains(Location.BlockIndex)) + { + continue; + } + if (SkipFlag) + { + DroppedLocations.push_back(Location); + SkipFlag = false; + continue; + } + State.AddKeepLocation(Location); + SkipFlag = true; + } + uint64_t RemovedSize = 0; + Store.CompactBlocks( + State, + Alignment, + [&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) { + for (const auto& Move : Moved) + { + const BlockStoreLocation& OldLocation = State.GetLocation(Move.first); + CHECK(std::find(DroppedLocations.begin(), DroppedLocations.end(), OldLocation) == DroppedLocations.end()); + auto It = std::find(ChunkLocations.begin(), ChunkLocations.end(), OldLocation); + CHECK(It != ChunkLocations.end()); + (*It) = Move.second; + } + RemovedSize += Removed; + }, + []() { + CHECK(false); + return 0; + }); + SkipFlag = false; + for (size_t Index = 0; Index < ChunkLocations.size(); Index++) + { + const BlockStoreLocation& Location = ChunkLocations[Index]; + if (SkipFlag && !SnapshotState.m_ActiveWriteBlocks.contains(Location.BlockIndex)) + { + CHECK(std::find(DroppedLocations.begin(), DroppedLocations.end(), Location) != 
DroppedLocations.end()); + CHECK(!Store.TryGetChunk(Location)); + SkipFlag = false; + continue; + } + IoBuffer Buffer = Store.TryGetChunk(Location); + CHECK(Buffer); + IoHash RawHash = IoHash::HashBuffer(Buffer.Data(), Buffer.Size()); + CHECK_EQ(ChunkHashes[Index], RawHash); + SkipFlag = true; + } + CHECK_LT(Store.TotalSize(), PreSize); + } +} + #endif void |