aboutsummaryrefslogtreecommitdiff
path: root/src/zenstore/blockstore.cpp
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-11-24 13:26:51 +0100
committerGitHub <[email protected]>2023-11-24 13:26:51 +0100
commit254d2f89c110fc5f14e658505559a7e7534a984d (patch)
tree511e8dcbae633ae4ccaea20f29b9b04bc41ea875 /src/zenstore/blockstore.cpp
parentfix truncation of sentry hostname (diff)
downloadzen-254d2f89c110fc5f14e658505559a7e7534a984d.tar.xz
zen-254d2f89c110fc5f14e658505559a7e7534a984d.zip
Add GC Cancel/Stop (#568)
- GcScheduler will now cancel any running GC when it shuts down. - The old GC is rather limited in *when* it can react to a GC cancel request; GCv2 is more responsive.
Diffstat (limited to 'src/zenstore/blockstore.cpp')
-rw-r--r--src/zenstore/blockstore.cpp255
1 files changed, 141 insertions, 114 deletions
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp
index ec299092d..e4a66daf4 100644
--- a/src/zenstore/blockstore.cpp
+++ b/src/zenstore/blockstore.cpp
@@ -1056,156 +1056,172 @@ BlockStore::CompactBlocks(const BlockStoreCompactState& CompactState,
}
});
- auto ReportChanges = [&]() {
+ auto ReportChanges = [&]() -> bool {
+ bool Continue = true;
if (!MovedChunks.empty() || RemovedSize > 0)
{
- ChangeCallback(MovedChunks, RemovedSize > AddedSize ? RemovedSize - AddedSize : 0);
+ Continue = ChangeCallback(MovedChunks, RemovedSize > AddedSize ? RemovedSize - AddedSize : 0);
DeletedSize += RemovedSize;
RemovedSize = 0;
AddedSize = 0;
MovedCount += MovedChunks.size();
MovedChunks.clear();
}
+ return Continue;
};
std::vector<uint32_t> RemovedBlocks;
- CompactState.IterateBlocks(
- [&](uint32_t BlockIndex, const std::vector<size_t>& KeepChunkIndexes, const std::vector<BlockStoreLocation>& ChunkLocations) {
- Ref<BlockStoreFile> OldBlockFile;
+ CompactState.IterateBlocks([&](uint32_t BlockIndex,
+ const std::vector<size_t>& KeepChunkIndexes,
+ const std::vector<BlockStoreLocation>& ChunkLocations) -> bool {
+ Ref<BlockStoreFile> OldBlockFile;
+ {
+ RwLock::SharedLockScope _(m_InsertLock);
+ if ((BlockIndex == m_WriteBlockIndex.load()) && m_WriteBlock)
{
- RwLock::SharedLockScope _(m_InsertLock);
- if ((BlockIndex == m_WriteBlockIndex.load()) && m_WriteBlock)
- {
- // You are trying to collect the currently writing block, Report error?
- return;
- }
- auto It = m_ChunkBlocks.find(BlockIndex);
- if (It == m_ChunkBlocks.end())
- {
- // This block has unknown, we can't move anything. Report error?
- return;
- }
- if (!It->second)
- {
- // This block has been removed, we can't move anything. Report error?
- return;
- }
- OldBlockFile = It->second;
+ ZEN_ERROR("Compact Block was requested to rewrite the currently active write block in '{}', Block index {}",
+ m_BlocksBasePath,
+ BlockIndex);
+ return false;
}
- ZEN_ASSERT(OldBlockFile);
+ auto It = m_ChunkBlocks.find(BlockIndex);
+ if (It == m_ChunkBlocks.end())
+ {
+ ZEN_WARN("Compact Block was requested to rewrite an unknown block in '{}', Block index {}", m_BlocksBasePath, BlockIndex);
+ return true;
+ }
+ if (!It->second)
+ {
+ ZEN_WARN("Compact Block was requested to rewrite a deleted block in '{}', Block index {}", m_BlocksBasePath, BlockIndex);
+ return true;
+ }
+ OldBlockFile = It->second;
+ }
+ ZEN_ASSERT(OldBlockFile);
- uint64_t OldBlockSize = OldBlockFile->FileSize();
+ uint64_t OldBlockSize = OldBlockFile->FileSize();
- std::vector<uint8_t> Chunk;
- for (const size_t& ChunkIndex : KeepChunkIndexes)
+ std::vector<uint8_t> Chunk;
+ for (const size_t& ChunkIndex : KeepChunkIndexes)
+ {
+ const BlockStoreLocation ChunkLocation = ChunkLocations[ChunkIndex];
+ if (ChunkLocation.Offset + ChunkLocation.Size > OldBlockSize)
{
- const BlockStoreLocation ChunkLocation = ChunkLocations[ChunkIndex];
- if (ChunkLocation.Offset + ChunkLocation.Size > OldBlockSize)
- {
- ZEN_WARN(
- "Compact Block skipping chunk outside of block range in '{}', Chunk start {}, Chunk size {} in Block {}, Block "
- "size {}",
- m_BlocksBasePath,
- ChunkLocation.Offset,
- ChunkLocation.Size,
- OldBlockFile->GetPath(),
- OldBlockSize);
- continue;
- }
+ ZEN_WARN(
+ "Compact Block skipping chunk outside of block range in '{}', Chunk start {}, Chunk size {} in Block {}, Block "
+ "size {}",
+ m_BlocksBasePath,
+ ChunkLocation.Offset,
+ ChunkLocation.Size,
+ OldBlockFile->GetPath(),
+ OldBlockSize);
+ continue;
+ }
- Chunk.resize(ChunkLocation.Size);
- OldBlockFile->Read(Chunk.data(), Chunk.size(), ChunkLocation.Offset);
+ Chunk.resize(ChunkLocation.Size);
+ OldBlockFile->Read(Chunk.data(), Chunk.size(), ChunkLocation.Offset);
- if ((WriteOffset + Chunk.size()) > m_MaxBlockSize)
+ if ((WriteOffset + Chunk.size()) > m_MaxBlockSize)
+ {
+ if (NewBlockFile)
{
- if (NewBlockFile)
- {
- NewBlockFile->Flush();
- MovedSize += NewBlockFile->FileSize();
- NewBlockFile = nullptr;
+ NewBlockFile->Flush();
+ MovedSize += NewBlockFile->FileSize();
+ NewBlockFile = nullptr;
- ZEN_ASSERT(!MovedChunks.empty() || RemovedSize > 0); // We should not have a new block if we haven't moved anything
+ ZEN_ASSERT(!MovedChunks.empty() || RemovedSize > 0); // We should not have a new block if we haven't moved anything
- ChangeCallback(MovedChunks, RemovedSize);
- DeletedSize += RemovedSize;
- RemovedSize = 0;
- MovedCount += MovedChunks.size();
- MovedChunks.clear();
+ if (!ReportChanges())
+ {
+ return false;
}
+ }
- uint32_t NextBlockIndex = m_WriteBlockIndex.load(std::memory_order_relaxed);
+ uint32_t NextBlockIndex = m_WriteBlockIndex.load(std::memory_order_relaxed);
+ {
+ RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
+ std::filesystem::path NewBlockPath;
+ NextBlockIndex = GetFreeBlockIndex(NextBlockIndex, InsertLock, NewBlockPath);
+ if (NextBlockIndex == (uint32_t)m_MaxBlockCount)
{
- RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
- std::filesystem::path NewBlockPath;
- NextBlockIndex = GetFreeBlockIndex(NextBlockIndex, InsertLock, NewBlockPath);
- if (NextBlockIndex == (uint32_t)m_MaxBlockCount)
- {
- ZEN_ERROR("unable to allocate a new block in '{}', count limit {} exeeded",
- m_BlocksBasePath,
- static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 1);
- return;
- }
-
- NewBlockFile = new BlockStoreFile(NewBlockPath);
- m_ChunkBlocks[NextBlockIndex] = NewBlockFile;
+ ZEN_ERROR("unable to allocate a new block in '{}', count limit {} exeeded",
+ m_BlocksBasePath,
+ static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 1);
+ return false;
}
- ZEN_ASSERT(NewBlockFile);
- std::error_code Error;
- DiskSpace Space = DiskSpaceInfo(m_BlocksBasePath, Error);
- if (Error)
+ NewBlockFile = new BlockStoreFile(NewBlockPath);
+ m_ChunkBlocks[NextBlockIndex] = NewBlockFile;
+ }
+ ZEN_ASSERT(NewBlockFile);
+
+ std::error_code Error;
+ DiskSpace Space = DiskSpaceInfo(m_BlocksBasePath, Error);
+ if (Error)
+ {
+ ZEN_ERROR("get disk space in '{}' FAILED, reason: '{}'", m_BlocksBasePath, Error.message());
{
- ZEN_ERROR("get disk space in '{}' FAILED, reason: '{}'", m_BlocksBasePath, Error.message());
- return;
+ RwLock::ExclusiveLockScope _l(m_InsertLock);
+ ZEN_ASSERT(m_ChunkBlocks[NextBlockIndex] == NewBlockFile);
+ m_ChunkBlocks.erase(NextBlockIndex);
}
+ NewBlockFile->MarkAsDeleteOnClose();
+ NewBlockFile = nullptr;
+ return false;
+ }
- if (Space.Free < m_MaxBlockSize)
+ if (Space.Free < m_MaxBlockSize)
+ {
+ uint64_t ReclaimedSpace = DiskReserveCallback();
+ if (Space.Free + ReclaimedSpace < m_MaxBlockSize)
{
- uint64_t ReclaimedSpace = DiskReserveCallback();
- if (Space.Free + ReclaimedSpace < m_MaxBlockSize)
- {
- ZEN_WARN("garbage collect for '{}' FAILED, required disk space {}, free {}",
- m_BlocksBasePath,
- m_MaxBlockSize,
- NiceBytes(Space.Free + ReclaimedSpace));
- {
- RwLock::ExclusiveLockScope _l(m_InsertLock);
- ZEN_ASSERT(m_ChunkBlocks[NextBlockIndex] == NewBlockFile);
- m_ChunkBlocks.erase(NextBlockIndex);
- }
- NewBlockFile = nullptr;
- return;
- }
-
- ZEN_INFO("using gc reserve for '{}', reclaimed {}, disk free {}",
+ ZEN_WARN("garbage collect for '{}' FAILED, required disk space {}, free {}",
m_BlocksBasePath,
- ReclaimedSpace,
+ m_MaxBlockSize,
NiceBytes(Space.Free + ReclaimedSpace));
+ {
+ RwLock::ExclusiveLockScope _l(m_InsertLock);
+ ZEN_ASSERT(m_ChunkBlocks[NextBlockIndex] == NewBlockFile);
+ m_ChunkBlocks.erase(NextBlockIndex);
+ }
+ NewBlockFile->MarkAsDeleteOnClose();
+ NewBlockFile = nullptr;
+ return false;
}
- NewBlockFile->Create(m_MaxBlockSize);
- NewBlockIndex = NextBlockIndex;
- WriteOffset = 0;
- }
- NewBlockFile->Write(Chunk.data(), Chunk.size(), WriteOffset);
- MovedChunks.push_back({ChunkIndex, {.BlockIndex = NewBlockIndex, .Offset = WriteOffset, .Size = Chunk.size()}});
- WriteOffset = RoundUp(WriteOffset + Chunk.size(), PayloadAlignment);
- AddedSize += Chunk.size();
+ ZEN_INFO("using gc reserve for '{}', reclaimed {}, disk free {}",
+ m_BlocksBasePath,
+ ReclaimedSpace,
+ NiceBytes(Space.Free + ReclaimedSpace));
+ }
+ NewBlockFile->Create(m_MaxBlockSize);
+ NewBlockIndex = NextBlockIndex;
+ WriteOffset = 0;
}
- Chunk.clear();
- ReportChanges();
+ NewBlockFile->Write(Chunk.data(), Chunk.size(), WriteOffset);
+ MovedChunks.push_back({ChunkIndex, {.BlockIndex = NewBlockIndex, .Offset = WriteOffset, .Size = Chunk.size()}});
+ WriteOffset = RoundUp(WriteOffset + Chunk.size(), PayloadAlignment);
+ AddedSize += Chunk.size();
+ }
+ Chunk.clear();
+
+ if (!ReportChanges())
+ {
+ return false;
+ }
- {
- RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
- ZEN_DEBUG("marking cas block store file '{}' for delete, block #{}", OldBlockFile->GetPath(), BlockIndex);
- OldBlockFile->MarkAsDeleteOnClose();
- m_ChunkBlocks.erase(BlockIndex);
- m_TotalSize.fetch_sub(OldBlockSize);
- RemovedSize += OldBlockSize;
- }
- });
+ {
+ RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
+ ZEN_DEBUG("marking cas block store file '{}' for delete, block #{}", OldBlockFile->GetPath(), BlockIndex);
+ OldBlockFile->MarkAsDeleteOnClose();
+ m_ChunkBlocks.erase(BlockIndex);
+ m_TotalSize.fetch_sub(OldBlockSize);
+ RemovedSize += OldBlockSize;
+ }
+ return true;
+ });
if (NewBlockFile)
{
@@ -1825,7 +1841,10 @@ TEST_CASE("blockstore.compact.blocks")
Store.CompactBlocks(
State,
Alignment,
- [&](const BlockStore::MovedChunksArray&, uint64_t) { CHECK(false); },
+ [&](const BlockStore::MovedChunksArray&, uint64_t) {
+ CHECK(false);
+ return true;
+ },
[]() {
CHECK(false);
return 0;
@@ -1850,6 +1869,7 @@ TEST_CASE("blockstore.compact.blocks")
[&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) {
RemovedSize += Removed;
CHECK(Moved.empty());
+ return true;
},
[]() { return 0; });
CHECK_EQ(RemovedSize, PreSize);
@@ -1875,6 +1895,7 @@ TEST_CASE("blockstore.compact.blocks")
[&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) {
RemovedSize += Removed;
CHECK(Moved.empty());
+ return true;
},
[]() { return 0; });
CHECK_EQ(Store.TotalSize() + RemovedSize, PreSize);
@@ -1895,7 +1916,10 @@ TEST_CASE("blockstore.compact.blocks")
Store.CompactBlocks(
State,
Alignment,
- [&](const BlockStore::MovedChunksArray&, uint64_t) { CHECK(false); },
+ [&](const BlockStore::MovedChunksArray&, uint64_t) {
+ CHECK(false);
+ return true;
+ },
[]() {
CHECK(false);
return 0;
@@ -1927,6 +1951,7 @@ TEST_CASE("blockstore.compact.blocks")
[&](const BlockStore::MovedChunksArray& Moved, uint64_t Removed) {
CHECK(Moved.empty());
RemovedSize += Removed;
+ return true;
},
[]() {
CHECK(false);
@@ -1970,6 +1995,7 @@ TEST_CASE("blockstore.compact.blocks")
(*It) = Move.second;
}
RemovedSize += Removed;
+ return true;
},
[]() {
CHECK(false);
@@ -2046,6 +2072,7 @@ TEST_CASE("blockstore.compact.blocks")
(*It) = Move.second;
}
RemovedSize += Removed;
+ return true;
},
[]() {
CHECK(false);