aboutsummaryrefslogtreecommitdiff
path: root/src/zenstore
diff options
context:
space:
mode:
Diffstat (limited to 'src/zenstore')
-rw-r--r--src/zenstore/blockstore.cpp190
-rw-r--r--src/zenstore/compactcas.cpp217
-rw-r--r--src/zenstore/compactcas.h7
-rw-r--r--src/zenstore/filecas.cpp167
-rw-r--r--src/zenstore/filecas.h7
-rw-r--r--src/zenstore/gc.cpp326
-rw-r--r--src/zenstore/include/zenstore/blockstore.h58
-rw-r--r--src/zenstore/include/zenstore/gc.h187
8 files changed, 1131 insertions, 28 deletions
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp
index 02ee204ad..837185201 100644
--- a/src/zenstore/blockstore.cpp
+++ b/src/zenstore/blockstore.cpp
@@ -957,6 +957,196 @@ BlockStore::IterateChunks(const std::vector<BlockStoreLocation>& ChunkLocations,
}
}
+void
+BlockStore::CompactBlocks(const BlockStoreCompactState& CompactState,
+ uint64_t PayloadAlignment,
+ const CompactCallback& ChangeCallback,
+ const ClaimDiskReserveCallback& DiskReserveCallback)
+{
+ uint64_t DeletedSize = 0;
+ uint64_t MovedCount = 0;
+ uint64_t MovedSize = 0;
+
+ Stopwatch TotalTimer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("compact blocks for '{}' DONE after {}, deleted {} and moved {} chunks ({}) ",
+ m_BlocksBasePath,
+ NiceTimeSpanMs(TotalTimer.GetElapsedTimeMs()),
+ NiceBytes(DeletedSize),
+ MovedCount,
+ NiceBytes(MovedSize));
+ });
+
+ uint64_t WriteOffset = m_MaxBlockSize + 1u; // Force detect a new block
+ uint32_t NewBlockIndex = 0;
+ MovedChunksArray MovedChunks;
+
+ uint64_t RemovedSize = 0;
+
+ Ref<BlockStoreFile> NewBlockFile;
+ auto NewBlockFileGuard = MakeGuard([&]() {
+ if (NewBlockFile)
+ {
+ ZEN_DEBUG("dropping incomplete cas block store file '{}'", NewBlockFile->GetPath());
+ {
+ RwLock::ExclusiveLockScope _l(m_InsertLock);
+ if (m_ChunkBlocks[NewBlockIndex] == NewBlockFile)
+ {
+ m_ChunkBlocks.erase(NewBlockIndex);
+ }
+ }
+ NewBlockFile->MarkAsDeleteOnClose();
+ }
+ });
+
+ std::vector<uint32_t> RemovedBlocks;
+
+ CompactState.IterateBlocks(
+ [&](uint32_t BlockIndex, const std::vector<size_t>& KeepChunkIndexes, const std::vector<BlockStoreLocation>& ChunkLocations) {
+ ZEN_ASSERT(BlockIndex != m_WriteBlockIndex.load());
+
+ Ref<BlockStoreFile> OldBlockFile;
+ {
+ RwLock::SharedLockScope _(m_InsertLock);
+ auto It = m_ChunkBlocks.find(BlockIndex);
+ if (It == m_ChunkBlocks.end())
+ {
+ // This block is unknown, we can't move anything. Report error?
+ return;
+ }
+ if (!It->second)
+ {
+ // This block has been removed, we can't move anything. Report error?
+ return;
+ }
+ OldBlockFile = It->second;
+ }
+ ZEN_ASSERT(OldBlockFile);
+
+ uint64_t OldBlockSize = OldBlockFile->FileSize();
+
+ // TODO: Add heuristics for determining if it is worth to compact a block (if only a very small part is removed)
+
+ std::vector<uint8_t> Chunk;
+ for (const size_t& ChunkIndex : KeepChunkIndexes)
+ {
+ const BlockStoreLocation ChunkLocation = ChunkLocations[ChunkIndex];
+ Chunk.resize(ChunkLocation.Size);
+ OldBlockFile->Read(Chunk.data(), Chunk.size(), ChunkLocation.Offset);
+
+ if ((WriteOffset + Chunk.size()) > m_MaxBlockSize)
+ {
+ if (NewBlockFile)
+ {
+ NewBlockFile->Flush();
+ MovedSize += NewBlockFile->FileSize();
+ NewBlockFile = nullptr;
+
+ ZEN_ASSERT(!MovedChunks.empty() || RemovedSize > 0); // We should not have a new block if we haven't moved anything
+
+ ChangeCallback(MovedChunks, RemovedSize);
+ DeletedSize += RemovedSize;
+ RemovedSize = 0;
+ MovedCount += MovedChunks.size();
+ MovedChunks.clear();
+ }
+
+ uint32_t NextBlockIndex = m_WriteBlockIndex.load(std::memory_order_relaxed);
+ {
+ RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
+ std::filesystem::path NewBlockPath;
+ NextBlockIndex = GetFreeBlockIndex(NextBlockIndex, InsertLock, NewBlockPath);
+ if (NextBlockIndex == (uint32_t)m_MaxBlockCount)
+ {
+ ZEN_ERROR("unable to allocate a new block in '{}', count limit {} exeeded",
+ m_BlocksBasePath,
+ static_cast<uint64_t>(std::numeric_limits<uint32_t>::max()) + 1);
+ return;
+ }
+
+ NewBlockFile = new BlockStoreFile(NewBlockPath);
+ m_ChunkBlocks[NextBlockIndex] = NewBlockFile;
+ }
+ ZEN_ASSERT(NewBlockFile);
+
+ std::error_code Error;
+ DiskSpace Space = DiskSpaceInfo(m_BlocksBasePath, Error);
+ if (Error)
+ {
+ ZEN_ERROR("get disk space in '{}' FAILED, reason: '{}'", m_BlocksBasePath, Error.message());
+ return;
+ }
+
+ if (Space.Free < m_MaxBlockSize)
+ {
+ uint64_t ReclaimedSpace = DiskReserveCallback();
+ if (Space.Free + ReclaimedSpace < m_MaxBlockSize)
+ {
+ ZEN_WARN("garbage collect for '{}' FAILED, required disk space {}, free {}",
+ m_BlocksBasePath,
+ m_MaxBlockSize,
+ NiceBytes(Space.Free + ReclaimedSpace));
+ {
+ RwLock::ExclusiveLockScope _l(m_InsertLock);
+ ZEN_ASSERT(m_ChunkBlocks[NextBlockIndex] == NewBlockFile);
+ m_ChunkBlocks.erase(NextBlockIndex);
+ }
+ NewBlockFile->MarkAsDeleteOnClose();
+ return;
+ }
+
+ ZEN_INFO("using gc reserve for '{}', reclaimed {}, disk free {}",
+ m_BlocksBasePath,
+ ReclaimedSpace,
+ NiceBytes(Space.Free + ReclaimedSpace));
+ }
+ NewBlockFile->Create(m_MaxBlockSize);
+ NewBlockIndex = NextBlockIndex;
+ WriteOffset = 0;
+ }
+
+ NewBlockFile->Write(Chunk.data(), Chunk.size(), WriteOffset);
+ MovedChunks.push_back({ChunkIndex, {.BlockIndex = NewBlockIndex, .Offset = WriteOffset, .Size = Chunk.size()}});
+ WriteOffset = RoundUp(WriteOffset + Chunk.size(), PayloadAlignment);
+ }
+ Chunk.clear();
+
+ // Report what we have moved so we can purge the old block
+ if (!MovedChunks.empty() || RemovedSize > 0)
+ {
+ ChangeCallback(MovedChunks, RemovedSize);
+ DeletedSize += RemovedSize;
+ RemovedSize = 0;
+ MovedCount += MovedChunks.size();
+ MovedChunks.clear();
+ }
+
+ {
+ RwLock::ExclusiveLockScope InsertLock(m_InsertLock);
+ ZEN_DEBUG("marking cas block store file '{}' for delete, block #{}", OldBlockFile->GetPath(), BlockIndex);
+ OldBlockFile->MarkAsDeleteOnClose();
+ m_ChunkBlocks.erase(BlockIndex);
+ m_TotalSize.fetch_sub(OldBlockSize);
+ RemovedSize += OldBlockSize;
+ }
+ });
+ if (NewBlockFile)
+ {
+ NewBlockFile->Flush();
+ MovedSize += NewBlockFile->FileSize();
+ NewBlockFile = nullptr;
+ }
+
+ if (!MovedChunks.empty() || RemovedSize > 0)
+ {
+ ChangeCallback(MovedChunks, RemovedSize);
+ DeletedSize += RemovedSize;
+ RemovedSize = 0;
+ MovedCount += MovedChunks.size();
+ MovedChunks.clear();
+ }
+}
+
const char*
BlockStore::GetBlockFileExtension()
{
diff --git a/src/zenstore/compactcas.cpp b/src/zenstore/compactcas.cpp
index 115bdcf03..f93dafa21 100644
--- a/src/zenstore/compactcas.cpp
+++ b/src/zenstore/compactcas.cpp
@@ -117,10 +117,12 @@ namespace {
CasContainerStrategy::CasContainerStrategy(GcManager& Gc) : m_Log(logging::Get("containercas")), m_Gc(Gc)
{
m_Gc.AddGcStorage(this);
+ m_Gc.AddGcReferenceStore(*this);
}
CasContainerStrategy::~CasContainerStrategy()
{
+ m_Gc.RemoveGcReferenceStore(*this);
m_Gc.RemoveGcStorage(this);
}
@@ -551,6 +553,221 @@ CasContainerStrategy::CollectGarbage(GcContext& GcCtx)
GcCtx.AddDeletedCids(DeletedChunks);
}
+class CasContainerStoreCompactor : public GcReferenceStoreCompactor
+{
+public:
+ CasContainerStoreCompactor(CasContainerStrategy& Owner,
+ BlockStoreCompactState&& CompactState,
+ std::vector<IoHash>&& CompactStateKeys,
+ std::vector<IoHash>&& PrunedKeys)
+ : m_CasContainerStrategy(Owner)
+ , m_CompactState(std::move(CompactState))
+ , m_CompactStateKeys(std::move(CompactStateKeys))
+ , m_PrunedKeys(std::move(PrunedKeys))
+ {
+ }
+
+ virtual void CompactReferenceStore(GcCtx& Ctx)
+ {
+ size_t CompactedCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc block store '{}': compacted {} cids in {}",
+ m_CasContainerStrategy.m_RootDirectory / m_CasContainerStrategy.m_ContainerBaseName,
+ CompactedCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+
+ if (Ctx.Settings.IsDeleteMode && Ctx.Settings.CollectSmallObjects)
+ {
+ // Compact block store
+ m_CasContainerStrategy.m_BlockStore.CompactBlocks(
+ m_CompactState,
+ m_CasContainerStrategy.m_PayloadAlignment,
+ [&](const BlockStore::MovedChunksArray& MovedArray, uint64_t FreedDiskSpace) {
+ std::vector<CasDiskIndexEntry> MovedEntries;
+ RwLock::ExclusiveLockScope _(m_CasContainerStrategy.m_LocationMapLock);
+ for (const std::pair<size_t, BlockStoreLocation>& Moved : MovedArray)
+ {
+ size_t ChunkIndex = Moved.first;
+ const IoHash& Key = m_CompactStateKeys[ChunkIndex];
+
+ if (auto It = m_CasContainerStrategy.m_LocationMap.find(Key); It != m_CasContainerStrategy.m_LocationMap.end())
+ {
+ BlockStoreDiskLocation& Location = m_CasContainerStrategy.m_Locations[It->second];
+ const BlockStoreLocation& OldLocation = m_CompactState.GetLocation(ChunkIndex);
+ if (Location.Get(m_CasContainerStrategy.m_PayloadAlignment) != OldLocation)
+ {
+ // Someone has moved our chunk so lets just skip the new location we were provided, it will be GC:d at a
+ // later time
+ continue;
+ }
+
+ const BlockStoreLocation& NewLocation = Moved.second;
+ Location = BlockStoreDiskLocation(NewLocation, m_CasContainerStrategy.m_PayloadAlignment);
+ MovedEntries.push_back(CasDiskIndexEntry{.Key = Key, .Location = Location});
+ }
+ }
+ m_CasContainerStrategy.m_CasLog.Append(MovedEntries);
+ Ctx.RemovedDiskSpace.fetch_add(FreedDiskSpace);
+ },
+ [&]() { return 0; });
+
+ CompactedCount = m_PrunedKeys.size();
+ Ctx.CompactedReferences.fetch_add(
+ CompactedCount); // Slightly misleading, it might not be compacted if the block is the currently writing block
+ }
+ }
+
+ CasContainerStrategy& m_CasContainerStrategy;
+ BlockStoreCompactState m_CompactState;
+ std::vector<IoHash> m_CompactStateKeys;
+ std::vector<IoHash> m_PrunedKeys;
+};
+
+class CasContainerReferencePruner : public GcReferencePruner
+{
+public:
+ CasContainerReferencePruner(CasContainerStrategy& Owner, std::vector<IoHash>&& Cids)
+ : m_CasContainerStrategy(Owner)
+ , m_Cids(std::move(Cids))
+ {
+ }
+
+ virtual GcReferenceStoreCompactor* RemoveUnreferencedData(GcCtx& Ctx, const GetUnusedReferencesFunc& GetUnusedReferences)
+ {
+ size_t TotalCount = m_Cids.size();
+ size_t PruneCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc block store '{}': removed {} unused cid out of {} in {}",
+ m_CasContainerStrategy.m_RootDirectory / m_CasContainerStrategy.m_ContainerBaseName,
+ PruneCount,
+ TotalCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+
+ std::vector<IoHash> UnusedCids = GetUnusedReferences(m_Cids);
+ m_Cids.clear();
+
+ if (UnusedCids.empty())
+ {
+ // Nothing to collect
+ return nullptr;
+ }
+
+ BlockStoreCompactState CompactState;
+ BlockStore::ReclaimSnapshotState BlockSnapshotState;
+ std::vector<IoHash> CompactStateKeys;
+ std::vector<CasDiskIndexEntry> ExpiredEntries;
+ ExpiredEntries.reserve(UnusedCids.size());
+ tsl::robin_set<IoHash, IoHash::Hasher> UnusedKeys;
+
+ {
+ RwLock::ExclusiveLockScope __(m_CasContainerStrategy.m_LocationMapLock);
+ if (Ctx.Settings.CollectSmallObjects)
+ {
+ BlockSnapshotState = m_CasContainerStrategy.m_BlockStore.GetReclaimSnapshotState();
+ }
+
+ for (const IoHash& Cid : UnusedCids)
+ {
+ auto It = m_CasContainerStrategy.m_LocationMap.find(Cid);
+ if (It == m_CasContainerStrategy.m_LocationMap.end())
+ {
+ continue;
+ }
+ CasDiskIndexEntry ExpiredEntry = {.Key = Cid,
+ .Location = m_CasContainerStrategy.m_Locations[It->second],
+ .Flags = CasDiskIndexEntry::kTombstone};
+ const BlockStoreDiskLocation& Location = m_CasContainerStrategy.m_Locations[It->second];
+ BlockStoreLocation BlockLocation = Location.Get(m_CasContainerStrategy.m_PayloadAlignment);
+ if (Ctx.Settings.CollectSmallObjects)
+ {
+ UnusedKeys.insert(Cid);
+ uint32_t BlockIndex = BlockLocation.BlockIndex;
+ bool IsActiveWriteBlock = BlockSnapshotState.m_ActiveWriteBlocks.contains(BlockIndex);
+ if (!IsActiveWriteBlock)
+ {
+ CompactState.AddBlock(BlockIndex);
+ }
+ ExpiredEntries.push_back(ExpiredEntry);
+ }
+ }
+
+ // Get all locations we need to keep for affected blocks
+ if (Ctx.Settings.CollectSmallObjects && !UnusedKeys.empty())
+ {
+ for (const auto& Entry : m_CasContainerStrategy.m_LocationMap)
+ {
+ const IoHash& Key = Entry.first;
+ if (UnusedKeys.contains(Key))
+ {
+ continue;
+ }
+ const BlockStoreDiskLocation& Location = m_CasContainerStrategy.m_Locations[Entry.second];
+ BlockStoreLocation BlockLocation = Location.Get(m_CasContainerStrategy.m_PayloadAlignment);
+ if (CompactState.AddKeepLocation(BlockLocation))
+ {
+ CompactStateKeys.push_back(Key);
+ }
+ }
+ }
+
+ if (Ctx.Settings.IsDeleteMode)
+ {
+ for (const CasDiskIndexEntry& Entry : ExpiredEntries)
+ {
+ m_CasContainerStrategy.m_LocationMap.erase(Entry.Key);
+ }
+ m_CasContainerStrategy.m_CasLog.Append(ExpiredEntries);
+ m_CasContainerStrategy.m_CasLog.Flush();
+ }
+ }
+
+ PruneCount = UnusedKeys.size();
+ Ctx.PrunedReferences.fetch_add(PruneCount);
+ return new CasContainerStoreCompactor(m_CasContainerStrategy,
+ std::move(CompactState),
+ std::move(CompactStateKeys),
+ std::vector<IoHash>(UnusedKeys.begin(), UnusedKeys.end()));
+ }
+
+private:
+ CasContainerStrategy& m_CasContainerStrategy;
+ std::vector<IoHash> m_Cids;
+};
+
+GcReferencePruner*
+CasContainerStrategy::CreateReferencePruner(GcCtx& Ctx)
+{
+ size_t TotalCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc block store '{}': found {} cid keys to check in {}",
+ m_RootDirectory / m_ContainerBaseName,
+ TotalCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+
+ std::vector<IoHash> CidsToCheck;
+ {
+ RwLock::SharedLockScope __(m_LocationMapLock);
+ CidsToCheck.reserve(m_LocationMap.size());
+ for (const auto& It : m_LocationMap)
+ {
+ CidsToCheck.push_back(It.first);
+ }
+ }
+ TotalCount = CidsToCheck.size();
+ if (TotalCount == 0)
+ {
+ return {};
+ }
+ Ctx.References.fetch_add(TotalCount);
+ return new CasContainerReferencePruner(*this, std::move(CidsToCheck));
+}
+
void
CasContainerStrategy::CompactIndex(RwLock::ExclusiveLockScope&)
{
diff --git a/src/zenstore/compactcas.h b/src/zenstore/compactcas.h
index 478a1f78e..9ff4ae4fc 100644
--- a/src/zenstore/compactcas.h
+++ b/src/zenstore/compactcas.h
@@ -49,7 +49,7 @@ static_assert(sizeof(CasDiskIndexEntry) == 32);
*/
-struct CasContainerStrategy final : public GcStorage
+struct CasContainerStrategy final : public GcStorage, public GcReferenceStore
{
CasContainerStrategy(GcManager& Gc);
~CasContainerStrategy();
@@ -71,6 +71,8 @@ struct CasContainerStrategy final : public GcStorage
virtual void CollectGarbage(GcContext& GcCtx) override;
virtual GcStorageSize StorageSize() const override;
+ virtual GcReferencePruner* CreateReferencePruner(GcCtx& Ctx) override;
+
private:
CasStore::InsertResult InsertChunk(const void* ChunkData, size_t ChunkSize, const IoHash& ChunkHash);
void MakeIndexSnapshot();
@@ -97,6 +99,9 @@ private:
typedef tsl::robin_map<IoHash, size_t, IoHash::Hasher> LocationMap_t;
LocationMap_t m_LocationMap;
std::vector<BlockStoreDiskLocation> m_Locations;
+
+ friend class CasContainerReferencePruner;
+ friend class CasContainerStoreCompactor;
};
void compactcas_forcelink();
diff --git a/src/zenstore/filecas.cpp b/src/zenstore/filecas.cpp
index 24d0a39bb..e28e0dea4 100644
--- a/src/zenstore/filecas.cpp
+++ b/src/zenstore/filecas.cpp
@@ -122,10 +122,12 @@ FileCasStrategy::ShardingHelper::ShardingHelper(const std::filesystem::path& Roo
FileCasStrategy::FileCasStrategy(GcManager& Gc) : m_Log(logging::Get("filecas")), m_Gc(Gc)
{
m_Gc.AddGcStorage(this);
+ m_Gc.AddGcReferenceStore(*this);
}
FileCasStrategy::~FileCasStrategy()
{
+ m_Gc.RemoveGcReferenceStore(*this);
m_Gc.RemoveGcStorage(this);
}
@@ -1329,7 +1331,170 @@ FileCasStrategy::ScanFolderForCasFiles(const std::filesystem::path& RootDir)
return Entries;
};
- //////////////////////////////////////////////////////////////////////////
+class FileCasStoreCompactor : public GcReferenceStoreCompactor
+{
+public:
+ FileCasStoreCompactor(FileCasStrategy& Owner, std::vector<IoHash>&& ReferencesToClean)
+ : m_FileCasStrategy(Owner)
+ , m_ReferencesToClean(std::move(ReferencesToClean))
+ {
+ }
+
+ virtual void CompactReferenceStore(GcCtx& Ctx)
+ {
+ size_t CompactedCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc file store '{}': removed data for {} unused cids in {}",
+ m_FileCasStrategy.m_RootDirectory,
+ CompactedCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+ std::vector<IoHash> ReferencedCleaned;
+ ReferencedCleaned.reserve(m_ReferencesToClean.size());
+
+ for (const IoHash& ChunkHash : m_ReferencesToClean)
+ {
+ FileCasStrategy::ShardingHelper Name(m_FileCasStrategy.m_RootDirectory.c_str(), ChunkHash);
+ {
+ RwLock::SharedLockScope __(m_FileCasStrategy.m_Lock);
+ if (auto It = m_FileCasStrategy.m_Index.find(ChunkHash); It != m_FileCasStrategy.m_Index.end())
+ {
+ // Not regarded as pruned, leave it be
+ continue;
+ }
+ if (Ctx.Settings.IsDeleteMode)
+ {
+ ZEN_DEBUG("deleting CAS payload file '{}'", Name.ShardedPath.ToUtf8());
+ std::error_code Ec;
+ uint64_t SizeOnDisk = std::filesystem::file_size(Name.ShardedPath.c_str(), Ec);
+ if (Ec)
+ {
+ SizeOnDisk = 0;
+ }
+ bool Existed = std::filesystem::remove(Name.ShardedPath.c_str(), Ec);
+ if (Ec)
+ {
+ ZEN_WARN("failed deleting CAS payload file '{}'. Reason '{}'", Name.ShardedPath.ToUtf8(), Ec.message());
+ continue;
+ }
+ if (!Existed)
+ {
+ continue;
+ }
+ Ctx.RemovedDiskSpace.fetch_add(SizeOnDisk);
+ }
+ else
+ {
+ std::error_code Ec;
+ bool Existed = std::filesystem::is_regular_file(Name.ShardedPath.c_str(), Ec);
+ if (Ec)
+ {
+ ZEN_WARN("failed checking CAS payload file '{}'. Reason '{}'", Name.ShardedPath.ToUtf8(), Ec.message());
+ continue;
+ }
+ if (!Existed)
+ {
+ continue;
+ }
+ }
+ ReferencedCleaned.push_back(ChunkHash);
+ }
+ }
+ CompactedCount = ReferencedCleaned.size();
+ Ctx.CompactedReferences.fetch_add(ReferencedCleaned.size());
+ }
+
+private:
+ FileCasStrategy& m_FileCasStrategy;
+ std::vector<IoHash> m_ReferencesToClean;
+};
+
+class FileCasReferencePruner : public GcReferencePruner
+{
+public:
+ FileCasReferencePruner(FileCasStrategy& Owner, std::vector<IoHash>&& Cids) : m_FileCasStrategy(Owner), m_Cids(std::move(Cids)) {}
+
+ virtual GcReferenceStoreCompactor* RemoveUnreferencedData(GcCtx& Ctx, const GetUnusedReferencesFunc& GetUnusedReferences)
+ {
+ size_t TotalCount = m_Cids.size();
+ size_t PruneCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc file store '{}': removed {} unused cid out of {} in {}",
+ m_FileCasStrategy.m_RootDirectory,
+ PruneCount,
+ TotalCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+
+ std::vector<IoHash> UnusedReferences = GetUnusedReferences(m_Cids);
+ m_Cids.clear();
+
+ std::vector<IoHash> PrunedReferences;
+ PrunedReferences.reserve(UnusedReferences.size());
+ {
+ RwLock::ExclusiveLockScope __(m_FileCasStrategy.m_Lock);
+ for (const IoHash& ChunkHash : UnusedReferences)
+ {
+ auto It = m_FileCasStrategy.m_Index.find(ChunkHash);
+ if (It == m_FileCasStrategy.m_Index.end())
+ {
+ continue;
+ }
+ if (Ctx.Settings.IsDeleteMode)
+ {
+ uint64_t FileSize = It->second.Size;
+ m_FileCasStrategy.m_Index.erase(It);
+ m_FileCasStrategy.m_CasLog.Append(
+ {.Key = ChunkHash, .Flags = FileCasStrategy::FileCasIndexEntry::kTombStone, .Size = FileSize});
+ m_FileCasStrategy.m_TotalSize.fetch_sub(It->second.Size, std::memory_order_relaxed);
+ }
+ PrunedReferences.push_back(ChunkHash);
+ }
+ }
+
+ PruneCount = PrunedReferences.size();
+ Ctx.PrunedReferences.fetch_add(PruneCount);
+ return new FileCasStoreCompactor(m_FileCasStrategy, std::move(PrunedReferences));
+ }
+
+private:
+ FileCasStrategy& m_FileCasStrategy;
+ std::vector<IoHash> m_Cids;
+};
+
+GcReferencePruner*
+FileCasStrategy::CreateReferencePruner(GcCtx& Ctx)
+{
+ // TODO
+ std::size_t TotalCount = 0;
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ ZEN_DEBUG("gc file store '{}': found {} cid keys to check in {}",
+ m_RootDirectory,
+ TotalCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+ std::vector<IoHash> CidsToCheck;
+ {
+ RwLock::SharedLockScope __(m_Lock);
+ CidsToCheck.reserve(m_Index.size());
+ for (const auto& It : m_Index)
+ {
+ CidsToCheck.push_back(It.first);
+ }
+ }
+ TotalCount = CidsToCheck.size();
+ if (TotalCount == 0)
+ {
+ return {};
+ }
+ Ctx.References.fetch_add(TotalCount);
+ return new FileCasReferencePruner(*this, std::move(CidsToCheck));
+}
+
+//////////////////////////////////////////////////////////////////////////
#if ZEN_WITH_TESTS
diff --git a/src/zenstore/filecas.h b/src/zenstore/filecas.h
index ea7ff8e8c..2e9a1d5dc 100644
--- a/src/zenstore/filecas.h
+++ b/src/zenstore/filecas.h
@@ -27,7 +27,7 @@ class BasicFile;
/** CAS storage strategy using a file-per-chunk storage strategy
*/
-struct FileCasStrategy final : public GcStorage
+struct FileCasStrategy final : public GcStorage, public GcReferenceStore
{
FileCasStrategy(GcManager& Gc);
~FileCasStrategy();
@@ -44,6 +44,8 @@ struct FileCasStrategy final : public GcStorage
virtual void CollectGarbage(GcContext& GcCtx) override;
virtual GcStorageSize StorageSize() const override;
+ virtual GcReferencePruner* CreateReferencePruner(GcCtx& Ctx) override;
+
private:
void MakeIndexSnapshot();
uint64_t ReadIndexFile(const std::filesystem::path& IndexPath, uint32_t& OutVersion);
@@ -97,6 +99,9 @@ private:
size_t Shard2len = 0;
ExtendablePathBuilder<128> ShardedPath;
};
+
+ friend class FileCasReferencePruner;
+ friend class FileCasStoreCompactor;
};
void filecas_forcelink();
diff --git a/src/zenstore/gc.cpp b/src/zenstore/gc.cpp
index 9743eabf0..e09f46063 100644
--- a/src/zenstore/gc.cpp
+++ b/src/zenstore/gc.cpp
@@ -327,6 +327,280 @@ GcManager::~GcManager()
{
}
+//////// Begin New GC WIP
+
+void
+GcManager::AddGcReferencer(GcReferencer& Referencer)
+{
+ RwLock::ExclusiveLockScope _(m_Lock);
+ m_GcReferencers.push_back(&Referencer);
+}
+void
+GcManager::RemoveGcReferencer(GcReferencer& Referencer)
+{
+ RwLock::ExclusiveLockScope _(m_Lock);
+ std::erase_if(m_GcReferencers, [&](GcReferencer* $) { return $ == &Referencer; });
+}
+
+void
+GcManager::AddGcReferenceStore(GcReferenceStore& ReferenceStore)
+{
+ RwLock::ExclusiveLockScope _(m_Lock);
+ m_GcReferenceStores.push_back(&ReferenceStore);
+}
+void
+GcManager::RemoveGcReferenceStore(GcReferenceStore& ReferenceStore)
+{
+ RwLock::ExclusiveLockScope _(m_Lock);
+ std::erase_if(m_GcReferenceStores, [&](GcReferenceStore* $) { return $ == &ReferenceStore; });
+}
+
+GcResult
+GcManager::CollectGarbage(const GcSettings& Settings)
+{
+ GcCtx Ctx{.Settings = Settings};
+
+ Stopwatch TotalTimer;
+ auto __ = MakeGuard([&]() {
+ ZEN_INFO(
+ "GC: Removed {} items out of {}, deleted {} out of {}. Pruned {} Cid entries out of {}, compacted {} Cid entries out of {}, "
+ "freed "
+ "{} on disk and {} of memory in {}",
+ Ctx.ExpiredItems.load(),
+ Ctx.Items.load(),
+ Ctx.DeletedItems.load(),
+ Ctx.ExpiredItems.load(),
+ Ctx.PrunedReferences.load(),
+ Ctx.References.load(),
+ Ctx.CompactedReferences.load(),
+ Ctx.PrunedReferences.load(),
+ NiceBytes(Ctx.RemovedDiskSpace.load()),
+ NiceBytes(Ctx.RemovedMemory.load()),
+ NiceTimeSpanMs(TotalTimer.GetElapsedTimeMs()));
+ });
+
+ RwLock::SharedLockScope GcLock(m_Lock);
+
+ static const bool SingleThread =
+#if ZEN_BUILD_DEBUG
+ true
+#else
+ false
+#endif
+ ;
+ WorkerThreadPool ThreadPool(SingleThread ? 0 : 8);
+
+ if (!m_GcReferencers.empty())
+ {
+ Latch WorkLeft(1);
+ // First remove any cache keys that may own references
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Removed expired data in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ for (GcReferencer* Owner : m_GcReferencers)
+ {
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork([&Ctx, Owner, &WorkLeft]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ Owner->RemoveExpiredData(Ctx);
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+
+ if (Ctx.Settings.SkipCidDelete)
+ {
+ return GcResult{.Items = Ctx.Items.load(),
+ .ExpiredItems = Ctx.ExpiredItems.load(),
+ .DeletedItems = Ctx.DeletedItems.load(),
+ .References = Ctx.References.load(),
+ .PrunedReferences = Ctx.PrunedReferences.load(),
+ .CompactedReferences = Ctx.CompactedReferences.load(),
+ .RemovedDiskSpace = Ctx.RemovedDiskSpace.load(),
+ .RemovedMemory = Ctx.RemovedMemory.load()};
+ }
+
+ std::vector<std::unique_ptr<GcReferencePruner>> ReferencePruners;
+ if (!m_GcReferenceStores.empty())
+ {
+ ReferencePruners.reserve(m_GcReferenceStores.size());
+ Latch WorkLeft(1);
+ RwLock ReferencePrunersLock;
+ // Easy to go wide, CreateReferencePruner is usually not very heavy but big data sets change that
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Created Cid pruners in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ for (GcReferenceStore* CidStore : m_GcReferenceStores)
+ {
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork([&Ctx, CidStore, &WorkLeft, &ReferencePrunersLock, &ReferencePruners]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ // The CidStore will pick a list of CId entries to check, returning a collector
+ std::unique_ptr<GcReferencePruner> ReferencePruner(CidStore->CreateReferencePruner(Ctx));
+ if (ReferencePruner)
+ {
+ RwLock::ExclusiveLockScope __(ReferencePrunersLock);
+ ReferencePruners.emplace_back(std::move(ReferencePruner));
+ }
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+
+ std::vector<std::unique_ptr<GcReferenceChecker>> ReferenceCheckers;
+ if (!m_GcReferencers.empty())
+ {
+ ReferenceCheckers.reserve(m_GcReferencers.size());
+ Latch WorkLeft(1);
+ RwLock ReferenceCheckersLock;
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Created Cid checkers in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ // Easy to go wide, CreateReferenceCheckers is potentially heavy
+ // Lock all reference owners from changing the reference data and get access to check for referenced data
+ for (GcReferencer* Referencer : m_GcReferencers)
+ {
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork([&Ctx, &WorkLeft, Referencer, &ReferenceCheckersLock, &ReferenceCheckers]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ // The Referencer will create a reference checker that guarantees that the references do not change as long as it lives
+ std::vector<GcReferenceChecker*> Checkers = Referencer->CreateReferenceCheckers(Ctx);
+ try
+ {
+ if (!Checkers.empty())
+ {
+ RwLock::ExclusiveLockScope __(ReferenceCheckersLock);
+ for (auto& Checker : Checkers)
+ {
+ ReferenceCheckers.emplace_back(std::unique_ptr<GcReferenceChecker>(Checker));
+ Checker = nullptr;
+ }
+ }
+ }
+ catch (std::exception&)
+ {
+ while (!Checkers.empty())
+ {
+ delete Checkers.back();
+ Checkers.pop_back();
+ }
+ throw;
+ }
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+
+ Stopwatch LockStateTimer;
+ if (!ReferenceCheckers.empty())
+ {
+ // Easy to go wide, locking all reference checkers so we have a steady state of which references are used
+ // From this point we block all writes to all References (DiskBucket/ProjectStore) until we delete the ReferenceCheckers
+ Latch WorkLeft(1);
+
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Locked Cid checkers in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ for (std::unique_ptr<GcReferenceChecker>& ReferenceChecker : ReferenceCheckers)
+ {
+ GcReferenceChecker* Checker = ReferenceChecker.get();
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork([&Ctx, Checker, &WorkLeft, &ReferenceCheckers]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ Checker->LockState(Ctx);
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+
+ std::vector<std::unique_ptr<GcReferenceStoreCompactor>> ReferenceStoreCompactors;
+ ReferenceStoreCompactors.reserve(ReferencePruners.size());
+ if (!ReferencePruners.empty())
+ {
+ const auto GetUnusedReferences = [&ReferenceCheckers, &Ctx](std::span<IoHash> References) -> std::vector<IoHash> {
+ HashSet UnusedCids(References.begin(), References.end());
+ for (const std::unique_ptr<GcReferenceChecker>& ReferenceChecker : ReferenceCheckers)
+ {
+ ReferenceChecker->RemoveUsedReferencesFromSet(Ctx, UnusedCids);
+ if (UnusedCids.empty())
+ {
+ return {};
+ }
+ }
+ return std::vector<IoHash>(UnusedCids.begin(), UnusedCids.end());
+ };
+
+ // Easy to go wide, checking all Cids against references in cache
+ // Ask stores to remove data that the ReferenceCheckers says are not references - this should be a lightweight operation that
+ // only updates in-memory index, actual disk changes should be done by the ReferenceStoreCompactors
+
+ Latch WorkLeft(1);
+ RwLock ReferenceStoreCompactorsLock;
+
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Pruned unreferenced Cid data in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ for (std::unique_ptr<GcReferencePruner>& ReferencePruner : ReferencePruners)
+ {
+ GcReferencePruner* Pruner = ReferencePruner.get();
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork(
+ [&Ctx, Pruner, &WorkLeft, &GetUnusedReferences, &ReferenceStoreCompactorsLock, &ReferenceStoreCompactors]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ // Go through all the ReferenceCheckers to see if the list of Cids the collector selected are referenced or not.
+ std::unique_ptr<GcReferenceStoreCompactor> ReferenceCompactor(Pruner->RemoveUnreferencedData(Ctx, GetUnusedReferences));
+ if (ReferenceCompactor)
+ {
+ RwLock::ExclusiveLockScope __(ReferenceStoreCompactorsLock);
+ ReferenceStoreCompactors.emplace_back(std::move(ReferenceCompactor));
+ }
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+ // Let the GcReferencers add new data, we will only change on-disk data at this point, adding new data is allowed
+ ReferenceCheckers.clear();
+ ZEN_INFO("GC: Writes blocked for {}", NiceTimeSpanMs(LockStateTimer.GetElapsedTimeMs()))
+
+ // Let go of the pruners
+ ReferencePruners.clear();
+
+ if (!ReferenceStoreCompactors.empty())
+ {
+ Latch WorkLeft(1);
+
+ // Easy to go wide
+ // Remove the stuff we deemed unreferenced from disk - may be heavy operation
+ Stopwatch Timer;
+ auto _ = MakeGuard([&]() { ZEN_INFO("GC: Compacted Cid stores in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())) });
+ for (std::unique_ptr<GcReferenceStoreCompactor>& StoreCompactor : ReferenceStoreCompactors)
+ {
+ GcReferenceStoreCompactor* Compactor = StoreCompactor.get();
+ WorkLeft.AddCount(1);
+ ThreadPool.ScheduleWork([&Ctx, Compactor, &WorkLeft]() {
+ auto _ = MakeGuard([&WorkLeft]() { WorkLeft.CountDown(); });
+ // Go through all the ReferenceCheckers to see if the list of Cids the collector selected are referenced or not.
+ Compactor->CompactReferenceStore(Ctx);
+ });
+ }
+ WorkLeft.CountDown();
+ WorkLeft.Wait();
+ }
+
+ ReferenceStoreCompactors.clear();
+
+ return GcResult{.Items = Ctx.Items.load(),
+ .ExpiredItems = Ctx.ExpiredItems.load(),
+ .DeletedItems = Ctx.DeletedItems.load(),
+ .References = Ctx.References.load(),
+ .PrunedReferences = Ctx.PrunedReferences.load(),
+ .CompactedReferences = Ctx.CompactedReferences.load(),
+ .RemovedDiskSpace = Ctx.RemovedDiskSpace.load(),
+ .RemovedMemory = Ctx.RemovedMemory.load()};
+}
+
+//////// End New GC WIP
+
void
GcManager::AddGcContributor(GcContributor* Contributor)
{
@@ -645,23 +919,19 @@ GcScheduler::Shutdown()
bool
GcScheduler::TriggerGc(const GcScheduler::TriggerGcParams& Params)
{
- if (m_Config.Enabled)
+ std::unique_lock Lock(m_GcMutex);
+ if (static_cast<uint32_t>(GcSchedulerStatus::kIdle) == m_Status)
{
- std::unique_lock Lock(m_GcMutex);
- if (static_cast<uint32_t>(GcSchedulerStatus::kIdle) == m_Status)
- {
- m_TriggerGcParams = Params;
- uint32_t IdleState = static_cast<uint32_t>(GcSchedulerStatus::kIdle);
+ m_TriggerGcParams = Params;
+ uint32_t IdleState = static_cast<uint32_t>(GcSchedulerStatus::kIdle);
- if (m_Status.compare_exchange_strong(/* expected */ IdleState,
- /* desired */ static_cast<uint32_t>(GcSchedulerStatus::kRunning)))
- {
- m_GcSignal.notify_one();
- return true;
- }
+ if (m_Status.compare_exchange_strong(/* expected */ IdleState,
+ /* desired */ static_cast<uint32_t>(GcSchedulerStatus::kRunning)))
+ {
+ m_GcSignal.notify_one();
+ return true;
}
}
-
return false;
}
@@ -806,7 +1076,7 @@ GcScheduler::SchedulerThread()
break;
}
- if (!m_Config.Enabled && !m_TriggerScrubParams)
+ if (!m_Config.Enabled && !m_TriggerScrubParams && !m_TriggerGcParams)
{
WaitTime = std::chrono::seconds::max();
continue;
@@ -830,6 +1100,7 @@ GcScheduler::SchedulerThread()
std::chrono::seconds MaxProjectStoreDuration = m_Config.MaxProjectStoreDuration;
uint64_t DiskSizeSoftLimit = m_Config.DiskSizeSoftLimit;
bool SkipCid = false;
+ GcVersion UseGCVersion = m_Config.UseGCVersion;
bool DiskSpaceGCTriggered = false;
bool TimeBasedGCTriggered = false;
@@ -863,6 +1134,8 @@ GcScheduler::SchedulerThread()
{
DoDelete = false;
}
+ UseGCVersion = TriggerParams.ForceGCVersion.value_or(UseGCVersion);
+ DoGc = true;
}
if (m_TriggerScrubParams)
@@ -1067,7 +1340,7 @@ GcScheduler::SchedulerThread()
}
}
- CollectGarbage(CacheExpireTime, ProjectStoreExpireTime, DoDelete, CollectSmallObjects, SkipCid);
+ CollectGarbage(CacheExpireTime, ProjectStoreExpireTime, DoDelete, CollectSmallObjects, SkipCid, UseGCVersion);
uint32_t RunningState = static_cast<uint32_t>(GcSchedulerStatus::kRunning);
if (!m_Status.compare_exchange_strong(RunningState, static_cast<uint32_t>(GcSchedulerStatus::kIdle)))
@@ -1148,7 +1421,8 @@ GcScheduler::CollectGarbage(const GcClock::TimePoint& CacheExpireTime,
const GcClock::TimePoint& ProjectStoreExpireTime,
bool Delete,
bool CollectSmallObjects,
- bool SkipCid)
+ bool SkipCid,
+ GcVersion UseGCVersion)
{
ZEN_TRACE_CPU("GcScheduler::CollectGarbage");
@@ -1195,10 +1469,26 @@ GcScheduler::CollectGarbage(const GcClock::TimePoint& CacheExpireTime,
Stopwatch Timer;
const auto __ = MakeGuard([&] { ZEN_INFO("garbage collection DONE in {}", NiceTimeSpanMs(Timer.GetElapsedTimeMs())); });
- GcStorageSize Diff = m_GcManager.CollectGarbage(GcCtx);
+ GcStorageSize Diff{};
+ switch (UseGCVersion)
+ {
+ case GcVersion::kV1:
+ Diff = m_GcManager.CollectGarbage(GcCtx);
+ break;
+ case GcVersion::kV2:
+ {
+ GcResult Result = m_GcManager.CollectGarbage({.CacheExpireTime = CacheExpireTime,
+ .ProjectStoreExpireTime = ProjectStoreExpireTime,
+ .CollectSmallObjects = CollectSmallObjects,
+ .IsDeleteMode = Delete,
+ .SkipCidDelete = SkipCid});
+ Diff.DiskSize = Result.RemovedDiskSpace;
+ Diff.MemorySize = Result.RemovedMemory;
+ }
+ break;
+ }
std::chrono::milliseconds ElapsedMS = std::chrono::milliseconds(Timer.GetElapsedTimeMs());
-
if (SkipCid)
{
m_LastLightweightGcTime = GcClock::Now();
diff --git a/src/zenstore/include/zenstore/blockstore.h b/src/zenstore/include/zenstore/blockstore.h
index 56906f570..cd475cd8b 100644
--- a/src/zenstore/include/zenstore/blockstore.h
+++ b/src/zenstore/include/zenstore/blockstore.h
@@ -108,6 +108,8 @@ private:
BasicFile m_File;
};
+class BlockStoreCompactState;
+
class BlockStore
{
public:
@@ -124,6 +126,7 @@ public:
typedef std::vector<size_t> ChunkIndexArray;
typedef std::function<void(const MovedChunksArray& MovedChunks, const ChunkIndexArray& RemovedChunks)> ReclaimCallback;
+ typedef std::function<void(const MovedChunksArray& MovedChunks, uint64_t FreedDiskSpace)> CompactCallback;
typedef std::function<uint64_t()> ClaimDiskReserveCallback;
typedef std::function<void(size_t ChunkIndex, const void* Data, uint64_t Size)> IterateChunksSmallSizeCallback;
typedef std::function<void(size_t ChunkIndex, BlockStoreFile& File, uint64_t Offset, uint64_t Size)> IterateChunksLargeSizeCallback;
@@ -156,6 +159,12 @@ public:
const IterateChunksSmallSizeCallback& SmallSizeCallback,
const IterateChunksLargeSizeCallback& LargeSizeCallback);
+ void CompactBlocks(
+ const BlockStoreCompactState& CompactState,
+ uint64_t PayloadAlignment,
+ const CompactCallback& ChangeCallback = [](const MovedChunksArray&, uint64_t) {},
+ const ClaimDiskReserveCallback& DiskReserveCallback = []() { return 0; });
+
static const char* GetBlockFileExtension();
static std::filesystem::path GetBlockPath(const std::filesystem::path& BlocksBasePath, const uint32_t BlockIndex);
@@ -179,6 +188,55 @@ private:
std::atomic_uint64_t m_TotalSize{};
};
+class BlockStoreCompactState
+{
+public:
+ BlockStoreCompactState() = default;
+
+ void AddBlock(uint32_t BlockIndex)
+ {
+ auto It = m_BlockIndexToChunkMapIndex.find(BlockIndex);
+ if (It == m_BlockIndexToChunkMapIndex.end())
+ {
+ m_KeepChunks.emplace_back(std::vector<size_t>());
+ m_BlockIndexToChunkMapIndex.insert_or_assign(BlockIndex, m_KeepChunks.size() - 1);
+ }
+ }
+
+ bool AddKeepLocation(const BlockStoreLocation& Location)
+ {
+ auto It = m_BlockIndexToChunkMapIndex.find(Location.BlockIndex);
+ if (It == m_BlockIndexToChunkMapIndex.end())
+ {
+ return false;
+ }
+
+ std::vector<size_t>& KeepChunks = m_KeepChunks[It->second];
+ size_t Index = m_ChunkLocations.size();
+ KeepChunks.push_back(Index);
+ m_ChunkLocations.push_back(Location);
+ return true;
+ }
+
+ const BlockStoreLocation& GetLocation(size_t Index) const { return m_ChunkLocations[Index]; }
+
+ void IterateBlocks(std::function<void(uint32_t BlockIndex,
+ const std::vector<size_t>& KeepChunkIndexes,
+ const std::vector<BlockStoreLocation>& ChunkLocations)> Callback) const
+ {
+ for (const auto& It : m_BlockIndexToChunkMapIndex)
+ {
+ size_t ChunkMapIndex = It.second;
+ Callback(It.first, m_KeepChunks[ChunkMapIndex], m_ChunkLocations);
+ }
+ }
+
+private:
+ std::unordered_map<uint32_t, size_t> m_BlockIndexToChunkMapIndex; // Maps to which vector in BlockKeepChunks to use for a block
+ std::vector<std::vector<size_t>> m_KeepChunks; // One vector per block index with index into ChunkLocations
+ std::vector<BlockStoreLocation> m_ChunkLocations;
+};
+
void blockstore_forcelink();
} // namespace zen
diff --git a/src/zenstore/include/zenstore/gc.h b/src/zenstore/include/zenstore/gc.h
index 42605804e..fa7dce331 100644
--- a/src/zenstore/include/zenstore/gc.h
+++ b/src/zenstore/include/zenstore/gc.h
@@ -20,6 +20,10 @@ ZEN_THIRD_PARTY_INCLUDES_END
#include <span>
#include <thread>
+ZEN_THIRD_PARTY_INCLUDES_START
+#include <tsl/robin_set.h>
+ZEN_THIRD_PARTY_INCLUDES_END
+
namespace spdlog {
class logger;
}
@@ -48,6 +52,151 @@ public:
static TimePoint TimePointFromTick(const Tick TickCount) { return TimePoint{Duration{TickCount}}; }
};
+//////// Begin New GC WIP
+
+struct GcSettings
+{
+ GcClock::TimePoint CacheExpireTime = GcClock::Now();
+ GcClock::TimePoint ProjectStoreExpireTime = GcClock::Now();
+ bool CollectSmallObjects = false;
+ bool IsDeleteMode = false;
+ bool SkipCidDelete = false;
+};
+
+struct GcResult
+{
+ uint64_t Items = 0;
+ uint64_t ExpiredItems = 0;
+ uint64_t DeletedItems = 0;
+ uint64_t References = 0;
+ uint64_t PrunedReferences = 0;
+ uint64_t CompactedReferences = 0;
+ uint64_t RemovedDiskSpace = 0;
+ uint64_t RemovedMemory = 0;
+};
+
+struct GcCtx
+{
+ const GcSettings Settings;
+ std::atomic_uint64_t Items = 0;
+ std::atomic_uint64_t ExpiredItems = 0;
+ std::atomic_uint64_t DeletedItems = 0;
+ std::atomic_uint64_t References = 0;
+ std::atomic_uint64_t PrunedReferences = 0;
+ std::atomic_uint64_t CompactedReferences = 0;
+ std::atomic_uint64_t RemovedDiskSpace = 0;
+ std::atomic_uint64_t RemovedMemory = 0;
+};
+
+typedef tsl::robin_set<IoHash> HashSet;
+
+/**
+ * @brief An interface to remove the stored data on disk after a GcReferencePruner::RemoveUnreferencedData
+ *
+ * CompactReferenceStore is called after pruning (GcReferencePruner::RemoveUnreferencedData) and state locking is
+ * complete, so the implementor must take care to only remove data that has not been altered since the prune operation.
+ *
+ * Instance will be deleted after CompactReferenceStore has completed execution.
+ *
+ * The subclass constructor should be provided with information on what is intended to be removed.
+ */
+class GcReferenceStoreCompactor
+{
+public:
+ virtual ~GcReferenceStoreCompactor() = default;
+
+ // Remove data on disk based on results from GcReferencePruner::RemoveUnreferencedData
+ virtual void CompactReferenceStore(GcCtx& Ctx) = 0;
+};
+
+/**
+ * @brief An interface to check if a set of Cids are referenced
+ *
+ * Instance will be deleted after RemoveUsedReferencesFromSet has been called 0-n times.
+ *
+ * During construction of the GcReferenceChecker the world is not stopped and this is a good
+ * place to do caching to be able to execute LockState and RemoveUsedReferencesFromSet quickly.
+ */
+class GcReferenceChecker
+{
+public:
+ // Destructor should unlock what was locked in LockState
+ virtual ~GcReferenceChecker() = default;
+
+ // Lock the state and make sure no references change; usually a read-lock is taken until the destruction
+ // of the instance. Called once before any calls to RemoveUsedReferencesFromSet
+ // The implementation should be as fast as possible as LockState is part of a stop the world (from changes)
+ // until all instances of GcReferenceChecker are deleted
+ virtual void LockState(GcCtx& Ctx) = 0;
+
+ // Go through IoCids and see which ones are referenced. Each referenced Cid must be removed from IoCids.
+ // This function should use pre-cached information on what is referenced as we are in stop the world mode
+ virtual void RemoveUsedReferencesFromSet(GcCtx& Ctx, HashSet& IoCids) = 0;
+};
+
+/**
+ * @brief Interface to handle GC of data that references Cid data
+ *
+ * TODO: Maybe we should split up being a referencer and something that holds cache values?
+ *
+ * GcCacheStore and GcReferencer?
+ *
+ * This interface is registered/unregistered with GcManager via AddGcReferencer() and RemoveGcReferencer()
+ */
+class GcReferencer
+{
+protected:
+ virtual ~GcReferencer() = default;
+
+public:
+ // Remove expired data based on GcCtx::Settings CacheExpireTime/ProjectStoreExpireTime
+ // TODO: For disk layer we need to first update it with access times from the memory layer
+ // The implementer of GcReferencer (in our case a disk bucket) does not know about any
+ // potential memory cache layer :(
+ virtual void RemoveExpiredData(GcCtx& Ctx) = 0;
+
+ // Create 0-n GcReferenceChecker for this GcReferencer. Caller will manage lifetime of
+ // returned instances
+ virtual std::vector<GcReferenceChecker*> CreateReferenceCheckers(GcCtx& Ctx) = 0;
+};
+
+/**
+ * @brief Interface to prune references from a GcReferenceStore - remove pointers to data but not the bulk data on disk
+ */
+class GcReferencePruner
+{
+public:
+ virtual ~GcReferencePruner() = default;
+
+ typedef std::function<std::vector<IoHash>(std::span<IoHash> References)> GetUnusedReferencesFunc;
+
+ // Check a set of references to see if they are in use.
+ // Use the GetUnusedReferences input function to check if references are used and update any pointers
+ // so any query for references determined to be unreferenced will not be found.
+ // If any references are found to be unused, return a GcReferenceStoreCompactor instance which will
+ // clean up any stored bulk data mapping to the pruned references.
+ // Caller will manage lifetime of returned instance
+ // This function should execute as fast as possible, so try to prepare a list of references to check ahead of
+ // call to this function and make sure the removal of unreferenced items is as lightweight as possible.
+ virtual GcReferenceStoreCompactor* RemoveUnreferencedData(GcCtx& Ctx, const GetUnusedReferencesFunc& GetUnusedReferences) = 0;
+};
+
+/**
+ * @brief An interface to prune referenced (Cid) data from a store
+ */
+class GcReferenceStore
+{
+protected:
+ virtual ~GcReferenceStore() = default;
+
+public:
+ // Create a GcReferencePruner which can check a set of references (decided by implementor) if they are no longer in use
+ // Caller will manage lifetime of returned instance
+ virtual GcReferencePruner* CreateReferencePruner(GcCtx& Ctx) = 0;
+};
+
+//////// End New GC WIP
+
/** Garbage Collection context object
*/
class GcContext
@@ -141,6 +290,18 @@ public:
GcManager();
~GcManager();
+ //////// Begin New GC WIP
+
+ void AddGcReferencer(GcReferencer& Referencer);
+ void RemoveGcReferencer(GcReferencer& Referencer);
+
+ void AddGcReferenceStore(GcReferenceStore& ReferenceStore);
+ void RemoveGcReferenceStore(GcReferenceStore& ReferenceStore);
+
+ GcResult CollectGarbage(const GcSettings& Settings);
+
+ //////// End New GC WIP
+
void AddGcContributor(GcContributor* Contributor);
void RemoveGcContributor(GcContributor* Contributor);
@@ -163,6 +324,9 @@ private:
std::vector<GcStorage*> m_GcStorage;
CidStore* m_CidStore = nullptr;
const DiskWriteBlocker* m_DiskWriteBlocker = nullptr;
+
+ std::vector<GcReferencer*> m_GcReferencers;
+ std::vector<GcReferenceStore*> m_GcReferenceStores;
};
enum class GcSchedulerStatus : uint32_t
@@ -172,6 +336,12 @@ enum class GcSchedulerStatus : uint32_t
kStopped
};
+enum class GcVersion : uint32_t
+{
+ kV1,
+ kV2
+};
+
struct GcSchedulerConfig
{
std::filesystem::path RootDirectory;
@@ -185,6 +355,7 @@ struct GcSchedulerConfig
uint64_t DiskSizeSoftLimit = 0;
uint64_t MinimumFreeDiskSpaceToAllowWrites = 1ul << 28;
std::chrono::seconds LightweightInterval{};
+ GcVersion UseGCVersion = GcVersion::kV1;
};
struct GcSchedulerState
@@ -246,12 +417,13 @@ public:
struct TriggerGcParams
{
- bool CollectSmallObjects = false;
- std::chrono::seconds MaxCacheDuration = std::chrono::seconds::max();
- std::chrono::seconds MaxProjectStoreDuration = std::chrono::seconds::max();
- uint64_t DiskSizeSoftLimit = 0;
- bool SkipCid = false;
- bool SkipDelete = false;
+ bool CollectSmallObjects = false;
+ std::chrono::seconds MaxCacheDuration = std::chrono::seconds::max();
+ std::chrono::seconds MaxProjectStoreDuration = std::chrono::seconds::max();
+ uint64_t DiskSizeSoftLimit = 0;
+ bool SkipCid = false;
+ bool SkipDelete = false;
+ std::optional<GcVersion> ForceGCVersion;
};
bool TriggerGc(const TriggerGcParams& Params);
@@ -270,7 +442,8 @@ private:
const GcClock::TimePoint& ProjectStoreExpireTime,
bool Delete,
bool CollectSmallObjects,
- bool SkipCid);
+ bool SkipCid,
+ GcVersion UseGCVersion);
void ScrubStorage(bool DoDelete, std::chrono::seconds TimeSlice);
spdlog::logger& Log() { return m_Log; }
virtual bool AreDiskWritesAllowed() const override { return !m_AreDiskWritesBlocked.load(); }