about summary refs log tree commit diff
path: root/src/zenserver/cache/cachedisklayer.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/zenserver/cache/cachedisklayer.cpp')
-rw-r--r--  src/zenserver/cache/cachedisklayer.cpp  483
1 files changed, 314 insertions, 169 deletions
diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp
index 9117b8820..955ab3a04 100644
--- a/src/zenserver/cache/cachedisklayer.cpp
+++ b/src/zenserver/cache/cachedisklayer.cpp
@@ -2502,6 +2502,10 @@ ZenCacheDiskLayer::CacheBucket::PutStandaloneCacheValue(const IoHash& HashKey, c
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
ValueLock.ReleaseNow();
+ if (m_UpdatedKeys)
+ {
+ m_UpdatedKeys->insert(HashKey);
+ }
PayloadIndex EntryIndex = {};
if (auto It = m_Index.find(HashKey); It == m_Index.end())
@@ -2652,6 +2656,10 @@ ZenCacheDiskLayer::CacheBucket::PutInlineCacheValue(const IoHash& HashKey, const
m_SlogFile.Append({.Key = HashKey, .Location = Location});
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
+ if (m_UpdatedKeys)
+ {
+ m_UpdatedKeys->insert(HashKey);
+ }
if (auto It = m_Index.find(HashKey); It != m_Index.end())
{
PayloadIndex EntryIndex = It.value();
@@ -2767,6 +2775,10 @@ public:
}
else
{
+ RwLock::SharedLockScope ValueLock(m_Bucket.LockForHash(ExpiredKey.first));
+ IndexLock.ReleaseNow();
+ ZEN_DEBUG("GCV2: cachebucket [COMPACT] '{}': checking standalone cache file '{}'", m_Bucket.m_BucketDir, Path.ToUtf8());
+
std::error_code Ec;
bool Existed = std::filesystem::is_regular_file(FilePath, Ec);
if (Ec)
@@ -2792,9 +2804,12 @@ public:
if (Ctx.Settings.CollectSmallObjects)
{
+ m_Bucket.m_IndexLock.WithExclusiveLock([&]() { m_Bucket.m_UpdatedKeys = std::make_unique<HashSet>(); });
+ auto __ = MakeGuard([&]() { m_Bucket.m_IndexLock.WithExclusiveLock([&]() { m_Bucket.m_UpdatedKeys.reset(); }); });
+
std::unordered_map<uint32_t, uint64_t> BlockUsage;
{
- RwLock::SharedLockScope __(m_Bucket.m_IndexLock);
+ RwLock::SharedLockScope ___(m_Bucket.m_IndexLock);
for (const auto& Entry : m_Bucket.m_Index)
{
ZenCacheDiskLayer::CacheBucket::PayloadIndex Index = Entry.second;
@@ -2807,14 +2822,13 @@ public:
}
uint32_t BlockIndex = Loc.Location.BlockLocation.GetBlockIndex();
uint64_t ChunkSize = RoundUp(Loc.Size(), m_Bucket.m_Configuration.PayloadAlignment);
- auto It = BlockUsage.find(BlockIndex);
- if (It == BlockUsage.end())
+ if (auto It = BlockUsage.find(BlockIndex); It != BlockUsage.end())
{
- BlockUsage.insert_or_assign(BlockIndex, ChunkSize);
+ It->second += ChunkSize;
}
else
{
- It->second += ChunkSize;
+ BlockUsage.insert_or_assign(BlockIndex, ChunkSize);
}
}
}
@@ -2830,7 +2844,7 @@ public:
if (BlocksToCompact.size() > 0)
{
{
- RwLock::SharedLockScope __(m_Bucket.m_IndexLock);
+ RwLock::SharedLockScope ___(m_Bucket.m_IndexLock);
for (const auto& Entry : m_Bucket.m_Index)
{
ZenCacheDiskLayer::CacheBucket::PayloadIndex Index = Entry.second;
@@ -2863,27 +2877,25 @@ public:
m_Bucket.m_Configuration.PayloadAlignment,
[&](const BlockStore::MovedChunksArray& MovedArray, uint64_t FreedDiskSpace) {
std::vector<DiskIndexEntry> MovedEntries;
- RwLock::ExclusiveLockScope _(m_Bucket.m_IndexLock);
+ MovedEntries.reserve(MovedArray.size());
+ RwLock::ExclusiveLockScope _(m_Bucket.m_IndexLock);
for (const std::pair<size_t, BlockStoreLocation>& Moved : MovedArray)
{
size_t ChunkIndex = Moved.first;
const IoHash& Key = BlockCompactStateKeys[ChunkIndex];
+ if (m_Bucket.m_UpdatedKeys->contains(Key))
+ {
+ continue;
+ }
+
if (auto It = m_Bucket.m_Index.find(Key); It != m_Bucket.m_Index.end())
{
- ZenCacheDiskLayer::CacheBucket::BucketPayload& Payload = m_Bucket.m_Payloads[It->second];
- const BlockStoreLocation& OldLocation = BlockCompactState.GetLocation(ChunkIndex);
- if (Payload.Location.GetBlockLocation(m_Bucket.m_Configuration.PayloadAlignment) != OldLocation)
- {
- // Someone has moved our chunk so lets just skip the new location we were provided, it will be
- // GC:d at a later time
- continue;
- }
- const BlockStoreLocation& NewLocation = Moved.second;
-
- Payload.Location = DiskLocation(NewLocation,
- m_Bucket.m_Configuration.PayloadAlignment,
- Payload.Location.GetFlags());
+ ZenCacheDiskLayer::CacheBucket::BucketPayload& Payload = m_Bucket.m_Payloads[It->second];
+ const BlockStoreLocation& NewLocation = Moved.second;
+ Payload.Location = DiskLocation(NewLocation,
+ m_Bucket.m_Configuration.PayloadAlignment,
+ Payload.Location.GetFlags());
MovedEntries.push_back({.Key = Key, .Location = Payload.Location});
}
}
@@ -2955,9 +2967,9 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcStats& Stats)
// Find out expired keys
for (const auto& Entry : m_Index)
{
- const IoHash& Key = Entry.first;
- ZenCacheDiskLayer::CacheBucket::PayloadIndex EntryIndex = Entry.second;
- GcClock::Tick AccessTime = m_AccessTimes[EntryIndex];
+ const IoHash& Key = Entry.first;
+ PayloadIndex EntryIndex = Entry.second;
+ GcClock::Tick AccessTime = m_AccessTimes[EntryIndex];
if (AccessTime >= ExpireTicks)
{
continue;
@@ -3004,7 +3016,7 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcStats& Stats)
}
}
- if (!ExpiredEntries.empty())
+ if (Ctx.Settings.IsDeleteMode && !ExpiredEntries.empty())
{
std::vector<BucketPayload> Payloads;
std::vector<AccessTime> AccessTimes;
@@ -3028,22 +3040,253 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcStats& Stats)
class DiskBucketReferenceChecker : public GcReferenceChecker
{
+ using PayloadIndex = ZenCacheDiskLayer::CacheBucket::PayloadIndex;
+ using BucketPayload = ZenCacheDiskLayer::CacheBucket::BucketPayload;
+ using CacheBucket = ZenCacheDiskLayer::CacheBucket;
+ using ReferenceIndex = ZenCacheDiskLayer::CacheBucket::ReferenceIndex;
+
public:
- DiskBucketReferenceChecker(ZenCacheDiskLayer::CacheBucket& Owner) : m_CacheBucket(Owner) {}
+ DiskBucketReferenceChecker(CacheBucket& Owner) : m_CacheBucket(Owner) {}
virtual ~DiskBucketReferenceChecker()
{
- m_IndexLock.reset();
- if (!m_CacheBucket.m_Configuration.EnableReferenceCaching)
+ try
+ {
+ m_IndexLock.reset();
+ if (!m_CacheBucket.m_Configuration.EnableReferenceCaching)
+ {
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys.reset(); });
+            // If reference caching is not enabled, we temporarily used the data structure for reference caching, let's reset it
+ m_CacheBucket.ClearReferenceCache();
+ }
+ }
+ catch (std::exception& Ex)
+ {
+ ZEN_ERROR("~DiskBucketReferenceChecker threw exception: '{}'", Ex.what());
+ }
+ }
+
+ virtual void PreCache(GcCtx& Ctx) override
+ {
+ ZEN_TRACE_CPU("Z$::Disk::Bucket::PreCache");
+
+ Stopwatch Timer;
+ const auto _ = MakeGuard([&] {
+ if (!Ctx.Settings.Verbose)
+ {
+ return;
+ }
+ ZEN_INFO("GCV2: cachebucket [PRECACHE] '{}': found {} references in {}",
+ m_CacheBucket.m_BucketDir,
+ m_CacheBucket.m_ReferenceCount,
+ NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ });
+
+ std::vector<IoHash> UpdateKeys;
+ std::vector<size_t> ReferenceCounts;
+ std::vector<IoHash> References;
+
+ auto GetAttachments = [&References, &ReferenceCounts](const void* CbObjectData) {
+ size_t CurrentReferenceCount = References.size();
+ CbObjectView Obj(CbObjectData);
+ Obj.IterateAttachments([&References](CbFieldView Field) { References.emplace_back(Field.AsAttachment()); });
+ ReferenceCounts.push_back(References.size() - CurrentReferenceCount);
+ };
+
+ // Refresh cache
{
- // If reference caching is not enabled, we temporarily used the data structure for reference caching, lets reset it
- m_CacheBucket.ClearReferenceCache();
+ // If reference caching is enabled the references will be updated at modification for us so we don't need to track modifications
+ if (!m_CacheBucket.m_Configuration.EnableReferenceCaching)
+ {
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys = std::make_unique<HashSet>(); });
+ }
+
+ std::vector<IoHash> StandaloneKeys;
+ {
+ std::vector<IoHash> InlineKeys;
+ std::unordered_map<uint32_t, std::size_t> BlockIndexToEntriesPerBlockIndex;
+ struct InlineEntry
+ {
+ uint32_t InlineKeyIndex;
+ uint32_t Offset;
+ uint32_t Size;
+ };
+ std::vector<std::vector<InlineEntry>> EntriesPerBlock;
+
+ {
+ RwLock::SharedLockScope IndexLock(m_CacheBucket.m_IndexLock);
+ for (const auto& Entry : m_CacheBucket.m_Index)
+ {
+ if (Ctx.IsCancelledFlag.load())
+ {
+ IndexLock.ReleaseNow();
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys.reset(); });
+ return;
+ }
+
+ PayloadIndex EntryIndex = Entry.second;
+ const BucketPayload& Payload = m_CacheBucket.m_Payloads[EntryIndex];
+ const DiskLocation& Loc = Payload.Location;
+
+ if (!Loc.IsFlagSet(DiskLocation::kStructured))
+ {
+ continue;
+ }
+ if (m_CacheBucket.m_Configuration.EnableReferenceCaching &&
+ m_CacheBucket.m_FirstReferenceIndex[EntryIndex] != ReferenceIndex::Unknown())
+ {
+ continue;
+ }
+ const IoHash& Key = Entry.first;
+ if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
+ {
+ StandaloneKeys.push_back(Key);
+ continue;
+ }
+
+ BlockStoreLocation ChunkLocation = Loc.GetBlockLocation(m_CacheBucket.m_Configuration.PayloadAlignment);
+ InlineEntry UpdateEntry = {.InlineKeyIndex = gsl::narrow<uint32_t>(InlineKeys.size()),
+ .Offset = gsl::narrow<uint32_t>(ChunkLocation.Offset),
+ .Size = gsl::narrow<uint32_t>(ChunkLocation.Size)};
+ InlineKeys.push_back(Key);
+
+ if (auto It = BlockIndexToEntriesPerBlockIndex.find(ChunkLocation.BlockIndex);
+ It != BlockIndexToEntriesPerBlockIndex.end())
+ {
+ EntriesPerBlock[It->second].emplace_back(UpdateEntry);
+ }
+ else
+ {
+ BlockIndexToEntriesPerBlockIndex.insert_or_assign(ChunkLocation.BlockIndex, EntriesPerBlock.size());
+ EntriesPerBlock.emplace_back(std::vector<InlineEntry>{UpdateEntry});
+ }
+ }
+ }
+
+ for (auto It : BlockIndexToEntriesPerBlockIndex)
+ {
+ uint32_t BlockIndex = It.first;
+
+ Ref<BlockStoreFile> BlockFile = m_CacheBucket.m_BlockStore.GetBlockFile(BlockIndex);
+ if (BlockFile)
+ {
+ size_t EntriesPerBlockIndex = It.second;
+ std::vector<InlineEntry>& InlineEntries = EntriesPerBlock[EntriesPerBlockIndex];
+
+ std::sort(InlineEntries.begin(), InlineEntries.end(), [&](const InlineEntry& Lhs, const InlineEntry& Rhs) -> bool {
+ return Lhs.Offset < Rhs.Offset;
+ });
+
+ uint64_t BlockFileSize = BlockFile->FileSize();
+ BasicFileBuffer BlockBuffer(BlockFile->GetBasicFile(), 32768);
+ for (const InlineEntry& InlineEntry : InlineEntries)
+ {
+ if ((InlineEntry.Offset + InlineEntry.Size) > BlockFileSize)
+ {
+ ReferenceCounts.push_back(0);
+ }
+ else
+ {
+ MemoryView ChunkView = BlockBuffer.MakeView(InlineEntry.Size, InlineEntry.Offset);
+ if (ChunkView.GetSize() == InlineEntry.Size)
+ {
+ GetAttachments(ChunkView.GetData());
+ }
+ else
+ {
+ std::vector<uint8_t> Buffer(InlineEntry.Size);
+ BlockBuffer.Read(Buffer.data(), InlineEntry.Size, InlineEntry.Offset);
+ GetAttachments(Buffer.data());
+ }
+ }
+ const IoHash& Key = InlineKeys[InlineEntry.InlineKeyIndex];
+ UpdateKeys.push_back(Key);
+ }
+ }
+ }
+ }
+ {
+ for (const IoHash& Key : StandaloneKeys)
+ {
+ if (Ctx.IsCancelledFlag.load())
+ {
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys.reset(); });
+ return;
+ }
+
+ IoBuffer Buffer = m_CacheBucket.GetStandaloneCacheValue(ZenContentType::kCbObject, Key);
+ if (!Buffer)
+ {
+ continue;
+ }
+
+ GetAttachments(Buffer.GetData());
+ UpdateKeys.push_back(Key);
+ }
+ }
+ }
+
+ {
+ size_t ReferenceOffset = 0;
+ RwLock::ExclusiveLockScope IndexLock(m_CacheBucket.m_IndexLock);
+
+ if (!m_CacheBucket.m_Configuration.EnableReferenceCaching)
+ {
+ ZEN_ASSERT(m_CacheBucket.m_FirstReferenceIndex.empty());
+ ZEN_ASSERT(m_CacheBucket.m_ReferenceHashes.empty());
+ ZEN_ASSERT(m_CacheBucket.m_NextReferenceHashesIndexes.empty());
+ ZEN_ASSERT(m_CacheBucket.m_ReferenceCount == 0);
+ ZEN_ASSERT(m_CacheBucket.m_UpdatedKeys);
+
+ // If reference caching is not enabled, we will resize and use the data structure in place for reference caching when
+ // we figure out what this bucket references. This will be reset once the DiskBucketReferenceChecker is deleted.
+ m_CacheBucket.m_FirstReferenceIndex.resize(m_CacheBucket.m_Payloads.size(), ReferenceIndex::Unknown());
+ m_CacheBucket.m_ReferenceHashes.reserve(References.size());
+ m_CacheBucket.m_NextReferenceHashesIndexes.reserve(References.size());
+ }
+ else
+ {
+ ZEN_ASSERT(!m_CacheBucket.m_UpdatedKeys);
+ }
+
+ for (size_t Index = 0; Index < UpdateKeys.size(); Index++)
+ {
+ const IoHash& Key = UpdateKeys[Index];
+ size_t ReferenceCount = ReferenceCounts[Index];
+ if (auto It = m_CacheBucket.m_Index.find(Key); It != m_CacheBucket.m_Index.end())
+ {
+ PayloadIndex EntryIndex = It->second;
+ if (m_CacheBucket.m_Configuration.EnableReferenceCaching)
+ {
+ if (m_CacheBucket.m_FirstReferenceIndex[EntryIndex] != ReferenceIndex::Unknown())
+ {
+ // The reference data is valid and what we have is old/redundant
+ continue;
+ }
+ }
+ else if (m_CacheBucket.m_UpdatedKeys->contains(Key))
+ {
+ // Our pre-cache data is invalid
+ continue;
+ }
+
+ m_CacheBucket.SetReferences(IndexLock,
+ m_CacheBucket.m_FirstReferenceIndex[EntryIndex],
+ std::span<IoHash>{References.data() + ReferenceOffset, ReferenceCount});
+ }
+ ReferenceOffset += ReferenceCount;
+ }
+
+ if (m_CacheBucket.m_Configuration.EnableReferenceCaching && !UpdateKeys.empty())
+ {
+ m_CacheBucket.CompactReferences(IndexLock);
+ }
}
}
virtual void LockState(GcCtx& Ctx) override
{
- ZEN_TRACE_CPU("Z$::Disk::Bucket::RemoveExpiredData");
+ ZEN_TRACE_CPU("Z$::Disk::Bucket::LockState");
Stopwatch Timer;
const auto _ = MakeGuard([&] {
@@ -3062,31 +3305,38 @@ public:
{
m_UncachedReferences.clear();
m_IndexLock.reset();
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys.reset(); });
return;
}
- // Rescan to see if any cache items needs refreshing since last pass when we had the lock
- for (const auto& Entry : m_CacheBucket.m_Index)
+ if (m_CacheBucket.m_UpdatedKeys)
{
- if (Ctx.IsCancelledFlag.load())
+ const HashSet& UpdatedKeys(*m_CacheBucket.m_UpdatedKeys);
+ for (const IoHash& Key : UpdatedKeys)
{
- m_UncachedReferences.clear();
- m_IndexLock.reset();
- return;
- }
+ if (Ctx.IsCancelledFlag.load())
+ {
+ m_UncachedReferences.clear();
+ m_IndexLock.reset();
+ m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_UpdatedKeys.reset(); });
+ return;
+ }
+
+ auto It = m_CacheBucket.m_Index.find(Key);
+ if (It == m_CacheBucket.m_Index.end())
+ {
+ continue;
+ }
- size_t PayloadIndex = Entry.second;
- const ZenCacheDiskLayer::CacheBucket::BucketPayload& Payload = m_CacheBucket.m_Payloads[PayloadIndex];
- const DiskLocation& Loc = Payload.Location;
+ PayloadIndex EntryIndex = It->second;
+ const BucketPayload& Payload = m_CacheBucket.m_Payloads[EntryIndex];
+ const DiskLocation& Loc = Payload.Location;
+
+ if (!Loc.IsFlagSet(DiskLocation::kStructured))
+ {
+ continue;
+ }
- if (!Loc.IsFlagSet(DiskLocation::kStructured))
- {
- continue;
- }
- ZEN_ASSERT(!m_CacheBucket.m_FirstReferenceIndex.empty());
- const IoHash& Key = Entry.first;
- if (m_CacheBucket.m_FirstReferenceIndex[PayloadIndex] == ZenCacheDiskLayer::CacheBucket::ReferenceIndex::Unknown())
- {
IoBuffer Buffer;
if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
{
@@ -3128,15 +3378,27 @@ public:
for (const IoHash& ReferenceHash : m_CacheBucket.m_ReferenceHashes)
{
- IoCids.erase(ReferenceHash);
+ if (IoCids.erase(ReferenceHash) == 1)
+ {
+ if (IoCids.empty())
+ {
+ return;
+ }
+ }
}
for (const IoHash& ReferenceHash : m_UncachedReferences)
{
- IoCids.erase(ReferenceHash);
+ if (IoCids.erase(ReferenceHash) == 1)
+ {
+ if (IoCids.empty())
+ {
+ return;
+ }
+ }
}
}
- ZenCacheDiskLayer::CacheBucket& m_CacheBucket;
+ CacheBucket& m_CacheBucket;
std::unique_ptr<RwLock::SharedLockScope> m_IndexLock;
HashSet m_UncachedReferences;
};
@@ -3152,126 +3414,9 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx)
{
return;
}
- ZEN_INFO("GCV2: cachebucket [CREATE CHECKERS] '{}': found {} references in {}",
- m_BucketDir,
- m_ReferenceCount,
- NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
+ ZEN_INFO("GCV2: cachebucket [CREATE CHECKERS] '{}': completed in {}", m_BucketDir, NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
});
- std::vector<IoHash> UpdateKeys;
- std::vector<IoHash> StandaloneKeys;
- std::vector<size_t> ReferenceCounts;
- std::vector<IoHash> References;
-
- // Refresh cache
- {
- RwLock::SharedLockScope IndexLock(m_IndexLock);
- for (const auto& Entry : m_Index)
- {
- if (Ctx.IsCancelledFlag.load())
- {
- return {};
- }
-
- size_t PayloadIndex = Entry.second;
- const ZenCacheDiskLayer::CacheBucket::BucketPayload& Payload = m_Payloads[PayloadIndex];
- const DiskLocation& Loc = Payload.Location;
-
- if (!Loc.IsFlagSet(DiskLocation::kStructured))
- {
- continue;
- }
- if (m_Configuration.EnableReferenceCaching &&
- m_FirstReferenceIndex[PayloadIndex] != ZenCacheDiskLayer::CacheBucket::ReferenceIndex::Unknown())
- {
- continue;
- }
- const IoHash& Key = Entry.first;
- if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
- {
- StandaloneKeys.push_back(Key);
- continue;
- }
- IoBuffer Buffer = GetInlineCacheValue(Loc);
- if (!Buffer)
- {
- UpdateKeys.push_back(Key);
- ReferenceCounts.push_back(0);
- continue;
- }
- size_t CurrentReferenceCount = References.size();
- {
- CbObjectView Obj(Buffer.GetData());
- Obj.IterateAttachments([&References](CbFieldView Field) { References.emplace_back(Field.AsAttachment()); });
- Buffer = {};
- }
- UpdateKeys.push_back(Key);
- ReferenceCounts.push_back(References.size() - CurrentReferenceCount);
- }
- }
- {
- for (const IoHash& Key : StandaloneKeys)
- {
- if (Ctx.IsCancelledFlag.load())
- {
- return {};
- }
-
- IoBuffer Buffer = GetStandaloneCacheValue(ZenContentType::kCbObject, Key);
- if (!Buffer)
- {
- continue;
- }
-
- size_t CurrentReferenceCount = References.size();
- {
- CbObjectView Obj(Buffer.GetData());
- Obj.IterateAttachments([&References](CbFieldView Field) { References.emplace_back(Field.AsAttachment()); });
- Buffer = {};
- }
- UpdateKeys.push_back(Key);
- ReferenceCounts.push_back(References.size() - CurrentReferenceCount);
- }
- }
-
- {
- size_t ReferenceOffset = 0;
- RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
- if (!m_Configuration.EnableReferenceCaching)
- {
- ZEN_ASSERT(m_FirstReferenceIndex.empty());
- ZEN_ASSERT(m_ReferenceHashes.empty());
- ZEN_ASSERT(m_NextReferenceHashesIndexes.empty());
- ZEN_ASSERT(m_ReferenceCount == 0);
- // If reference caching is not enabled, we will resize and use the data structure in place for reference caching when
- // we figure out what this bucket references. This will be reset once the DiskBucketReferenceChecker is deleted.
- m_FirstReferenceIndex.resize(m_Payloads.size());
- }
- for (size_t Index = 0; Index < UpdateKeys.size(); Index++)
- {
- const IoHash& Key = UpdateKeys[Index];
- size_t ReferenceCount = ReferenceCounts[Index];
- auto It = m_Index.find(Key);
- if (It == m_Index.end())
- {
- ReferenceOffset += ReferenceCount;
- continue;
- }
- if (m_FirstReferenceIndex[It->second] != ReferenceIndex::Unknown())
- {
- continue;
- }
- SetReferences(IndexLock,
- m_FirstReferenceIndex[It->second],
- std::span<IoHash>{References.data() + ReferenceOffset, ReferenceCount});
- ReferenceOffset += ReferenceCount;
- }
- if (m_Configuration.EnableReferenceCaching)
- {
- CompactReferences(IndexLock);
- }
- }
-
return {new DiskBucketReferenceChecker(*this)};
}