about summary refs log tree commit diff
path: root/src/zenserver/cache/cachedisklayer.cpp
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-11-06 13:00:51 +0100
committerGitHub <[email protected]>2023-11-06 13:00:51 +0100
commit04d57bec77a159a44e9955630b053d64c289c348 (patch)
treeb170a4344eaf64764d222637530d2c9516a1624e /src/zenserver/cache/cachedisklayer.cpp
parentzen copy-state command to copy a zenserver data directory without the bulk da... (diff)
downloadzen-04d57bec77a159a44e9955630b053d64c289c348.tar.xz
zen-04d57bec77a159a44e9955630b053d64c289c348.zip
reduce cachebucket mem (#509)
* reduce memory footprint for disk cache separate dense arrays for rawhash+rawsize and memcache buffer * don't write RawHash/RawSize for buckets with no such metadata * helper functions * make index into metadata and cached payload type safe * helper functions for memcached
Diffstat (limited to 'src/zenserver/cache/cachedisklayer.cpp')
-rw-r--r--src/zenserver/cache/cachedisklayer.cpp614
1 file changed, 354 insertions, 260 deletions
diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp
index c0efdc76d..c670d5088 100644
--- a/src/zenserver/cache/cachedisklayer.cpp
+++ b/src/zenserver/cache/cachedisklayer.cpp
@@ -165,9 +165,6 @@ SaveCompactBinaryObject(const fs::path& Path, const CbObject& Object)
//////////////////////////////////////////////////////////////////////////
-const size_t ZenCacheDiskLayer::CacheBucket::UnknownReferencesIndex;
-const size_t ZenCacheDiskLayer::CacheBucket::NoReferencesIndex;
-
ZenCacheDiskLayer::CacheBucket::CacheBucket(GcManager& Gc,
std::atomic_uint64_t& OuterCacheMemoryUsage,
std::string BucketName,
@@ -280,25 +277,32 @@ ZenCacheDiskLayer::CacheBucket::OpenOrCreate(std::filesystem::path BucketDir, bo
}
KeyIndexOffset = 0;
CbArrayView RawHashArray = Manifest["RawHash"].AsArrayView();
- for (CbFieldView& RawHashView : RawHashArray)
+ CbArrayView RawSizeArray = Manifest["RawSize"].AsArrayView();
+ if (RawHashArray.Num() == RawSizeArray.Num())
{
- size_t KeyIndex = KeysIndexes[KeyIndexOffset++];
- if (KeyIndex == (uint64_t)-1)
+ auto RawHashIt = RawHashArray.CreateViewIterator();
+ auto RawSizeIt = RawSizeArray.CreateViewIterator();
+ while (RawHashIt != CbFieldViewIterator())
{
- continue;
+ size_t KeyIndex = KeysIndexes[KeyIndexOffset++];
+ if (KeyIndex == (uint64_t)-1)
+ {
+ continue;
+ }
+ uint64_t RawSize = RawSizeIt.AsUInt64();
+ IoHash RawHash = RawHashIt.AsHash();
+ if (RawSize != 0 || RawHash != IoHash::Zero)
+ {
+ BucketPayload& Payload = m_Payloads[KeyIndex];
+ SetMetaData(Payload, BucketMetaData{.RawSize = RawSize, .RawHash = RawHash});
+ }
+ RawHashIt++;
+ RawSizeIt++;
}
- m_Payloads[KeyIndex].RawHash = RawHashView.AsHash();
}
- KeyIndexOffset = 0;
- CbArrayView RawSizeArray = Manifest["RawSize"].AsArrayView();
- for (CbFieldView& RawSizeView : RawSizeArray)
+ else
{
- size_t KeyIndex = KeysIndexes[KeyIndexOffset++];
- if (KeyIndex == (uint64_t)-1)
- {
- continue;
- }
- m_Payloads[KeyIndex].RawSize = RawSizeView.AsUInt64();
+ ZEN_WARN("Mismatch in size between 'RawHash' and 'RawSize' arrays in {}, skipping meta data", ManifestPath);
}
}
@@ -333,8 +337,8 @@ ZenCacheDiskLayer::CacheBucket::OpenOrCreate(std::filesystem::path BucketDir, bo
ZEN_SCOPED_ERROR("detected bad index entry in index - {}", EntryIndex);
}
- m_Payloads[EntryIndex].RawHash = RawHash;
- m_Payloads[EntryIndex].RawSize = RawSize;
+ BucketPayload& Payload = m_Payloads[EntryIndex];
+ SetMetaData(Payload, BucketMetaData{.RawSize = RawSize, .RawHash = RawHash});
}
}
}
@@ -497,19 +501,15 @@ ZenCacheDiskLayer::CacheBucket::ReadIndexFile(const std::filesystem::path& Index
ZEN_WARN("skipping invalid entry in '{}', reason: '{}'", IndexPath, InvalidEntryReason);
continue;
}
- size_t EntryIndex = m_Payloads.size();
- m_Payloads.emplace_back(BucketPayload{.Location = Entry.Location, .RawSize = 0, .RawHash = IoHash::Zero});
+ PayloadIndex EntryIndex = PayloadIndex(m_Payloads.size());
+ m_Payloads.emplace_back(BucketPayload{.Location = Entry.Location});
m_Index.insert_or_assign(Entry.Key, EntryIndex);
EntryCount++;
}
m_AccessTimes.resize(m_Payloads.size(), AccessTime(GcClock::TickCount()));
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- m_CachedPayloads.resize(m_Payloads.size());
- }
if (m_Configuration.EnableReferenceCaching)
{
- m_FirstReferenceIndex.resize(m_Payloads.size(), UnknownReferencesIndex);
+ m_FirstReferenceIndex.resize(m_Payloads.size());
}
OutVersion = CacheBucketIndexHeader::Version2;
return Header.LogPosition;
@@ -563,19 +563,15 @@ ZenCacheDiskLayer::CacheBucket::ReadLog(const std::filesystem::path& LogPath, ui
++InvalidEntryCount;
return;
}
- size_t EntryIndex = m_Payloads.size();
- m_Payloads.emplace_back(BucketPayload{.Location = Record.Location, .RawSize = 0u, .RawHash = IoHash::Zero});
+ PayloadIndex EntryIndex = PayloadIndex(m_Payloads.size());
+ m_Payloads.emplace_back(BucketPayload{.Location = Record.Location});
m_Index.insert_or_assign(Record.Key, EntryIndex);
},
SkipEntryCount);
m_AccessTimes.resize(m_Payloads.size(), AccessTime(GcClock::TickCount()));
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- m_CachedPayloads.resize(m_Payloads.size());
- }
if (m_Configuration.EnableReferenceCaching)
{
- m_FirstReferenceIndex.resize(m_Payloads.size(), UnknownReferencesIndex);
+ m_FirstReferenceIndex.resize(m_Payloads.size());
}
if (InvalidEntryCount)
{
@@ -597,7 +593,10 @@ ZenCacheDiskLayer::CacheBucket::OpenLog(const bool IsNew)
m_Index.clear();
m_Payloads.clear();
m_AccessTimes.clear();
- m_CachedPayloads.clear();
+ m_MetaDatas.clear();
+ m_FreeMetaDatas.clear();
+ m_MemCachedPayloads.clear();
+ m_FreeMemCachedPayloads.clear();
m_FirstReferenceIndex.clear();
m_ReferenceHashes.clear();
m_NextReferenceHashesIndexes.clear();
@@ -740,29 +739,31 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal
return false;
}
- size_t EntryIndex = It.value();
- m_AccessTimes[EntryIndex] = GcClock::TickCount();
- DiskLocation Location = m_Payloads[EntryIndex].Location;
- bool FillRawHashAndRawSize = (!Location.IsFlagSet(DiskLocation::kStructured)) && (Location.Size() > 0);
- if (FillRawHashAndRawSize)
+ size_t EntryIndex = It.value();
+ m_AccessTimes[EntryIndex] = GcClock::TickCount();
+ DiskLocation Location = m_Payloads[EntryIndex].Location;
+
+ bool FillRawHashAndRawSize = (!Location.IsFlagSet(DiskLocation::kStructured)) && (Location.Size() > 0);
+
+ const BucketPayload* Payload = &m_Payloads[EntryIndex];
+ if (Payload->MetaData)
{
- const BucketPayload& Payload = m_Payloads[EntryIndex];
- if (Payload.RawHash != IoHash::Zero || Payload.RawSize != 0)
- {
- OutValue.RawHash = Payload.RawHash;
- OutValue.RawSize = Payload.RawSize;
- FillRawHashAndRawSize = false;
- }
+ const BucketMetaData& MetaData = m_MetaDatas[Payload->MetaData];
+ OutValue.RawHash = MetaData.RawHash;
+ OutValue.RawSize = MetaData.RawSize;
+ FillRawHashAndRawSize = false;
}
- if (m_Configuration.MemCacheSizeThreshold > 0 && m_CachedPayloads[EntryIndex])
+ if (Payload->MemCached)
{
- OutValue.Value = m_CachedPayloads[EntryIndex];
+ OutValue.Value = m_MemCachedPayloads[Payload->MemCached];
+ Payload = nullptr;
IndexLock.ReleaseNow();
m_MemoryHitCount++;
}
else
{
+ Payload = nullptr;
IndexLock.ReleaseNow();
if (m_Configuration.MemCacheSizeThreshold > 0)
{
@@ -785,12 +786,11 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal
RwLock::ExclusiveLockScope _(m_IndexLock);
if (auto UpdateIt = m_Index.find(HashKey); UpdateIt != m_Index.end())
{
+ BucketPayload& WritePayload = m_Payloads[EntryIndex];
// Only update if it has not already been updated by other thread
- if (!m_CachedPayloads[UpdateIt->second])
+ if (!WritePayload.MemCached)
{
- m_CachedPayloads[UpdateIt->second] = OutValue.Value;
- AddMemCacheUsage(ValueSize);
- m_MemoryWriteCount++;
+ SetMemCachedData(WritePayload, OutValue.Value);
}
}
}
@@ -819,10 +819,11 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal
if (auto WriteIt = m_Index.find(HashKey); WriteIt != m_Index.end())
{
BucketPayload& WritePayload = m_Payloads[WriteIt.value()];
- if (OutValue.RawHash == IoHash::Zero && OutValue.RawSize == 0)
+
+ // Only set if no other path has already updated the meta data
+ if (!WritePayload.MetaData)
{
- WritePayload.RawHash = OutValue.RawHash;
- WritePayload.RawSize = OutValue.RawSize;
+ SetMetaData(WritePayload, {.RawSize = OutValue.RawSize, .RawHash = OutValue.RawHash});
}
}
}
@@ -866,9 +867,8 @@ ZenCacheDiskLayer::CacheBucket::MemCacheTrim(GcClock::TimePoint ExpireTime)
{
if (m_AccessTimes[Kv.second] < ExpireTicks)
{
- size_t PayloadSize = m_CachedPayloads[Kv.second].GetSize();
- RemoveMemCacheUsage(PayloadSize);
- m_CachedPayloads[Kv.second] = {};
+ BucketPayload& Payload = m_Payloads[Kv.second];
+ RemoveMemCachedData(Payload);
}
}
}
@@ -881,8 +881,9 @@ ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint TickStart,
RwLock::SharedLockScope _(m_IndexLock);
for (const auto& It : m_Index)
{
- size_t Index = It.second;
- if (!m_CachedPayloads[Index])
+ size_t Index = It.second;
+ BucketPayload& Payload = m_Payloads[Index];
+ if (!Payload.MemCached)
{
continue;
}
@@ -897,7 +898,7 @@ ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint TickStart,
{
InOutUsageSlots.resize(uint64_t(Slot + 1), 0);
}
- InOutUsageSlots[Slot] += m_CachedPayloads[Index].GetSize();
+ InOutUsageSlots[Slot] += m_MemCachedPayloads[Payload.MemCached].GetSize();
}
}
@@ -922,7 +923,10 @@ ZenCacheDiskLayer::CacheBucket::Drop()
m_Index.clear();
m_Payloads.clear();
m_AccessTimes.clear();
- m_CachedPayloads.clear();
+ m_MetaDatas.clear();
+ m_FreeMetaDatas.clear();
+ m_MemCachedPayloads.clear();
+ m_FreeMemCachedPayloads.clear();
m_FirstReferenceIndex.clear();
m_ReferenceHashes.clear();
m_NextReferenceHashesIndexes.clear();
@@ -1017,21 +1021,38 @@ ZenCacheDiskLayer::CacheBucket::MakeManifest(IndexMap&& Index,
}
Writer.EndArray();
- Writer.BeginArray("RawHash"sv);
- for (auto& Kv : Index)
+ if (!m_MetaDatas.empty())
{
- const BucketPayload& Payload = Payloads[Kv.second];
- Writer.AddHash(Payload.RawHash);
- }
- Writer.EndArray();
+ Writer.BeginArray("RawHash"sv);
+ for (auto& Kv : Index)
+ {
+ const BucketPayload& Payload = Payloads[Kv.second];
+ if (Payload.MetaData)
+ {
+ Writer.AddHash(m_MetaDatas[Payload.MetaData].RawHash);
+ }
+ else
+ {
+ Writer.AddHash(IoHash::Zero);
+ }
+ }
+ Writer.EndArray();
- Writer.BeginArray("RawSize"sv);
- for (auto& Kv : Index)
- {
- const BucketPayload& Payload = Payloads[Kv.second];
- Writer.AddInteger(Payload.RawSize);
+ Writer.BeginArray("RawSize"sv);
+ for (auto& Kv : Index)
+ {
+ const BucketPayload& Payload = Payloads[Kv.second];
+ if (Payload.MetaData)
+ {
+ Writer.AddInteger(m_MetaDatas[Payload.MetaData].RawSize);
+ }
+ else
+ {
+ Writer.AddInteger(0);
+ }
+ }
+ Writer.EndArray();
}
- Writer.EndArray();
}
return Writer.Save();
}
@@ -1293,12 +1314,8 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
m_StandaloneSize.fetch_sub(Location.Size(), std::memory_order::relaxed);
}
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- size_t CachedSize = m_CachedPayloads[It->second].GetSize();
- RemoveMemCacheUsage(CachedSize);
- m_CachedPayloads[It->second] = IoBuffer{};
- }
+ RemoveMemCachedData(Payload);
+ RemoveMetaData(Payload);
Location.Flags |= DiskLocation::kTombStone;
LogEntries.push_back(DiskIndexEntry{.Key = BadKey, .Location = Location});
@@ -1325,15 +1342,16 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
// Clean up m_AccessTimes and m_Payloads vectors
{
- std::vector<BucketPayload> Payloads;
- std::vector<AccessTime> AccessTimes;
- std::vector<IoBuffer> CachedPayloads;
- std::vector<size_t> FirstReferenceIndex;
- IndexMap Index;
+ std::vector<BucketPayload> Payloads;
+ std::vector<AccessTime> AccessTimes;
+ std::vector<BucketMetaData> MetaDatas;
+ std::vector<IoBuffer> MemCachedPayloads;
+ std::vector<ReferenceIndex> FirstReferenceIndex;
+ IndexMap Index;
{
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
- CompactState(Payloads, AccessTimes, CachedPayloads, FirstReferenceIndex, Index, IndexLock);
+ CompactState(Payloads, AccessTimes, MetaDatas, MemCachedPayloads, FirstReferenceIndex, Index, IndexLock);
}
}
}
@@ -1381,10 +1399,10 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx)
const GcClock::Tick ExpireTicks = ExpireTime.time_since_epoch().count();
- IndexMap Index;
- std::vector<AccessTime> AccessTimes;
- std::vector<BucketPayload> Payloads;
- std::vector<size_t> FirstReferenceIndex;
+ IndexMap Index;
+ std::vector<AccessTime> AccessTimes;
+ std::vector<BucketPayload> Payloads;
+ std::vector<ReferenceIndex> FirstReferenceIndex;
{
RwLock::SharedLockScope __(m_IndexLock);
#if CALCULATE_BLOCKING_TIME
@@ -1437,7 +1455,7 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx)
}
if (m_Configuration.EnableReferenceCaching)
{
- if (FirstReferenceIndex.empty() || (FirstReferenceIndex[PayloadIndex] == UnknownReferencesIndex))
+ if (FirstReferenceIndex.empty() || FirstReferenceIndex[PayloadIndex] == ReferenceIndex::Unknown())
{
StructuredItemsWithUnknownAttachments.push_back(Entry);
continue;
@@ -1500,11 +1518,13 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx)
#endif // CALCULATE_BLOCKING_TIME
if (auto It = m_Index.find(Key); It != m_Index.end())
{
- if (m_Configuration.MemCacheSizeThreshold > 0)
+ const BucketPayload& CachedPayload = Payloads[PayloadIndex];
+ if (CachedPayload.MemCached)
{
- Buffer = m_CachedPayloads[It->second];
+ Buffer = m_MemCachedPayloads[CachedPayload.MemCached];
+ ZEN_ASSERT_SLOW(Buffer);
}
- if (!Buffer)
+ else
{
DiskLocation Location = m_Payloads[It->second].Location;
IndexLock.ReleaseNow();
@@ -1536,7 +1556,7 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx)
#endif // CALCULATE_BLOCKING_TIME
if (auto It = m_Index.find(Key); It != m_Index.end())
{
- if (m_FirstReferenceIndex[It->second] == UnknownReferencesIndex)
+ if (m_FirstReferenceIndex[It->second] == ReferenceIndex::Unknown())
{
SetReferences(IndexLock,
m_FirstReferenceIndex[It->second],
@@ -1625,11 +1645,12 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
if (!DeletedChunks.empty())
{
// Clean up m_AccessTimes and m_Payloads vectors
- std::vector<BucketPayload> Payloads;
- std::vector<AccessTime> AccessTimes;
- std::vector<IoBuffer> CachedPayloads;
- std::vector<size_t> FirstReferenceIndex;
- IndexMap Index;
+ std::vector<BucketPayload> Payloads;
+ std::vector<AccessTime> AccessTimes;
+ std::vector<BucketMetaData> MetaDatas;
+ std::vector<IoBuffer> MemCachedPayloads;
+ std::vector<ReferenceIndex> FirstReferenceIndex;
+ IndexMap Index;
{
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
Stopwatch Timer;
@@ -1638,7 +1659,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
WriteBlockTimeUs += ElapsedUs;
WriteBlockLongestTimeUs = std::max(ElapsedUs, WriteBlockLongestTimeUs);
});
- CompactState(Payloads, AccessTimes, CachedPayloads, FirstReferenceIndex, Index, IndexLock);
+ CompactState(Payloads, AccessTimes, MetaDatas, MemCachedPayloads, FirstReferenceIndex, Index, IndexLock);
}
GcCtx.AddDeletedCids(std::vector<IoHash>(DeletedChunks.begin(), DeletedChunks.end()));
}
@@ -1838,9 +1859,9 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
}
for (const size_t ChunkIndex : RemovedChunks)
{
- const IoHash& ChunkHash = ChunkIndexToChunkHash[ChunkIndex];
- size_t EntryIndex = m_Index[ChunkHash];
- const BucketPayload& Payload = m_Payloads[EntryIndex];
+ const IoHash& ChunkHash = ChunkIndexToChunkHash[ChunkIndex];
+ size_t EntryIndex = m_Index[ChunkHash];
+ BucketPayload& Payload = m_Payloads[EntryIndex];
if (Payloads[Index[ChunkHash]].Location != Payload.Location)
{
// Entry has been updated while GC was running, ignore the delete
@@ -1851,12 +1872,10 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
.Location = DiskLocation(OldDiskLocation.GetBlockLocation(m_Configuration.PayloadAlignment),
m_Configuration.PayloadAlignment,
OldDiskLocation.GetFlags() | DiskLocation::kTombStone)});
- if (m_Configuration.MemCacheSizeThreshold > 0 && m_CachedPayloads[EntryIndex])
- {
- uint64_t CachePayloadSize = m_CachedPayloads[EntryIndex].Size();
- RemoveMemCacheUsage(CachePayloadSize);
- m_CachedPayloads[EntryIndex] = IoBuffer{};
- }
+
+ RemoveMemCachedData(Payload);
+ RemoveMetaData(Payload);
+
m_Index.erase(ChunkHash);
DeletedChunks.insert(ChunkHash);
}
@@ -1892,7 +1911,7 @@ ZenCacheDiskLayer::CacheBucket::EntryCount() const
}
CacheValueDetails::ValueDetails
-ZenCacheDiskLayer::CacheBucket::GetValueDetails(const IoHash& Key, size_t Index) const
+ZenCacheDiskLayer::CacheBucket::GetValueDetails(const IoHash& Key, PayloadIndex Index) const
{
std::vector<IoHash> Attachments;
const BucketPayload& Payload = m_Payloads[Index];
@@ -1904,9 +1923,10 @@ ZenCacheDiskLayer::CacheBucket::GetValueDetails(const IoHash& Key, size_t Index)
CbObjectView Obj(Value.GetData());
Obj.IterateAttachments([&Attachments](CbFieldView Field) { Attachments.emplace_back(Field.AsAttachment()); });
}
+ BucketMetaData MetaData = GetMetaData(Payload);
return CacheValueDetails::ValueDetails{.Size = Payload.Location.Size(),
- .RawSize = Payload.RawSize,
- .RawHash = Payload.RawHash,
+ .RawSize = MetaData.RawSize,
+ .RawHash = MetaData.RawHash,
.LastAccess = m_AccessTimes[Index],
.Attachments = std::move(Attachments),
.ContentType = Payload.Location.GetContentType()};
@@ -2088,52 +2108,132 @@ ZenCacheDiskLayer::CacheBucket::PutStandaloneCacheValue(const IoHash& HashKey, c
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
ValueLock.ReleaseNow();
+ PayloadIndex EntryIndex = {};
if (auto It = m_Index.find(HashKey); It == m_Index.end())
{
// Previously unknown object
- size_t EntryIndex = m_Payloads.size();
- m_Payloads.emplace_back(BucketPayload{.Location = Loc, .RawSize = Value.RawSize, .RawHash = Value.RawHash});
+ EntryIndex = PayloadIndex(m_Payloads.size());
+ m_Payloads.emplace_back(BucketPayload{.Location = Loc});
m_AccessTimes.emplace_back(GcClock::TickCount());
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- m_CachedPayloads.emplace_back(IoBuffer{});
- }
if (m_Configuration.EnableReferenceCaching)
{
- m_FirstReferenceIndex.emplace_back(UnknownReferencesIndex);
+ m_FirstReferenceIndex.emplace_back(ReferenceIndex{});
SetReferences(IndexLock, m_FirstReferenceIndex.back(), References);
}
m_Index.insert_or_assign(HashKey, EntryIndex);
}
else
{
- size_t EntryIndex = It.value();
- ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
+ EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < PayloadIndex(m_AccessTimes.size()));
BucketPayload& Payload = m_Payloads[EntryIndex];
uint64_t OldSize = Payload.Location.Size();
- Payload = BucketPayload{.Location = Loc, .RawSize = Value.RawSize, .RawHash = Value.RawHash};
+ Payload = BucketPayload{.Location = Loc};
if (m_Configuration.EnableReferenceCaching)
{
SetReferences(IndexLock, m_FirstReferenceIndex[EntryIndex], References);
}
m_AccessTimes[EntryIndex] = GcClock::TickCount();
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- if (m_CachedPayloads[EntryIndex])
- {
- uint64_t CachePayloadSize = m_CachedPayloads[EntryIndex].Size();
- RemoveMemCacheUsage(CachePayloadSize);
- m_CachedPayloads[EntryIndex] = IoBuffer{};
- }
- }
+ RemoveMemCachedData(Payload);
m_StandaloneSize.fetch_sub(OldSize, std::memory_order::relaxed);
}
+ if (Value.RawSize != 0 || Value.RawHash != IoHash::Zero)
+ {
+ SetMetaData(m_Payloads[EntryIndex], {.RawSize = Value.RawSize, .RawHash = Value.RawHash});
+ }
+ else
+ {
+ RemoveMetaData(m_Payloads[EntryIndex]);
+ }
m_SlogFile.Append({.Key = HashKey, .Location = Loc});
m_StandaloneSize.fetch_add(NewFileSize, std::memory_order::relaxed);
}
void
+ZenCacheDiskLayer::CacheBucket::SetMetaData(BucketPayload& Payload, const ZenCacheDiskLayer::CacheBucket::BucketMetaData& MetaData)
+{
+ if (Payload.MetaData)
+ {
+ m_MetaDatas[Payload.MetaData] = MetaData;
+ }
+ else
+ {
+ if (m_FreeMetaDatas.empty())
+ {
+ Payload.MetaData = MetaDataIndex(m_MetaDatas.size());
+ m_MetaDatas.emplace_back(MetaData);
+ }
+ else
+ {
+ Payload.MetaData = m_FreeMetaDatas.back();
+ m_FreeMetaDatas.pop_back();
+ m_MetaDatas[Payload.MetaData] = MetaData;
+ }
+ }
+}
+
+void
+ZenCacheDiskLayer::CacheBucket::RemoveMetaData(BucketPayload& Payload)
+{
+ if (Payload.MetaData)
+ {
+ m_FreeMetaDatas.push_back(Payload.MetaData);
+ Payload.MetaData = {};
+ }
+}
+
+void
+ZenCacheDiskLayer::CacheBucket::SetMemCachedData(BucketPayload& Payload, IoBuffer& MemCachedData)
+{
+ uint64_t PayloadSize = MemCachedData.GetSize();
+ ZEN_ASSERT(PayloadSize != 0);
+ if (m_FreeMemCachedPayloads.empty())
+ {
+ if (m_MemCachedPayloads.size() != std::numeric_limits<uint32_t>::max())
+ {
+ Payload.MemCached = MemCachedIndex(gsl::narrow<uint32_t>(m_MemCachedPayloads.size()));
+ m_MemCachedPayloads.push_back(MemCachedData);
+ AddMemCacheUsage(PayloadSize);
+ m_MemoryWriteCount++;
+ }
+ }
+ else
+ {
+ Payload.MemCached = m_FreeMemCachedPayloads.back();
+ m_FreeMemCachedPayloads.pop_back();
+ m_MemCachedPayloads[Payload.MemCached] = MemCachedData;
+ AddMemCacheUsage(PayloadSize);
+ m_MemoryWriteCount++;
+ }
+}
+
+size_t
+ZenCacheDiskLayer::CacheBucket::RemoveMemCachedData(BucketPayload& Payload)
+{
+ if (Payload.MemCached)
+ {
+ size_t PayloadSize = m_MemCachedPayloads[Payload.MemCached].GetSize();
+ RemoveMemCacheUsage(PayloadSize);
+ m_MemCachedPayloads[Payload.MemCached] = IoBuffer{};
+ m_FreeMemCachedPayloads.push_back(Payload.MemCached);
+ Payload.MemCached = {};
+ return PayloadSize;
+ }
+ return 0;
+}
+
+ZenCacheDiskLayer::CacheBucket::BucketMetaData
+ZenCacheDiskLayer::CacheBucket::GetMetaData(const BucketPayload& Payload) const
+{
+ if (Payload.MetaData)
+ {
+ return m_MetaDatas[Payload.MetaData];
+ }
+ return {};
+}
+
+void
ZenCacheDiskLayer::CacheBucket::PutInlineCacheValue(const IoHash& HashKey, const ZenCacheValue& Value, std::span<IoHash> References)
{
ZEN_TRACE_CPU("Z$::Disk::Bucket::PutInlineCacheValue");
@@ -2155,64 +2255,51 @@ ZenCacheDiskLayer::CacheBucket::PutInlineCacheValue(const IoHash& HashKey, const
? IoBufferBuilder::ReadFromFileMaybe(Value.Value)
: IoBuffer{};
- m_BlockStore.WriteChunk(
- Value.Value.Data(),
- Value.Value.Size(),
- m_Configuration.PayloadAlignment,
- [&](const BlockStoreLocation& BlockStoreLocation) {
- DiskLocation Location(BlockStoreLocation, m_Configuration.PayloadAlignment, EntryFlags);
- m_SlogFile.Append({.Key = HashKey, .Location = Location});
+ m_BlockStore.WriteChunk(Value.Value.Data(),
+ Value.Value.Size(),
+ m_Configuration.PayloadAlignment,
+ [&](const BlockStoreLocation& BlockStoreLocation) {
+ DiskLocation Location(BlockStoreLocation, m_Configuration.PayloadAlignment, EntryFlags);
+ m_SlogFile.Append({.Key = HashKey, .Location = Location});
- RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
- if (auto It = m_Index.find(HashKey); It != m_Index.end())
- {
- size_t EntryIndex = It.value();
- ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
- m_Payloads[EntryIndex] = (BucketPayload{.Location = Location, .RawSize = Value.RawSize, .RawHash = Value.RawHash});
- m_AccessTimes[EntryIndex] = GcClock::TickCount();
+ RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
+ PayloadIndex EntryIndex = {};
+ if (auto It = m_Index.find(HashKey); It != m_Index.end())
+ {
+ EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < PayloadIndex(m_AccessTimes.size()));
+ BucketPayload& Payload = m_Payloads[EntryIndex];
- if (MemCacheEnabled)
- {
- if (m_CachedPayloads[EntryIndex])
- {
- uint64_t OldCachedSize = m_CachedPayloads[EntryIndex].GetSize();
- RemoveMemCacheUsage(OldCachedSize);
- }
+ RemoveMemCachedData(Payload);
- if (MemCacheBuffer)
- {
- AddMemCacheUsage(PayloadSize);
- m_MemoryWriteCount++;
- }
- m_CachedPayloads[EntryIndex] = std::move(MemCacheBuffer);
- }
- if (m_Configuration.EnableReferenceCaching)
- {
- SetReferences(IndexLock, m_FirstReferenceIndex[EntryIndex], References);
- }
- }
- else
- {
- size_t EntryIndex = m_Payloads.size();
- m_Payloads.emplace_back(BucketPayload{.Location = Location, .RawSize = Value.RawSize, .RawHash = Value.RawHash});
- m_AccessTimes.emplace_back(GcClock::TickCount());
- if (MemCacheEnabled)
- {
- if (MemCacheBuffer)
- {
- AddMemCacheUsage(PayloadSize);
- m_MemoryWriteCount++;
- }
- m_CachedPayloads.emplace_back(std::move(MemCacheBuffer));
- }
- if (m_Configuration.EnableReferenceCaching)
- {
- m_FirstReferenceIndex.emplace_back(UnknownReferencesIndex);
- SetReferences(IndexLock, m_FirstReferenceIndex.back(), References);
- }
- m_Index.insert_or_assign(HashKey, EntryIndex);
- }
- });
+ Payload = (BucketPayload{.Location = Location});
+ m_AccessTimes[EntryIndex] = GcClock::TickCount();
+
+ if (m_Configuration.EnableReferenceCaching)
+ {
+ SetReferences(IndexLock, m_FirstReferenceIndex[EntryIndex], References);
+ }
+ }
+ else
+ {
+ EntryIndex = PayloadIndex(m_Payloads.size());
+ m_Payloads.emplace_back(BucketPayload{.Location = Location});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ if (m_Configuration.EnableReferenceCaching)
+ {
+ m_FirstReferenceIndex.emplace_back(ReferenceIndex{});
+ SetReferences(IndexLock, m_FirstReferenceIndex.back(), References);
+ }
+ m_Index.insert_or_assign(HashKey, EntryIndex);
+ }
+
+ if (MemCacheBuffer)
+ {
+ BucketPayload& Payload = m_Payloads[EntryIndex];
+ SetMemCachedData(Payload, MemCacheBuffer);
+ }
+ RemoveMetaData(m_Payloads[EntryIndex]);
+ });
}
std::string
@@ -2324,12 +2411,9 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcReferencerStats&
{
auto It = m_Index.find(Entry.Key);
ZEN_ASSERT(It != m_Index.end());
- if (m_Configuration.MemCacheSizeThreshold > 0 && m_CachedPayloads[It->second])
- {
- size_t PayloadSize = m_CachedPayloads[It->second].GetSize();
- Stats.RemovedMemory += PayloadSize;
- RemoveMemCacheUsage(PayloadSize);
- }
+ BucketPayload& Payload = m_Payloads[It->second];
+ RemoveMetaData(Payload);
+ Stats.RemovedMemory += RemoveMemCachedData(Payload);
m_Index.erase(It);
}
m_SlogFile.Append(ExpiredEntries);
@@ -2425,14 +2509,15 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcReferencerStats&
[&]() { return 0; });
}
- std::vector<BucketPayload> Payloads;
- std::vector<AccessTime> AccessTimes;
- std::vector<IoBuffer> CachedPayloads;
- std::vector<size_t> FirstReferenceIndex;
- IndexMap Index;
+ std::vector<BucketPayload> Payloads;
+ std::vector<AccessTime> AccessTimes;
+ std::vector<BucketMetaData> MetaDatas;
+ std::vector<IoBuffer> MemCachedPayloads;
+ std::vector<ReferenceIndex> FirstReferenceIndex;
+ IndexMap Index;
{
RwLock::ExclusiveLockScope IndexLock(m_IndexLock);
- CompactState(Payloads, AccessTimes, CachedPayloads, FirstReferenceIndex, Index, IndexLock);
+ CompactState(Payloads, AccessTimes, MetaDatas, MemCachedPayloads, FirstReferenceIndex, Index, IndexLock);
}
}
@@ -2480,7 +2565,7 @@ public:
}
ZEN_ASSERT(!m_CacheBucket.m_FirstReferenceIndex.empty());
const IoHash& Key = Entry.first;
- if (m_CacheBucket.m_FirstReferenceIndex[PayloadIndex] == ZenCacheDiskLayer::CacheBucket::UnknownReferencesIndex)
+ if (m_CacheBucket.m_FirstReferenceIndex[PayloadIndex] == ZenCacheDiskLayer::CacheBucket::ReferenceIndex::Unknown())
{
IoBuffer Buffer;
if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
@@ -2555,7 +2640,7 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx)
continue;
}
if (m_Configuration.EnableReferenceCaching &&
- m_FirstReferenceIndex[PayloadIndex] != ZenCacheDiskLayer::CacheBucket::UnknownReferencesIndex)
+ m_FirstReferenceIndex[PayloadIndex] != ZenCacheDiskLayer::CacheBucket::ReferenceIndex::Unknown())
{
continue;
}
@@ -2613,7 +2698,7 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx)
ZEN_ASSERT(m_ReferenceCount == 0);
// If reference caching is not enabled, we will resize and use the data structure in place for reference caching when
// we figure out what this bucket references. This will be reset once the DiskBucketReferenceChecker is deleted.
- m_FirstReferenceIndex.resize(m_Payloads.size(), UnknownReferencesIndex);
+ m_FirstReferenceIndex.resize(m_Payloads.size());
}
for (size_t Index = 0; Index < UpdateKeys.size(); Index++)
{
@@ -2625,7 +2710,7 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx)
ReferenceOffset += ReferenceCount;
continue;
}
- if (m_FirstReferenceIndex[It->second] != ZenCacheDiskLayer::CacheBucket::UnknownReferencesIndex)
+ if (m_FirstReferenceIndex[It->second] != ReferenceIndex::Unknown())
{
continue;
}
@@ -2646,9 +2731,9 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx)
void
ZenCacheDiskLayer::CacheBucket::CompactReferences(RwLock::ExclusiveLockScope&)
{
- std::vector<size_t> FirstReferenceIndex;
- std::vector<IoHash> NewReferenceHashes;
- std::vector<size_t> NewNextReferenceHashesIndexes;
+ std::vector<ReferenceIndex> FirstReferenceIndex;
+ std::vector<IoHash> NewReferenceHashes;
+ std::vector<ReferenceIndex> NewNextReferenceHashesIndexes;
FirstReferenceIndex.reserve(m_ReferenceCount);
NewReferenceHashes.reserve(m_ReferenceCount);
@@ -2656,27 +2741,27 @@ ZenCacheDiskLayer::CacheBucket::CompactReferences(RwLock::ExclusiveLockScope&)
for (const auto& It : m_Index)
{
- size_t SourceIndex = m_FirstReferenceIndex[It.second];
- if (SourceIndex == UnknownReferencesIndex)
+ ReferenceIndex SourceIndex = m_FirstReferenceIndex[It.second];
+ if (SourceIndex == ReferenceIndex::Unknown())
{
- FirstReferenceIndex.push_back(UnknownReferencesIndex);
+ FirstReferenceIndex.push_back(ReferenceIndex{});
continue;
}
- if (SourceIndex == NoReferencesIndex)
+ if (SourceIndex == ReferenceIndex::None())
{
- FirstReferenceIndex.push_back(NoReferencesIndex);
+ FirstReferenceIndex.push_back(ReferenceIndex::None());
continue;
}
- FirstReferenceIndex.push_back(NewNextReferenceHashesIndexes.size());
+ FirstReferenceIndex.push_back(ReferenceIndex{NewNextReferenceHashesIndexes.size()});
NewReferenceHashes.push_back(m_ReferenceHashes[SourceIndex]);
- NewNextReferenceHashesIndexes.push_back(NoReferencesIndex);
+ NewNextReferenceHashesIndexes.push_back(ReferenceIndex::None());
SourceIndex = m_NextReferenceHashesIndexes[SourceIndex];
- while (SourceIndex != NoReferencesIndex)
+ while (SourceIndex != ReferenceIndex::None())
{
- NewNextReferenceHashesIndexes.back() = NewReferenceHashes.size();
+ NewNextReferenceHashesIndexes.back() = ReferenceIndex{NewReferenceHashes.size()};
NewReferenceHashes.push_back(m_ReferenceHashes[SourceIndex]);
- NewNextReferenceHashesIndexes.push_back(NoReferencesIndex);
+ NewNextReferenceHashesIndexes.push_back(ReferenceIndex::None());
SourceIndex = m_NextReferenceHashesIndexes[SourceIndex];
}
}
@@ -2686,35 +2771,35 @@ ZenCacheDiskLayer::CacheBucket::CompactReferences(RwLock::ExclusiveLockScope&)
m_ReferenceCount = m_ReferenceHashes.size();
}
-size_t
+ZenCacheDiskLayer::CacheBucket::ReferenceIndex
ZenCacheDiskLayer::CacheBucket::AllocateReferenceEntry(RwLock::ExclusiveLockScope&, const IoHash& Key)
{
- size_t ReferenceIndex = m_ReferenceHashes.size();
+ ReferenceIndex NewIndex = ReferenceIndex{m_ReferenceHashes.size()};
m_ReferenceHashes.push_back(Key);
- m_NextReferenceHashesIndexes.push_back(NoReferencesIndex);
+ m_NextReferenceHashesIndexes.emplace_back(ReferenceIndex::None());
m_ReferenceCount++;
- return ReferenceIndex;
+ return NewIndex;
}
void
ZenCacheDiskLayer::CacheBucket::SetReferences(RwLock::ExclusiveLockScope& Lock,
- std::size_t& FirstReferenceIndex,
+ ReferenceIndex& FirstReferenceIndex,
std::span<IoHash> References)
{
auto ReferenceIt = References.begin();
- if (FirstReferenceIndex == UnknownReferencesIndex)
+ if (FirstReferenceIndex == ReferenceIndex::Unknown())
{
- FirstReferenceIndex = NoReferencesIndex;
+ FirstReferenceIndex = ReferenceIndex::None();
}
- size_t CurrentIndex = FirstReferenceIndex;
- if (CurrentIndex != NoReferencesIndex)
+ ReferenceIndex CurrentIndex = FirstReferenceIndex;
+ if (CurrentIndex != ReferenceIndex::None())
{
if (ReferenceIt != References.end())
{
ZEN_ASSERT_SLOW(*ReferenceIt != IoHash::Zero);
- if (CurrentIndex == NoReferencesIndex)
+ if (CurrentIndex == ReferenceIndex::None())
{
CurrentIndex = AllocateReferenceEntry(Lock, *ReferenceIt);
FirstReferenceIndex = CurrentIndex;
@@ -2739,64 +2824,64 @@ ZenCacheDiskLayer::CacheBucket::SetReferences(RwLock::ExclusiveLockScope& Lock,
while (ReferenceIt != References.end())
{
- ZEN_ASSERT(CurrentIndex != NoReferencesIndex);
+ ZEN_ASSERT(CurrentIndex != ReferenceIndex::None());
ZEN_ASSERT_SLOW(*ReferenceIt != IoHash::Zero);
- size_t ReferenceIndex = m_NextReferenceHashesIndexes[CurrentIndex];
- if (ReferenceIndex == NoReferencesIndex)
+ ReferenceIndex NextReferenceIndex = m_NextReferenceHashesIndexes[CurrentIndex];
+ if (NextReferenceIndex == ReferenceIndex::None())
{
- ReferenceIndex = AllocateReferenceEntry(Lock, *ReferenceIt);
- m_NextReferenceHashesIndexes[CurrentIndex] = ReferenceIndex;
+ NextReferenceIndex = AllocateReferenceEntry(Lock, *ReferenceIt);
+ m_NextReferenceHashesIndexes[CurrentIndex] = NextReferenceIndex;
}
else
{
- m_ReferenceHashes[ReferenceIndex] = *ReferenceIt;
+ m_ReferenceHashes[NextReferenceIndex] = *ReferenceIt;
}
- CurrentIndex = ReferenceIndex;
+ CurrentIndex = NextReferenceIndex;
ReferenceIt++;
}
- while (CurrentIndex != NoReferencesIndex)
+ while (CurrentIndex != ReferenceIndex::None())
{
- size_t NextIndex = m_NextReferenceHashesIndexes[CurrentIndex];
- if (NextIndex != NoReferencesIndex)
+ ReferenceIndex NextIndex = m_NextReferenceHashesIndexes[CurrentIndex];
+ if (NextIndex != ReferenceIndex::None())
{
m_ReferenceHashes[CurrentIndex] = IoHash::Zero;
ZEN_ASSERT(m_ReferenceCount > 0);
m_ReferenceCount--;
- m_NextReferenceHashesIndexes[CurrentIndex] = NoReferencesIndex;
+ m_NextReferenceHashesIndexes[CurrentIndex] = ReferenceIndex::None();
}
CurrentIndex = NextIndex;
}
}
void
-ZenCacheDiskLayer::CacheBucket::RemoveReferences(RwLock::ExclusiveLockScope&, std::size_t& FirstReferenceIndex)
+ZenCacheDiskLayer::CacheBucket::RemoveReferences(RwLock::ExclusiveLockScope&, ReferenceIndex& FirstReferenceIndex)
{
- if (FirstReferenceIndex == UnknownReferencesIndex)
+ if (FirstReferenceIndex == ReferenceIndex::Unknown())
{
return;
}
- size_t CurrentIndex = FirstReferenceIndex;
- while (CurrentIndex != NoReferencesIndex)
+ ReferenceIndex CurrentIndex = FirstReferenceIndex;
+	ReferenceIndex CurrentIndex = FirstReferenceIndex;
+	while (CurrentIndex != ReferenceIndex::None())
{
m_ReferenceHashes[CurrentIndex] = IoHash::Zero;
ZEN_ASSERT(m_ReferenceCount > 0);
m_ReferenceCount--;
CurrentIndex = m_NextReferenceHashesIndexes[CurrentIndex];
}
- FirstReferenceIndex = UnknownReferencesIndex;
+	FirstReferenceIndex = ReferenceIndex::Unknown();
}
bool
-ZenCacheDiskLayer::CacheBucket::LockedGetReferences(std::size_t FirstReferenceIndex, std::vector<IoHash>& OutReferences) const
+ZenCacheDiskLayer::CacheBucket::LockedGetReferences(ReferenceIndex FirstReferenceIndex, std::vector<IoHash>& OutReferences) const
{
- if (FirstReferenceIndex == UnknownReferencesIndex)
+ if (FirstReferenceIndex == ReferenceIndex::Unknown())
{
return false;
}
- size_t CurrentIndex = FirstReferenceIndex;
- while (CurrentIndex != NoReferencesIndex)
+ ReferenceIndex CurrentIndex = FirstReferenceIndex;
+ while (CurrentIndex != ReferenceIndex::None())
{
ZEN_ASSERT_SLOW(m_ReferenceHashes[CurrentIndex] != IoHash::Zero);
OutReferences.push_back(m_ReferenceHashes[CurrentIndex]);
@@ -2819,17 +2904,17 @@ ZenCacheDiskLayer::CacheBucket::ClearReferenceCache()
}
void
-ZenCacheDiskLayer::CacheBucket::CompactState(std::vector<BucketPayload>& Payloads,
- std::vector<AccessTime>& AccessTimes,
- std::vector<IoBuffer>& CachedPayloads,
- std::vector<size_t>& FirstReferenceIndex,
- IndexMap& Index,
- RwLock::ExclusiveLockScope& IndexLock)
+ZenCacheDiskLayer::CacheBucket::CompactState(std::vector<BucketPayload>& Payloads,
+ std::vector<AccessTime>& AccessTimes,
+ std::vector<BucketMetaData>& MetaDatas,
+ std::vector<IoBuffer>& MemCachedPayloads,
+ std::vector<ReferenceIndex>& FirstReferenceIndex,
+ IndexMap& Index,
+ RwLock::ExclusiveLockScope& IndexLock)
{
size_t EntryCount = m_Index.size();
Payloads.reserve(EntryCount);
AccessTimes.reserve(EntryCount);
- CachedPayloads.reserve(EntryCount);
if (m_Configuration.EnableReferenceCaching)
{
FirstReferenceIndex.reserve(EntryCount);
@@ -2837,12 +2922,19 @@ ZenCacheDiskLayer::CacheBucket::CompactState(std::vector<BucketPayload>& Payload
Index.reserve(EntryCount);
for (auto It : m_Index)
{
- size_t EntryIndex = Payloads.size();
+ PayloadIndex EntryIndex = PayloadIndex(Payloads.size());
Payloads.push_back(m_Payloads[It.second]);
+ BucketPayload& Payload = Payloads.back();
AccessTimes.push_back(m_AccessTimes[It.second]);
- if (m_Configuration.MemCacheSizeThreshold > 0)
+ if (Payload.MetaData)
+ {
+ MetaDatas.push_back(m_MetaDatas[Payload.MetaData]);
+ Payload.MetaData = MetaDataIndex(m_MetaDatas.size() - 1);
+ }
+ if (Payload.MemCached)
{
- CachedPayloads.push_back(std::move(m_CachedPayloads[It.second]));
+ MemCachedPayloads.push_back(std::move(m_MemCachedPayloads[Payload.MemCached]));
+ Payload.MemCached = MemCachedIndex(gsl::narrow<uint32_t>(MemCachedPayloads.size() - 1));
}
if (m_Configuration.EnableReferenceCaching)
{
@@ -2853,10 +2945,12 @@ ZenCacheDiskLayer::CacheBucket::CompactState(std::vector<BucketPayload>& Payload
m_Index.swap(Index);
m_Payloads.swap(Payloads);
m_AccessTimes.swap(AccessTimes);
- if (m_Configuration.MemCacheSizeThreshold > 0)
- {
- m_CachedPayloads.swap(CachedPayloads);
- }
+ m_MetaDatas.swap(MetaDatas);
+ m_FreeMetaDatas.clear();
+ m_FreeMetaDatas.shrink_to_fit();
+ m_MemCachedPayloads.swap(MemCachedPayloads);
+ m_FreeMemCachedPayloads.clear();
+	m_FreeMemCachedPayloads.shrink_to_fit();
if (m_Configuration.EnableReferenceCaching)
{
m_FirstReferenceIndex.swap(FirstReferenceIndex);