aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-02-20 16:18:06 +0000
committerGitHub <[email protected]>2023-02-20 08:18:06 -0800
commit71fc4c9b2ca23d3fb7db3bbe46056527170e0309 (patch)
tree1d1aba9309d2a4552ed3e8ec7d9785bf69d1ce5e
parentEnhanced rpc request recording (#229) (diff)
downloadzen-0.2.4-pre0.tar.xz
zen-0.2.4-pre0.zip
Refactor CacheBuckets to allow for storing RawHash/RawSize (#232)v0.2.4-pre0v0.2.4
* refactored MemoryCacheBucket to allow for storing RawHash/RawSize. * remove redundant conversions in AccessTime * reduce max count for memory cache bucket to 32-bit value * refactored DiskCacheBucket to allow for storing RawHash/RawSize.
-rw-r--r--zenserver/cache/structuredcachestore.cpp249
-rw-r--r--zenserver/cache/structuredcachestore.h107
2 files changed, 258 insertions, 98 deletions
diff --git a/zenserver/cache/structuredcachestore.cpp b/zenserver/cache/structuredcachestore.cpp
index b32497a30..d93c54a06 100644
--- a/zenserver/cache/structuredcachestore.cpp
+++ b/zenserver/cache/structuredcachestore.cpp
@@ -22,6 +22,8 @@
#include <xxhash.h>
+#include <limits>
+
#if ZEN_PLATFORM_WINDOWS
# include <zencore/windows.h>
#endif
@@ -574,7 +576,8 @@ ZenCacheMemoryLayer::CacheBucket::Scrub(ScrubContext& Ctx)
for (auto& Kv : m_CacheMap)
{
- if (!ValidateEntry(Kv.first, Kv.second.Payload.GetContentType(), Kv.second.Payload))
+ const BucketPayload& Payload = m_Payloads[Kv.second];
+ if (!ValidateEntry(Kv.first, Payload.Payload.GetContentType(), Payload.Payload))
{
BadHashes.push_back(Kv.first);
}
@@ -590,8 +593,8 @@ void
ZenCacheMemoryLayer::CacheBucket::GatherAccessTimes(std::vector<zen::access_tracking::KeyAccessTime>& AccessTimes)
{
RwLock::SharedLockScope _(m_BucketLock);
- std::transform(m_CacheMap.begin(), m_CacheMap.end(), std::back_inserter(AccessTimes), [](const auto& Kv) {
- return access_tracking::KeyAccessTime{.Key = Kv.first, .LastAccess = Kv.second.LastAccess};
+ std::transform(m_CacheMap.begin(), m_CacheMap.end(), std::back_inserter(AccessTimes), [this](const auto& Kv) {
+ return access_tracking::KeyAccessTime{.Key = Kv.first, .LastAccess = m_AccessTimes[Kv.second]};
});
}
@@ -602,10 +605,12 @@ ZenCacheMemoryLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutV
if (auto It = m_CacheMap.find(HashKey); It != m_CacheMap.end())
{
- BucketValue& Value = It.value();
- OutValue.Value = Value.Payload;
+ uint32_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_Payloads.size());
+ ZEN_ASSERT_SLOW(m_AccessTimes.size() == m_Payloads.size());
- Value.LastAccess.store(GcClock::TickCount(), std::memory_order_relaxed);
+ OutValue.Value = m_Payloads[EntryIndex].Payload;
+ m_AccessTimes[EntryIndex] = GcClock::TickCount();
return true;
}
@@ -616,21 +621,48 @@ ZenCacheMemoryLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutV
void
ZenCacheMemoryLayer::CacheBucket::Put(const IoHash& HashKey, const ZenCacheValue& Value)
{
- {
- BucketValue IndexValue(Value.Value, GcClock::TickCount());
+ size_t PayloadSize = Value.Value.GetSize();
+ IoHash RawHash = IoHash::Zero;
+ uint32_t RawSize = 0u;
+    // TODO: Temporary hack - this should really come from the caller, as they have likely already computed at least RawHash (via the attachment message)
+ // if (Value.Value.GetContentType() == ZenContentType::kCompressedBinary)
+ // {
+ // CompressedBuffer Compressed = CompressedBuffer::FromCompressedNoValidate(IoBuffer(Value.Value));
+ // RawHash = Compressed.DecodeRawHash();
+ // RawSize = gsl::narrow<uint32_t>(Compressed.DecodeRawSize());
+ // }
+ {
+ GcClock::Tick AccessTime = GcClock::TickCount();
RwLock::ExclusiveLockScope _(m_BucketLock);
+ if (m_CacheMap.size() == std::numeric_limits<uint32_t>::max())
+ {
+ // No more space in our memory cache!
+ return;
+ }
if (auto It = m_CacheMap.find(HashKey); It != m_CacheMap.end())
{
- m_TotalSize.fetch_sub(It->second.Payload.GetSize(), std::memory_order::relaxed);
- It.value() = std::move(IndexValue);
+ uint32_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_Payloads.size());
+
+ m_TotalSize.fetch_sub(PayloadSize, std::memory_order::relaxed);
+ BucketPayload& Payload = m_Payloads[EntryIndex];
+ Payload.Payload = Value.Value;
+ Payload.RawHash = RawHash;
+ Payload.RawSize = RawSize;
+ m_AccessTimes[EntryIndex] = AccessTime;
}
else
{
- m_CacheMap.insert_or_assign(HashKey, std::move(IndexValue));
+ uint32_t EntryIndex = gsl::narrow<uint32_t>(m_Payloads.size());
+ m_Payloads.emplace_back(BucketPayload{.Payload = Value.Value, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(AccessTime);
+ m_CacheMap.insert_or_assign(HashKey, EntryIndex);
}
+ ZEN_ASSERT_SLOW(m_Payloads.size() == m_CacheMap.size());
+ ZEN_ASSERT_SLOW(m_AccessTimes.size() == m_Payloads.size());
}
- m_TotalSize.fetch_add(Value.Value.GetSize(), std::memory_order::relaxed);
+ m_TotalSize.fetch_add(PayloadSize, std::memory_order::relaxed);
}
void
@@ -638,6 +670,8 @@ ZenCacheMemoryLayer::CacheBucket::Drop()
{
RwLock::ExclusiveLockScope _(m_BucketLock);
m_CacheMap.clear();
+ m_AccessTimes.clear();
+ m_Payloads.clear();
m_TotalSize.store(0);
}
@@ -705,7 +739,9 @@ ZenCacheDiskLayer::CacheBucket::OpenOrCreate(std::filesystem::path BucketDir, bo
if (auto It = m_Index.find(Key); It != m_Index.end())
{
- It.value().LastAccess.store(Obj["LastAccess"sv].AsInt64(), std::memory_order_relaxed);
+ size_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
+ m_AccessTimes[EntryIndex] = Obj["LastAccess"sv].AsInt64();
}
}
@@ -759,7 +795,10 @@ ZenCacheDiskLayer::CacheBucket::MakeIndexSnapshot()
{
DiskIndexEntry& IndexEntry = Entries[EntryIndex++];
IndexEntry.Key = Entry.first;
- IndexEntry.Location = Entry.second.Location;
+ IndexEntry.Location = m_Payloads[Entry.second].Location;
+ // TODO: Update DiskIndexEntry
+ // IndexEntry.RawHash = m_Payloads[Entry.second].RawHash;
+ // IndexEntry.RawSize = m_Payloads[Entry.second].RawSize;
}
}
@@ -835,7 +874,13 @@ ZenCacheDiskLayer::CacheBucket::ReadIndexFile()
ZEN_WARN("skipping invalid entry in '{}', reason: '{}'", IndexPath, InvalidEntryReason);
continue;
}
- m_Index.insert_or_assign(Entry.Key, IndexEntry(Entry.Location, GcClock::TickCount()));
+ size_t EntryIndex = m_Payloads.size();
+ // TODO: Get from stored index or check payload to get the relevant info
+ IoHash RawHash = IoHash::Zero;
+ uint64_t RawSize = 0u;
+ m_Payloads.emplace_back(BucketPayload{.Location = Entry.Location, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_Index.insert_or_assign(Entry.Key, EntryIndex);
}
return Header.LogPosition;
@@ -887,7 +932,13 @@ ZenCacheDiskLayer::CacheBucket::ReadLog(uint64_t SkipEntryCount)
++InvalidEntryCount;
return;
}
- m_Index.insert_or_assign(Record.Key, IndexEntry(Record.Location, GcClock::TickCount()));
+ size_t EntryIndex = m_Payloads.size();
+ // TODO: Get from stored index or check payload to get the relevant info
+ IoHash RawHash = IoHash::Zero;
+ uint64_t RawSize = 0u;
+ m_Payloads.emplace_back(BucketPayload{.Location = Record.Location, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_Index.insert_or_assign(Record.Key, EntryIndex);
},
SkipEntryCount);
if (InvalidEntryCount)
@@ -930,7 +981,8 @@ ZenCacheDiskLayer::CacheBucket::OpenLog(const fs::path& BucketDir, const bool Is
KnownLocations.reserve(m_Index.size());
for (const auto& Entry : m_Index)
{
- const DiskLocation& Location = Entry.second.Location;
+ size_t EntryIndex = Entry.second;
+ const DiskLocation& Location = m_Payloads[EntryIndex].Location;
if (Location.IsFlagSet(DiskLocation::kStandaloneFile))
{
m_TotalStandaloneSize.fetch_add(Location.Size(), std::memory_order::relaxed);
@@ -1009,9 +1061,10 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal
{
return false;
}
- IndexEntry& Entry = It.value();
- Entry.LastAccess.store(GcClock::TickCount(), std::memory_order_relaxed);
- DiskLocation Location = Entry.Location;
+ size_t EntryIndex = It.value();
+ BucketPayload Payload = m_Payloads[EntryIndex];
+ m_AccessTimes[EntryIndex] = GcClock::TickCount();
+ DiskLocation Location = Payload.Location;
if (Location.IsFlagSet(DiskLocation::kStandaloneFile))
{
// We don't need to hold the index lock when we read a standalone file
@@ -1075,12 +1128,12 @@ ZenCacheDiskLayer::CacheBucket::SaveManifest()
Writer.BeginArray("Timestamps"sv);
for (auto& Kv : m_Index)
{
- const IoHash& Key = Kv.first;
- const IndexEntry& Entry = Kv.second;
+ const IoHash& Key = Kv.first;
+ GcClock::Tick AccessTime = m_AccessTimes[Kv.second];
Writer.BeginObject();
Writer << "Key"sv << Key;
- Writer << "LastAccess"sv << Entry.LastAccess;
+ Writer << "LastAccess"sv << AccessTime;
Writer.EndObject();
}
Writer.EndArray();
@@ -1127,8 +1180,9 @@ ZenCacheDiskLayer::CacheBucket::Scrub(ScrubContext& Ctx)
for (auto& Kv : m_Index)
{
- const IoHash& HashKey = Kv.first;
- const DiskLocation& Loc = Kv.second.Location;
+ const IoHash& HashKey = Kv.first;
+ const BucketPayload& Payload = m_Payloads[Kv.second];
+ const DiskLocation& Loc = Payload.Location;
if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
{
@@ -1189,7 +1243,8 @@ ZenCacheDiskLayer::CacheBucket::Scrub(ScrubContext& Ctx)
BadKeys.push_back(Hash);
return;
}
- ZenContentType ContentType = m_Index.at(Hash).Location.GetContentType();
+ const BucketPayload& Payload = m_Payloads[m_Index.at(Hash)];
+ ZenContentType ContentType = Payload.Location.GetContentType();
if (!ValidateEntry(Hash, ContentType, Buffer))
{
BadKeys.push_back(Hash);
@@ -1208,7 +1263,8 @@ ZenCacheDiskLayer::CacheBucket::Scrub(ScrubContext& Ctx)
BadKeys.push_back(Hash);
return;
}
- ZenContentType ContentType = m_Index.at(Hash).Location.GetContentType();
+ const BucketPayload& Payload = m_Payloads[m_Index.at(Hash)];
+ ZenContentType ContentType = Payload.Location.GetContentType();
if (!ValidateEntry(Hash, ContentType, Buffer))
{
BadKeys.push_back(Hash);
@@ -1239,7 +1295,7 @@ ZenCacheDiskLayer::CacheBucket::Scrub(ScrubContext& Ctx)
{
// Log a tombstone and delete the in-memory index for the bad entry
const auto It = m_Index.find(BadKey);
- DiskLocation Location = It->second.Location;
+ DiskLocation Location = m_Payloads[It->second].Location;
Location.Flags |= DiskLocation::kTombStone;
LogEntries.push_back(DiskIndexEntry{.Key = BadKey, .Location = Location});
m_Index.erase(BadKey);
@@ -1263,6 +1319,31 @@ ZenCacheDiskLayer::CacheBucket::Scrub(ScrubContext& Ctx)
}
}
m_SlogFile.Append(LogEntries);
+
+ // Clean up m_AccessTimes and m_Payloads vectors
+ {
+ std::vector<BucketPayload> Payloads;
+ std::vector<AccessTime> AccessTimes;
+ IndexMap Index;
+
+ {
+ RwLock::ExclusiveLockScope __(m_IndexLock);
+ size_t EntryCount = m_Index.size();
+ Payloads.reserve(EntryCount);
+ AccessTimes.reserve(EntryCount);
+ Index.reserve(EntryCount);
+ for (auto It : m_Index)
+ {
+ size_t EntryIndex = Payloads.size();
+ Payloads.push_back(m_Payloads[EntryIndex]);
+ AccessTimes.push_back(m_AccessTimes[EntryIndex]);
+ Index.insert({It.first, EntryIndex});
+ }
+ m_Index.swap(Index);
+ m_Payloads.swap(Payloads);
+ m_AccessTimes.swap(AccessTimes);
+ }
+ }
}
}
@@ -1319,14 +1400,15 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx)
for (const auto& Entry : Index)
{
- const IoHash& Key = Entry.first;
- if (Entry.second.LastAccess < ExpireTicks)
+ const IoHash& Key = Entry.first;
+ GcClock::Tick AccessTime = m_AccessTimes[Entry.second];
+ if (AccessTime < ExpireTicks)
{
ExpiredKeys.push_back(Key);
continue;
}
- const DiskLocation& Loc = Entry.second.Location;
+ const DiskLocation& Loc = m_Payloads[Entry.second].Location;
if (Loc.IsFlagSet(DiskLocation::kStructured))
{
@@ -1387,8 +1469,8 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
uint64_t DeletedSize = 0;
uint64_t OldTotalSize = TotalSize();
- uint64_t DeletedCount = 0;
- uint64_t MovedCount = 0;
+ std::unordered_set<IoHash> DeletedChunks;
+ uint64_t MovedCount = 0;
const auto _ = MakeGuard([&] {
ZEN_DEBUG(
@@ -1403,7 +1485,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
NiceLatencyNs(ReadBlockTimeUs),
NiceLatencyNs(ReadBlockLongestTimeUs),
NiceBytes(DeletedSize),
- DeletedCount,
+ DeletedChunks.size(),
MovedCount,
TotalChunkCount,
NiceBytes(OldTotalSize));
@@ -1429,6 +1511,41 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
return;
}
+ auto __ = MakeGuard([&]() {
+ if (!DeletedChunks.empty())
+ {
+ // Clean up m_AccessTimes and m_Payloads vectors
+ std::vector<BucketPayload> Payloads;
+ std::vector<AccessTime> AccessTimes;
+ IndexMap Index;
+
+ {
+ RwLock::ExclusiveLockScope _(m_IndexLock);
+ Stopwatch Timer;
+ const auto ___ = MakeGuard([&] {
+ uint64_t ElapsedUs = Timer.GetElapsedTimeUs();
+ WriteBlockTimeUs += ElapsedUs;
+ WriteBlockLongestTimeUs = std::max(ElapsedUs, WriteBlockLongestTimeUs);
+ });
+ size_t EntryCount = m_Index.size();
+ Payloads.reserve(EntryCount);
+ AccessTimes.reserve(EntryCount);
+ Index.reserve(EntryCount);
+ for (auto It : m_Index)
+ {
+ size_t EntryIndex = Payloads.size();
+ Payloads.push_back(m_Payloads[EntryIndex]);
+ AccessTimes.push_back(m_AccessTimes[EntryIndex]);
+ Index.insert({It.first, EntryIndex});
+ }
+ m_Index.swap(Index);
+ m_Payloads.swap(Payloads);
+ m_AccessTimes.swap(AccessTimes);
+ }
+ GcCtx.AddDeletedCids(std::vector<IoHash>(DeletedChunks.begin(), DeletedChunks.end()));
+ }
+ });
+
std::vector<DiskIndexEntry> ExpiredStandaloneEntries;
IndexMap Index;
BlockStore::ReclaimSnapshotState BlockStoreState;
@@ -1454,7 +1571,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
{
if (auto It = Index.find(Key); It != Index.end())
{
- DiskIndexEntry Entry = {.Key = It->first, .Location = It->second.Location};
+ DiskIndexEntry Entry = {.Key = It->first, .Location = m_Payloads[It->second].Location};
if (Entry.Location.Flags & DiskLocation::kStandaloneFile)
{
Entry.Location.Flags |= DiskLocation::kTombStone;
@@ -1468,6 +1585,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
{
m_Index.erase(Entry.Key);
m_TotalStandaloneSize.fetch_sub(Entry.Location.Size(), std::memory_order::relaxed);
+ DeletedChunks.insert(Entry.Key);
}
m_SlogFile.Append(ExpiredStandaloneEntries);
}
@@ -1530,12 +1648,18 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
continue;
}
m_SlogFile.Append(DiskIndexEntry{.Key = Key, .Location = RestoreLocation});
- m_Index.insert({Key, {Loc, GcClock::TickCount()}});
+ size_t EntryIndex = m_Payloads.size();
+ // TODO: Get from stored index or check payload to get the relevant info
+ IoHash RawHash = IoHash::Zero;
+ uint64_t RawSize = 0u;
+ m_Payloads.emplace_back(BucketPayload{.Location = RestoreLocation, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_Index.insert({Key, EntryIndex});
m_TotalStandaloneSize.fetch_add(RestoreLocation.Size(), std::memory_order::relaxed);
+ DeletedChunks.erase(Key);
continue;
}
DeletedSize += Entry.Location.Size();
- DeletedCount++;
}
}
@@ -1545,7 +1669,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
TotalChunkHashes.reserve(TotalChunkCount);
for (const auto& Entry : Index)
{
- const DiskLocation& Location = Entry.second.Location;
+ const DiskLocation& Location = m_Payloads[Entry.second].Location;
if (Location.Flags & DiskLocation::kStandaloneFile)
{
@@ -1569,7 +1693,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
GcCtx.FilterCids(TotalChunkHashes, [&](const IoHash& ChunkHash, bool Keep) {
auto KeyIt = Index.find(ChunkHash);
- const DiskLocation& DiskLocation = KeyIt->second.Location;
+ const DiskLocation& DiskLocation = m_Payloads[KeyIt->second].Location;
BlockStoreLocation Location = DiskLocation.GetBlockLocation(m_PayloadAlignment);
size_t ChunkIndex = ChunkLocations.size();
ChunkLocations.push_back(Location);
@@ -1596,7 +1720,6 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
return;
}
- std::vector<IoHash> DeletedChunks;
m_BlockStore.ReclaimSpace(
BlockStoreState,
ChunkLocations,
@@ -1611,19 +1734,19 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
size_t ChunkIndex = Entry.first;
const BlockStoreLocation& NewLocation = Entry.second;
const IoHash& ChunkHash = ChunkIndexToChunkHash[ChunkIndex];
- const DiskLocation& OldDiskLocation = Index[ChunkHash].Location;
+ const DiskLocation& OldDiskLocation = m_Payloads[Index[ChunkHash]].Location;
LogEntries.push_back(
{.Key = ChunkHash, .Location = DiskLocation(NewLocation, m_PayloadAlignment, OldDiskLocation.GetFlags())});
}
for (const size_t ChunkIndex : RemovedChunks)
{
const IoHash& ChunkHash = ChunkIndexToChunkHash[ChunkIndex];
- const DiskLocation& OldDiskLocation = Index[ChunkHash].Location;
+ const DiskLocation& OldDiskLocation = m_Payloads[Index[ChunkHash]].Location;
LogEntries.push_back({.Key = ChunkHash,
.Location = DiskLocation(OldDiskLocation.GetBlockLocation(m_PayloadAlignment),
m_PayloadAlignment,
OldDiskLocation.GetFlags() | DiskLocation::kTombStone)});
- DeletedChunks.push_back(ChunkHash);
+ DeletedChunks.insert(ChunkHash);
}
m_SlogFile.Append(LogEntries);
@@ -1643,13 +1766,11 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx)
m_Index.erase(Entry.Key);
continue;
}
- m_Index[Entry.Key].Location = Entry.Location;
+ m_Payloads[m_Index[Entry.Key]].Location = Entry.Location;
}
}
},
[&]() { return GcCtx.CollectSmallObjects(); });
-
- GcCtx.AddDeletedCids(DeletedChunks);
}
void
@@ -1661,8 +1782,9 @@ ZenCacheDiskLayer::CacheBucket::UpdateAccessTimes(const std::vector<zen::access_
{
if (auto It = m_Index.find(KeyTime.Key); It != m_Index.end())
{
- IndexEntry& Entry = It.value();
- Entry.LastAccess.store(KeyTime.LastAccess, std::memory_order_relaxed);
+ size_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
+ m_AccessTimes[EntryIndex] = KeyTime.LastAccess;
}
}
}
@@ -1807,19 +1929,27 @@ ZenCacheDiskLayer::CacheBucket::PutStandaloneCacheValue(const IoHash& HashKey, c
}
DiskLocation Loc(NewFileSize, EntryFlags);
- IndexEntry Entry = IndexEntry(Loc, GcClock::TickCount());
+ // TODO: Get from caller input
+ IoHash RawHash = IoHash::Zero;
+ size_t RawSize = 0u;
RwLock::ExclusiveLockScope _(m_IndexLock);
if (auto It = m_Index.find(HashKey); It == m_Index.end())
{
// Previously unknown object
- m_Index.insert_or_assign(HashKey, std::move(Entry));
+ size_t EntryIndex = m_Payloads.size();
+ m_Payloads.emplace_back(BucketPayload{.Location = Loc, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_Index.insert_or_assign(HashKey, EntryIndex);
}
else
{
// TODO: should check if write is idempotent and bail out if it is?
- m_TotalStandaloneSize.fetch_sub(It.value().Location.Size(), std::memory_order::relaxed);
- It.value() = std::move(Entry);
+ size_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
+ m_Payloads[EntryIndex] = BucketPayload{.Location = Loc, .RawSize = RawSize, .RawHash = RawHash};
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_TotalStandaloneSize.fetch_sub(Loc.Size(), std::memory_order::relaxed);
}
m_SlogFile.Append({.Key = HashKey, .Location = Loc});
@@ -1844,19 +1974,26 @@ ZenCacheDiskLayer::CacheBucket::PutInlineCacheValue(const IoHash& HashKey, const
DiskLocation Location(BlockStoreLocation, m_PayloadAlignment, EntryFlags);
const DiskIndexEntry DiskIndexEntry{.Key = HashKey, .Location = Location};
m_SlogFile.Append(DiskIndexEntry);
+ // TODO: Get from caller input
+ IoHash RawHash = IoHash::Zero;
+ uint64_t RawSize = 0u;
RwLock::ExclusiveLockScope _(m_IndexLock);
if (auto It = m_Index.find(HashKey); It != m_Index.end())
{
// TODO: should check if write is idempotent and bail out if it is?
// this would require comparing contents on disk unless we add a
// content hash to the index entry
- IndexEntry& Entry = It.value();
- Entry.Location = Location;
- Entry.LastAccess.store(GcClock::TickCount(), std::memory_order_relaxed);
+ size_t EntryIndex = It.value();
+ ZEN_ASSERT_SLOW(EntryIndex < m_AccessTimes.size());
+ m_Payloads[EntryIndex] = (BucketPayload{.Location = Location, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes[EntryIndex] = GcClock::TickCount();
}
else
{
- m_Index.insert({HashKey, {Location, GcClock::TickCount()}});
+ size_t EntryIndex = m_Payloads.size();
+ m_Payloads.emplace_back(BucketPayload{.Location = Location, .RawSize = RawSize, .RawHash = RawHash});
+ m_AccessTimes.emplace_back(GcClock::TickCount());
+ m_Index.insert_or_assign(HashKey, EntryIndex);
}
});
}
diff --git a/zenserver/cache/structuredcachestore.h b/zenserver/cache/structuredcachestore.h
index e6e9942bb..3d2896ff8 100644
--- a/zenserver/cache/structuredcachestore.h
+++ b/zenserver/cache/structuredcachestore.h
@@ -132,13 +132,48 @@ struct DiskIndexEntry
static_assert(sizeof(DiskIndexEntry) == 32);
+// This stores the access time internally as seconds since epoch in a 32-bit value, giving us a range of 136 years since epoch
+struct AccessTime
+{
+ explicit AccessTime(GcClock::Tick Tick) noexcept : SecondsSinceEpoch(ToSeconds(Tick)) {}
+ AccessTime& operator=(GcClock::Tick Tick) noexcept
+ {
+ SecondsSinceEpoch.store(ToSeconds(Tick), std::memory_order_relaxed);
+ return *this;
+ }
+ operator GcClock::Tick() const noexcept
+ {
+ return std::chrono::duration_cast<GcClock::Duration>(std::chrono::seconds(SecondsSinceEpoch.load(std::memory_order_relaxed)))
+ .count();
+ }
+
+ AccessTime(AccessTime&& Rhs) noexcept : SecondsSinceEpoch(Rhs.SecondsSinceEpoch.load(std::memory_order_relaxed)) {}
+ AccessTime(const AccessTime& Rhs) noexcept : SecondsSinceEpoch(Rhs.SecondsSinceEpoch.load(std::memory_order_relaxed)) {}
+ AccessTime& operator=(AccessTime&& Rhs) noexcept
+ {
+ SecondsSinceEpoch.store(Rhs.SecondsSinceEpoch.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ return *this;
+ }
+ AccessTime& operator=(const AccessTime& Rhs) noexcept
+ {
+ SecondsSinceEpoch.store(Rhs.SecondsSinceEpoch.load(std::memory_order_relaxed), std::memory_order_relaxed);
+ return *this;
+ }
+
+private:
+ static uint32_t ToSeconds(GcClock::Tick Tick)
+ {
+ return gsl::narrow<uint32_t>(std::chrono::duration_cast<std::chrono::seconds>(GcClock::Duration(Tick)).count());
+ }
+ std::atomic_uint32_t SecondsSinceEpoch;
+};
+
/** In-memory cache storage
Intended for small values which are frequently accessed.
This should have a better memory management policy to maintain reasonable
footprint.
-
*/
class ZenCacheMemoryLayer
{
@@ -184,28 +219,24 @@ public:
private:
struct CacheBucket
{
- struct BucketValue
+#pragma pack(push)
+#pragma pack(1)
+ struct BucketPayload
{
- IoBuffer Payload;
- std::atomic_int64_t LastAccess;
-
- BucketValue() : Payload(), LastAccess() {}
- BucketValue(IoBuffer Value, const int64_t Timestamp) : Payload(Value), LastAccess(Timestamp) {}
- BucketValue(const BucketValue& V) : Payload(V.Payload), LastAccess(V.LastAccess.load(std::memory_order_relaxed)) {}
- BucketValue(BucketValue&& V) : Payload(std::move(V.Payload)), LastAccess(V.LastAccess.load(std::memory_order_relaxed)) {}
-
- BucketValue& operator=(const BucketValue& V) { return *this = BucketValue(V); }
- BucketValue& operator=(BucketValue&& V)
- {
- Payload = std::move(V.Payload);
- LastAccess.store(V.LastAccess.load(), std::memory_order_relaxed);
- return *this;
- }
+ IoBuffer Payload; // 8
+ uint32_t RawSize; // 4
+ IoHash RawHash; // 20
};
+#pragma pack(pop)
+ static_assert(sizeof(BucketPayload) == 32u);
+ static_assert(sizeof(AccessTime) == 4u);
+
+ mutable RwLock m_BucketLock;
+ std::vector<AccessTime> m_AccessTimes;
+ std::vector<BucketPayload> m_Payloads;
+ tsl::robin_map<IoHash, uint32_t> m_CacheMap;
- mutable RwLock m_BucketLock;
- tsl::robin_map<IoHash, BucketValue> m_CacheMap;
- std::atomic_uint64_t m_TotalSize{};
+ std::atomic_uint64_t m_TotalSize{};
bool Get(const IoHash& HashKey, ZenCacheValue& OutValue);
void Put(const IoHash& HashKey, const ZenCacheValue& Value);
@@ -259,7 +290,6 @@ public:
void GatherReferences(GcContext& GcCtx);
void CollectGarbage(GcContext& GcCtx);
void UpdateAccessTimes(const zen::access_tracking::AccessTimes& AccessTimes);
- // void IterateBuckets(const std::function<void(std::string_view Bucket)>& Callback) const;
void DiscoverBuckets();
uint64_t TotalSize() const;
@@ -305,31 +335,24 @@ private:
TCasLogFile<DiskIndexEntry> m_SlogFile;
uint64_t m_LogFlushPosition = 0;
- struct IndexEntry
+#pragma pack(push)
+#pragma pack(1)
+ struct BucketPayload
{
- DiskLocation Location;
- std::atomic_int64_t LastAccess;
-
- IndexEntry() : Location(), LastAccess() {}
- IndexEntry(const DiskLocation& Loc, const int64_t Timestamp) : Location(Loc), LastAccess(Timestamp) {}
- IndexEntry(const IndexEntry& E) : Location(E.Location), LastAccess(E.LastAccess.load(std::memory_order_relaxed)) {}
- IndexEntry(IndexEntry&& E) noexcept : Location(std::move(E.Location)), LastAccess(E.LastAccess.load(std::memory_order_relaxed))
- {
- }
-
- IndexEntry& operator=(const IndexEntry& E) { return *this = IndexEntry(E); }
- IndexEntry& operator=(IndexEntry&& E) noexcept
- {
- Location = std::move(E.Location);
- LastAccess.store(E.LastAccess.load(), std::memory_order_relaxed);
- return *this;
- }
+ DiskLocation Location; // 12
+ uint64_t RawSize; // 8
+ IoHash RawHash; // 20
};
+#pragma pack(pop)
+ static_assert(sizeof(BucketPayload) == 40u);
+ static_assert(sizeof(AccessTime) == 4u);
- using IndexMap = tsl::robin_map<IoHash, IndexEntry, IoHash::Hasher>;
+ using IndexMap = tsl::robin_map<IoHash, size_t, IoHash::Hasher>;
- mutable RwLock m_IndexLock;
- IndexMap m_Index;
+ mutable RwLock m_IndexLock;
+ std::vector<AccessTime> m_AccessTimes;
+ std::vector<BucketPayload> m_Payloads;
+ IndexMap m_Index;
std::atomic_uint64_t m_TotalStandaloneSize{};