From c6cce91a514ba747b19f4fe8acfd2443405c960d Mon Sep 17 00:00:00 2001 From: Dan Engelbrecht Date: Mon, 11 Dec 2023 06:36:48 -0500 Subject: mem cache perf improvements (#592) - Improvement: Refactor memory cache for faster trimming and correct trim reporting - Improvement: Added trace scopes for memory cache trimming Adding a link back to the cache item payload on the memory cache item allows us to iterate over only the items cached in memory instead of over the entire index. This also allows us to do efficient compact of the memory cache array when trimming. It adds 4 bytes of overhead to each item cached in memory. --- src/zenserver/cache/cachedisklayer.cpp | 236 ++++++++++++++++++--------------- 1 file changed, 132 insertions(+), 104 deletions(-) (limited to 'src/zenserver/cache/cachedisklayer.cpp') diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp index 13f3c9e58..0987cd0f1 100644 --- a/src/zenserver/cache/cachedisklayer.cpp +++ b/src/zenserver/cache/cachedisklayer.cpp @@ -209,9 +209,6 @@ namespace { zen::Sleep(100); } while (true); } - - uint64_t EstimateMemCachePayloadMemory(uint64_t PayloadSize) { return 8u + 32u + RoundUp(PayloadSize, 8u); } - } // namespace namespace fs = std::filesystem; @@ -1189,7 +1186,7 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal return false; } - size_t EntryIndex = It.value(); + PayloadIndex EntryIndex = It.value(); m_AccessTimes[EntryIndex] = GcClock::TickCount(); DiskLocation Location = m_Payloads[EntryIndex].Location; @@ -1206,7 +1203,7 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal if (Payload->MemCached) { - OutValue.Value = m_MemCachedPayloads[Payload->MemCached]; + OutValue.Value = m_MemCachedPayloads[Payload->MemCached].Payload; Payload = nullptr; IndexLock.ReleaseNow(); m_MemoryHitCount++; @@ -1240,7 +1237,7 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal // Only update 
if it has not already been updated by other thread if (!WritePayload.MemCached) { - SetMemCachedData(UpdateIndexLock, WritePayload, OutValue.Value); + SetMemCachedData(UpdateIndexLock, UpdateIt->second, OutValue.Value); } } } @@ -1307,64 +1304,84 @@ ZenCacheDiskLayer::CacheBucket::Put(const IoHash& HashKey, const ZenCacheValue& m_DiskWriteCount++; } -void +uint64_t ZenCacheDiskLayer::CacheBucket::MemCacheTrim(GcClock::TimePoint ExpireTime) { + ZEN_TRACE_CPU("Z$::Disk::Bucket::MemCacheTrim"); + + uint64_t Trimmed = 0; GcClock::Tick ExpireTicks = ExpireTime.time_since_epoch().count(); RwLock::ExclusiveLockScope IndexLock(m_IndexLock); - if (m_MemCachedPayloads.empty()) + uint32_t MemCachedCount = gsl::narrow(m_MemCachedPayloads.size()); + if (MemCachedCount == 0) { - return; + return 0; } - for (const auto& Kv : m_Index) + + uint32_t WriteIndex = 0; + for (uint32_t ReadIndex = 0; ReadIndex < MemCachedCount; ++ReadIndex) { - size_t Index = Kv.second; - BucketPayload& Payload = m_Payloads[Index]; - if (!Payload.MemCached) + MemCacheData& Data = m_MemCachedPayloads[ReadIndex]; + if (!Data.Payload) { continue; } - if (m_AccessTimes[Index] < ExpireTicks) + PayloadIndex Index = Data.OwnerIndex; + ZEN_ASSERT_SLOW(m_Payloads[Index].MemCached == MemCachedIndex(ReadIndex)); + GcClock::Tick AccessTime = m_AccessTimes[Index]; + if (AccessTime < ExpireTicks) + { + size_t PayloadSize = Data.Payload.GetSize(); + RemoveMemCacheUsage(EstimateMemCachePayloadMemory(PayloadSize)); + Data = {}; + m_Payloads[Index].MemCached = {}; + Trimmed += PayloadSize; + continue; + } + if (ReadIndex > WriteIndex) { - RemoveMemCachedData(IndexLock, Payload); + m_MemCachedPayloads[WriteIndex] = MemCacheData{.Payload = std::move(Data.Payload), .OwnerIndex = Index}; + m_Payloads[Index].MemCached = MemCachedIndex(WriteIndex); } + WriteIndex++; } + m_MemCachedPayloads.resize(WriteIndex); m_MemCachedPayloads.shrink_to_fit(); - m_FreeMemCachedPayloads.shrink_to_fit(); - m_FreeMetaDatas.shrink_to_fit(); + 
zen::Reset(m_FreeMemCachedPayloads); + return Trimmed; } void -ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint TickStart, - GcClock::Duration SectionLength, - std::vector& InOutUsageSlots) +ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint Now, GcClock::Duration MaxAge, std::vector& InOutUsageSlots) { + ZEN_TRACE_CPU("Z$::Disk::Bucket::GetUsageByAccess"); + + size_t SlotCount = InOutUsageSlots.capacity(); RwLock::SharedLockScope _(m_IndexLock); - if (m_MemCachedPayloads.empty()) + uint32_t MemCachedCount = gsl::narrow(m_MemCachedPayloads.size()); + if (MemCachedCount == 0) { return; } - for (const auto& It : m_Index) + for (uint32_t ReadIndex = 0; ReadIndex < MemCachedCount; ++ReadIndex) { - size_t Index = It.second; - BucketPayload& Payload = m_Payloads[Index]; - if (!Payload.MemCached) + MemCacheData& Data = m_MemCachedPayloads[ReadIndex]; + if (!Data.Payload) { continue; } + PayloadIndex Index = Data.OwnerIndex; + ZEN_ASSERT_SLOW(m_Payloads[Index].MemCached == MemCachedIndex(ReadIndex)); GcClock::TimePoint ItemAccessTime = GcClock::TimePointFromTick(GcClock::Tick(m_AccessTimes[Index])); - GcClock::Duration Age = TickStart.time_since_epoch() - ItemAccessTime.time_since_epoch(); - uint64_t Slot = gsl::narrow(Age.count() > 0 ? Age.count() / SectionLength.count() : 0); - if (Slot >= InOutUsageSlots.capacity()) + GcClock::Duration Age = Now > ItemAccessTime ? Now - ItemAccessTime : GcClock::Duration(0); + size_t Slot = Age < MaxAge ? 
gsl::narrow((Age.count() * SlotCount) / MaxAge.count()) : (SlotCount - 1); + ZEN_ASSERT_SLOW(Slot < SlotCount); + if (Slot >= InOutUsageSlots.size()) { - Slot = InOutUsageSlots.capacity() - 1; + InOutUsageSlots.resize(Slot + 1, 0); } - if (Slot > InOutUsageSlots.size()) - { - InOutUsageSlots.resize(uint64_t(Slot + 1), 0); - } - InOutUsageSlots[Slot] += m_MemCachedPayloads[Payload.MemCached].GetSize(); + InOutUsageSlots[Slot] += EstimateMemCachePayloadMemory(Data.Payload.GetSize()); } } @@ -1823,7 +1840,7 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx) std::vector Payloads; std::vector AccessTimes; std::vector MetaDatas; - std::vector MemCachedPayloads; + std::vector MemCachedPayloads; std::vector FirstReferenceIndex; IndexMap Index; @@ -2002,7 +2019,7 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx) const BucketPayload& CachedPayload = Payloads[It->second]; if (CachedPayload.MemCached) { - Buffer = m_MemCachedPayloads[CachedPayload.MemCached]; + Buffer = m_MemCachedPayloads[CachedPayload.MemCached].Payload; ZEN_ASSERT_SLOW(Buffer); } else @@ -2124,7 +2141,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx) std::vector Payloads; std::vector AccessTimes; std::vector MetaDatas; - std::vector MemCachedPayloads; + std::vector MemCachedPayloads; std::vector FirstReferenceIndex; IndexMap Index; { @@ -2468,7 +2485,10 @@ ZenCacheDiskLayer::CollectGarbage(GcContext& GcCtx) { Bucket->CollectGarbage(GcCtx); } - MemCacheTrim(Buckets, GcCtx.CacheExpireTime()); + if (!m_IsMemCacheTrimming) + { + MemCacheTrim(Buckets, GcCtx.CacheExpireTime()); + } } void @@ -2671,16 +2691,17 @@ ZenCacheDiskLayer::CacheBucket::RemoveMetaData(RwLock::ExclusiveLockScope&, Buck } void -ZenCacheDiskLayer::CacheBucket::SetMemCachedData(RwLock::ExclusiveLockScope&, BucketPayload& Payload, IoBuffer& MemCachedData) +ZenCacheDiskLayer::CacheBucket::SetMemCachedData(RwLock::ExclusiveLockScope&, PayloadIndex PayloadIndex, IoBuffer& MemCachedData) { - 
uint64_t PayloadSize = MemCachedData.GetSize(); + BucketPayload& Payload = m_Payloads[PayloadIndex]; + uint64_t PayloadSize = MemCachedData.GetSize(); ZEN_ASSERT(PayloadSize != 0); if (m_FreeMemCachedPayloads.empty()) { if (m_MemCachedPayloads.size() != std::numeric_limits::max()) { Payload.MemCached = MemCachedIndex(gsl::narrow(m_MemCachedPayloads.size())); - m_MemCachedPayloads.push_back(MemCachedData); + m_MemCachedPayloads.emplace_back(MemCacheData{.Payload = MemCachedData, .OwnerIndex = PayloadIndex}); AddMemCacheUsage(EstimateMemCachePayloadMemory(PayloadSize)); m_MemoryWriteCount++; } @@ -2689,7 +2710,7 @@ ZenCacheDiskLayer::CacheBucket::SetMemCachedData(RwLock::ExclusiveLockScope&, Bu { Payload.MemCached = m_FreeMemCachedPayloads.back(); m_FreeMemCachedPayloads.pop_back(); - m_MemCachedPayloads[Payload.MemCached] = MemCachedData; + m_MemCachedPayloads[Payload.MemCached] = MemCacheData{.Payload = MemCachedData, .OwnerIndex = PayloadIndex}; AddMemCacheUsage(EstimateMemCachePayloadMemory(PayloadSize)); m_MemoryWriteCount++; } @@ -2700,9 +2721,9 @@ ZenCacheDiskLayer::CacheBucket::RemoveMemCachedData(RwLock::ExclusiveLockScope&, { if (Payload.MemCached) { - size_t PayloadSize = m_MemCachedPayloads[Payload.MemCached].GetSize(); + size_t PayloadSize = m_MemCachedPayloads[Payload.MemCached].Payload.GetSize(); RemoveMemCacheUsage(EstimateMemCachePayloadMemory(PayloadSize)); - m_MemCachedPayloads[Payload.MemCached] = IoBuffer{}; + m_MemCachedPayloads[Payload.MemCached] = {}; m_FreeMemCachedPayloads.push_back(Payload.MemCached); Payload.MemCached = {}; return PayloadSize; @@ -3117,7 +3138,7 @@ ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcStats& Stats) std::vector Payloads; std::vector AccessTimes; std::vector MetaDatas; - std::vector MemCachedPayloads; + std::vector MemCachedPayloads; std::vector FirstReferenceIndex; IndexMap Index; { @@ -3708,7 +3729,7 @@ ZenCacheDiskLayer::CacheBucket::CompactState(RwLock::ExclusiveLockScope&, std::vector& 
Payloads, std::vector& AccessTimes, std::vector& MetaDatas, - std::vector& MemCachedPayloads, + std::vector& MemCachedPayloads, std::vector& FirstReferenceIndex, IndexMap& Index, RwLock::ExclusiveLockScope& IndexLock) @@ -3738,7 +3759,8 @@ ZenCacheDiskLayer::CacheBucket::CompactState(RwLock::ExclusiveLockScope&, } if (Payload.MemCached) { - MemCachedPayloads.push_back(std::move(m_MemCachedPayloads[Payload.MemCached])); + MemCachedPayloads.emplace_back( + MemCacheData{.Payload = std::move(m_MemCachedPayloads[Payload.MemCached].Payload), .OwnerIndex = EntryIndex}); Payload.MemCached = MemCachedIndex(gsl::narrow(MemCachedPayloads.size() - 1)); } if (m_Configuration.EnableReferenceCaching) @@ -4216,17 +4238,8 @@ ZenCacheDiskLayer::MemCacheTrim() ZEN_TRACE_CPU("Z$::Disk::MemCacheTrim"); ZEN_ASSERT(m_Configuration.MemCacheTargetFootprintBytes != 0); - - const GcClock::TimePoint Now = GcClock::Now(); - - const GcClock::Tick NowTick = Now.time_since_epoch().count(); - const std::chrono::seconds TrimInterval = std::chrono::seconds(m_Configuration.MemCacheTrimIntervalSeconds); - GcClock::Tick LastTrimTick = m_LastTickMemCacheTrim; - const GcClock::Tick NextAllowedTrimTick = LastTrimTick + GcClock::Duration(TrimInterval).count(); - if (NowTick < NextAllowedTrimTick) - { - return; - } + ZEN_ASSERT(m_Configuration.MemCacheMaxAgeSeconds != 0); + ZEN_ASSERT(m_Configuration.MemCacheTrimIntervalSeconds != 0); bool Expected = false; if (!m_IsMemCacheTrimming.compare_exchange_strong(Expected, true)) @@ -4234,75 +4247,90 @@ ZenCacheDiskLayer::MemCacheTrim() return; } - // Bump time forward so we don't keep trying to do m_IsTrimming.compare_exchange_strong - const GcClock::Tick NextTrimTick = NowTick + GcClock::Duration(TrimInterval).count(); - m_LastTickMemCacheTrim.store(NextTrimTick); + try + { + m_JobQueue.QueueJob("ZenCacheDiskLayer::MemCacheTrim", [this](JobContext&) { + ZEN_TRACE_CPU("Z$::ZenCacheDiskLayer::MemCacheTrim [Async]"); + + const std::chrono::seconds TrimInterval = 
std::chrono::seconds(m_Configuration.MemCacheTrimIntervalSeconds); + uint64_t TrimmedSize = 0; + Stopwatch Timer; + const auto Guard = MakeGuard([&] { + ZEN_INFO("trimmed {} (remaining {}), from memory cache in {}", + NiceBytes(TrimmedSize), + NiceBytes(m_TotalMemCachedSize), + NiceTimeSpanMs(Timer.GetElapsedTimeMs())); + + const GcClock::Tick NowTick = GcClock::TickCount(); + const GcClock::Tick NextTrimTick = NowTick + GcClock::Duration(TrimInterval).count(); + m_NextAllowedTrimTick.store(NextTrimTick); + m_IsMemCacheTrimming.store(false); + }); - m_JobQueue.QueueJob("ZenCacheDiskLayer::MemCacheTrim", [this, Now, TrimInterval](JobContext&) { - ZEN_TRACE_CPU("Z$::ZenCacheDiskLayer::MemCacheTrim [Async]"); + const std::chrono::seconds MaxAge = std::chrono::seconds(m_Configuration.MemCacheMaxAgeSeconds); - uint64_t StartSize = m_TotalMemCachedSize.load(); - Stopwatch Timer; - const auto Guard = MakeGuard([&] { - uint64_t EndSize = m_TotalMemCachedSize.load(); - ZEN_INFO("trimmed {} (remaining {}), from memory cache in {}", - NiceBytes(StartSize > EndSize ? 
StartSize - EndSize : 0), - NiceBytes(m_TotalMemCachedSize), - NiceTimeSpanMs(Timer.GetElapsedTimeMs())); - m_IsMemCacheTrimming.store(false); - }); + static const size_t UsageSlotCount = 2048; + std::vector UsageSlots; + UsageSlots.reserve(UsageSlotCount); - const std::chrono::seconds MaxAge = std::chrono::seconds(m_Configuration.MemCacheMaxAgeSeconds); - - std::vector UsageSlots; - UsageSlots.reserve(std::chrono::seconds(MaxAge / TrimInterval).count()); + std::vector Buckets; + { + RwLock::SharedLockScope __(m_Lock); + Buckets.reserve(m_Buckets.size()); + for (auto& Kv : m_Buckets) + { + Buckets.push_back(Kv.second.get()); + } + } - std::vector Buckets; - { - RwLock::SharedLockScope __(m_Lock); - Buckets.reserve(m_Buckets.size()); - for (auto& Kv : m_Buckets) + const GcClock::TimePoint Now = GcClock::Now(); { - Buckets.push_back(Kv.second.get()); + ZEN_TRACE_CPU("Z$::ZenCacheDiskLayer::MemCacheTrim GetUsageByAccess"); + for (CacheBucket* Bucket : Buckets) + { + Bucket->GetUsageByAccess(Now, MaxAge, UsageSlots); + } } - } - for (CacheBucket* Bucket : Buckets) - { - Bucket->GetUsageByAccess(Now, GcClock::Duration(TrimInterval), UsageSlots); - } - uint64_t TotalSize = 0; - for (size_t Index = 0; Index < UsageSlots.size(); ++Index) - { - TotalSize += UsageSlots[Index]; - if (TotalSize >= m_Configuration.MemCacheTargetFootprintBytes) + uint64_t TotalSize = 0; + for (size_t Index = 0; Index < UsageSlots.size(); ++Index) { - GcClock::TimePoint ExpireTime = Now - (TrimInterval * Index); - MemCacheTrim(Buckets, ExpireTime); - break; + TotalSize += UsageSlots[Index]; + if (TotalSize >= m_Configuration.MemCacheTargetFootprintBytes) + { + GcClock::TimePoint ExpireTime = Now - ((GcClock::Duration(MaxAge) * Index) / UsageSlotCount); + TrimmedSize = MemCacheTrim(Buckets, ExpireTime); + break; + } } - } - }); + }); + } + catch (std::exception& Ex) + { + ZEN_ERROR("Failed scheduling ZenCacheDiskLayer::MemCacheTrim. 
Reason: '{}'", Ex.what()); m_IsMemCacheTrimming.store(false); } } -void +uint64_t ZenCacheDiskLayer::MemCacheTrim(std::vector& Buckets, GcClock::TimePoint ExpireTime) { if (m_Configuration.MemCacheTargetFootprintBytes == 0) { - return; + return 0; } - RwLock::SharedLockScope __(m_Lock); + uint64_t TrimmedSize = 0; for (CacheBucket* Bucket : Buckets) { - Bucket->MemCacheTrim(ExpireTime); + TrimmedSize += Bucket->MemCacheTrim(ExpireTime); } const GcClock::TimePoint Now = GcClock::Now(); const GcClock::Tick NowTick = Now.time_since_epoch().count(); const std::chrono::seconds TrimInterval = std::chrono::seconds(m_Configuration.MemCacheTrimIntervalSeconds); - GcClock::Tick LastTrimTick = m_NextAllowedTrimTick; const GcClock::Tick NextAllowedTrimTick = NowTick + GcClock::Duration(TrimInterval).count(); - m_LastTickMemCacheTrim.compare_exchange_strong(LastTrimTick, NextAllowedTrimTick); + m_NextAllowedTrimTick.compare_exchange_strong(LastTrimTick, NextAllowedTrimTick); return TrimmedSize; } #if ZEN_WITH_TESTS -- cgit v1.2.3 From 16fd9ea89c7560216b654843400ab3d852b04e16 Mon Sep 17 00:00:00 2001 From: Dan Engelbrecht Date: Wed, 13 Dec 2023 09:25:05 -0500 Subject: improve trace (#606) * Adding some more trace scopes for better visibility * Removed spammy trace scope when replaying oplogs * Remove "::Disk" from trace scopes - redundant now that we have merged disk and memory layers --- src/zenserver/cache/cachedisklayer.cpp | 87 +++++++++++++++++++++------------- 1 file changed, 53 insertions(+), 34 deletions(-) (limited to 'src/zenserver/cache/cachedisklayer.cpp') diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp index 0987cd0f1..f1aab6093 100644 --- a/src/zenserver/cache/cachedisklayer.cpp +++ b/src/zenserver/cache/cachedisklayer.cpp @@ -504,6 +504,8 @@ BucketManifestSerializer::ReadSidecarFile(RwLock::ExclusiveLockScope& B std::vector& AccessTimes, std::vector& Payloads) {
+ ZEN_TRACE_CPU("Z$::ReadSidecarFile"); + ZEN_ASSERT(AccessTimes.size() == Payloads.size()); std::error_code Ec; @@ -590,6 +592,8 @@ BucketManifestSerializer::WriteSidecarFile(RwLock::SharedLockScope&, const std::vector& Payloads, const std::vector& MetaDatas) { + ZEN_TRACE_CPU("Z$::WriteSidecarFile"); + BucketMetaHeader Header; Header.EntryCount = m_ManifestEntryCount; Header.LogPosition = SnapshotLogPosition; @@ -698,7 +702,7 @@ ZenCacheDiskLayer::CacheBucket::OpenOrCreate(std::filesystem::path BucketDir, bo { using namespace std::literals; - ZEN_TRACE_CPU("Z$::Disk::Bucket::OpenOrCreate"); + ZEN_TRACE_CPU("Z$::Bucket::OpenOrCreate"); ZEN_ASSERT(m_IsFlushing.load()); // We want to take the lock here since we register as a GC referencer a construction @@ -765,7 +769,7 @@ ZenCacheDiskLayer::CacheBucket::OpenOrCreate(std::filesystem::path BucketDir, bo void ZenCacheDiskLayer::CacheBucket::WriteIndexSnapshotLocked(const std::function& ClaimDiskReserveFunc) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::WriteIndexSnapshot"); + ZEN_TRACE_CPU("Z$::Bucket::WriteIndexSnapshot"); const uint64_t LogCount = m_SlogFile.GetLogCount(); if (m_LogFlushPosition == LogCount) @@ -875,7 +879,7 @@ ZenCacheDiskLayer::CacheBucket::WriteIndexSnapshotLocked(const std::function DataFilePath; BuildPath(DataFilePath, HashKey); @@ -1172,6 +1176,8 @@ ZenCacheDiskLayer::CacheBucket::GetStandaloneCacheValue(ZenContentType ContentTy bool ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutValue) { + ZEN_TRACE_CPU("Z$::Bucket::Get"); + metrics::RequestStats::Scope StatsScope(m_GetOps, 0); RwLock::SharedLockScope IndexLock(m_IndexLock); @@ -1228,7 +1234,7 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal size_t ValueSize = OutValue.Value.GetSize(); if (OutValue.Value && ValueSize <= m_Configuration.MemCacheSizeThreshold) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::Get::MemCache"); + ZEN_TRACE_CPU("Z$::Bucket::Get::MemCache"); OutValue.Value = 
IoBufferBuilder::ReadFromFileMaybe(OutValue.Value); RwLock::ExclusiveLockScope UpdateIndexLock(m_IndexLock); if (auto UpdateIt = m_Index.find(HashKey); UpdateIt != m_Index.end()) @@ -1247,7 +1253,7 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal if (FillRawHashAndRawSize) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::Get::MetaData"); + ZEN_TRACE_CPU("Z$::Bucket::Get::MetaData"); if (Location.IsFlagSet(DiskLocation::kCompressed)) { if (!CompressedBuffer::ValidateCompressedHeader(OutValue.Value, OutValue.RawHash, OutValue.RawSize)) @@ -1290,6 +1296,8 @@ ZenCacheDiskLayer::CacheBucket::Get(const IoHash& HashKey, ZenCacheValue& OutVal void ZenCacheDiskLayer::CacheBucket::Put(const IoHash& HashKey, const ZenCacheValue& Value, std::span References) { + ZEN_TRACE_CPU("Z$::Bucket::Put"); + metrics::RequestStats::Scope $(m_PutOps, Value.Value.Size()); if (Value.Value.Size() >= m_Configuration.LargeObjectThreshold) @@ -1307,7 +1315,7 @@ ZenCacheDiskLayer::CacheBucket::Put(const IoHash& HashKey, const ZenCacheValue& uint64_t ZenCacheDiskLayer::CacheBucket::MemCacheTrim(GcClock::TimePoint ExpireTime) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::MemCacheTrim"); + ZEN_TRACE_CPU("Z$::Bucket::MemCacheTrim"); uint64_t Trimmed = 0; GcClock::Tick ExpireTicks = ExpireTime.time_since_epoch().count(); @@ -1355,7 +1363,7 @@ ZenCacheDiskLayer::CacheBucket::MemCacheTrim(GcClock::TimePoint ExpireTime) void ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint Now, GcClock::Duration MaxAge, std::vector& InOutUsageSlots) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::GetUsageByAccess"); + ZEN_TRACE_CPU("Z$::Bucket::GetUsageByAccess"); size_t SlotCount = InOutUsageSlots.capacity(); RwLock::SharedLockScope _(m_IndexLock); @@ -1388,7 +1396,7 @@ ZenCacheDiskLayer::CacheBucket::GetUsageByAccess(GcClock::TimePoint Now, GcClock bool ZenCacheDiskLayer::CacheBucket::Drop() { - ZEN_TRACE_CPU("Z$::Disk::Bucket::Drop"); + ZEN_TRACE_CPU("Z$::Bucket::Drop"); 
RwLock::ExclusiveLockScope _(m_IndexLock); @@ -1424,7 +1432,7 @@ ZenCacheDiskLayer::CacheBucket::Drop() void ZenCacheDiskLayer::CacheBucket::Flush() { - ZEN_TRACE_CPU("Z$::Disk::Bucket::Flush"); + ZEN_TRACE_CPU("Z$::Bucket::Flush"); bool Expected = false; if (m_IsFlushing || !m_IsFlushing.compare_exchange_strong(Expected, true)) { @@ -1450,6 +1458,7 @@ ZenCacheDiskLayer::CacheBucket::Flush() void ZenCacheDiskLayer::CacheBucket::SaveSnapshot(const std::function& ClaimDiskReserveFunc) { + ZEN_TRACE_CPU("Z$::Bucket::SaveSnapshot"); try { bool UseLegacyScheme = false; @@ -1624,7 +1633,7 @@ ValidateCacheBucketEntryValue(ZenContentType ContentType, IoBuffer Buffer) void ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::Scrub"); + ZEN_TRACE_CPU("Z$::Bucket::Scrub"); ZEN_INFO("scrubbing '{}'", m_BucketDir); @@ -1864,7 +1873,7 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx) void ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::GatherReferences"); + ZEN_TRACE_CPU("Z$::Bucket::GatherReferences"); #define CALCULATE_BLOCKING_TIME 0 @@ -2082,7 +2091,7 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx) void ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CollectGarbage"); + ZEN_TRACE_CPU("Z$::Bucket::CollectGarbage"); ZEN_DEBUG("collecting garbage from '{}'", m_BucketDir); @@ -2182,7 +2191,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx) auto FlushingGuard = MakeGuard([&] { m_IsFlushing.store(false); }); { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CollectGarbage::State"); + ZEN_TRACE_CPU("Z$::Bucket::CollectGarbage::State"); RwLock::SharedLockScope IndexLock(m_IndexLock); Stopwatch Timer; @@ -2230,7 +2239,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx) if (GcCtx.IsDeletionMode()) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CollectGarbage::Delete"); + 
ZEN_TRACE_CPU("Z$::Bucket::CollectGarbage::Delete"); ExtendablePathBuilder<256> Path; @@ -2470,7 +2479,7 @@ ZenCacheDiskLayer::CacheBucket::EnumerateBucketContents( void ZenCacheDiskLayer::CollectGarbage(GcContext& GcCtx) { - ZEN_TRACE_CPU("Z$::Disk::CollectGarbage"); + ZEN_TRACE_CPU("Z$::CollectGarbage"); std::vector Buckets; { @@ -2494,7 +2503,7 @@ ZenCacheDiskLayer::CollectGarbage(GcContext& GcCtx) void ZenCacheDiskLayer::CacheBucket::PutStandaloneCacheValue(const IoHash& HashKey, const ZenCacheValue& Value, std::span References) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::PutStandaloneCacheValue"); + ZEN_TRACE_CPU("Z$::Bucket::PutStandaloneCacheValue"); uint64_t NewFileSize = Value.Value.Size(); @@ -2744,7 +2753,7 @@ ZenCacheDiskLayer::CacheBucket::GetMetaData(RwLock::SharedLockScope&, const Buck void ZenCacheDiskLayer::CacheBucket::PutInlineCacheValue(const IoHash& HashKey, const ZenCacheValue& Value, std::span References) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::PutInlineCacheValue"); + ZEN_TRACE_CPU("Z$::Bucket::PutInlineCacheValue"); uint8_t EntryFlags = 0; @@ -2821,7 +2830,7 @@ public: virtual void CompactStore(GcCtx& Ctx, GcCompactStoreStats& Stats, const std::function& ClaimDiskReserveCallback) override { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CompactStore"); + ZEN_TRACE_CPU("Z$::Bucket::CompactStore"); Stopwatch Timer; const auto _ = MakeGuard([&] { @@ -3044,7 +3053,7 @@ private: GcStoreCompactor* ZenCacheDiskLayer::CacheBucket::RemoveExpiredData(GcCtx& Ctx, GcStats& Stats) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::RemoveExpiredData"); + ZEN_TRACE_CPU("Z$::Bucket::RemoveExpiredData"); size_t TotalEntries = 0; @@ -3185,7 +3194,7 @@ public: virtual void PreCache(GcCtx& Ctx) override { - ZEN_TRACE_CPU("Z$::Disk::Bucket::PreCache"); + ZEN_TRACE_CPU("Z$::Bucket::PreCache"); Stopwatch Timer; const auto _ = MakeGuard([&] { @@ -3406,7 +3415,7 @@ public: virtual void LockState(GcCtx& Ctx) override { - ZEN_TRACE_CPU("Z$::Disk::Bucket::LockState"); + 
ZEN_TRACE_CPU("Z$::Bucket::LockState"); Stopwatch Timer; const auto _ = MakeGuard([&] { @@ -3479,7 +3488,7 @@ public: virtual void RemoveUsedReferencesFromSet(GcCtx& Ctx, HashSet& IoCids) override { - ZEN_TRACE_CPU("Z$::Disk::Bucket::RemoveUsedReferencesFromSet"); + ZEN_TRACE_CPU("Z$::Bucket::RemoveUsedReferencesFromSet"); ZEN_ASSERT(m_IndexLock); size_t InitialCount = IoCids.size(); @@ -3526,7 +3535,7 @@ public: std::vector ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CreateReferenceCheckers"); + ZEN_TRACE_CPU("Z$::Bucket::CreateReferenceCheckers"); Stopwatch Timer; const auto _ = MakeGuard([&] { @@ -3551,7 +3560,7 @@ ZenCacheDiskLayer::CacheBucket::CreateReferenceCheckers(GcCtx& Ctx) void ZenCacheDiskLayer::CacheBucket::CompactReferences(RwLock::ExclusiveLockScope&) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CompactReferences"); + ZEN_TRACE_CPU("Z$::Bucket::CompactReferences"); std::vector FirstReferenceIndex; std::vector NewReferenceHashes; @@ -3734,7 +3743,7 @@ ZenCacheDiskLayer::CacheBucket::CompactState(RwLock::ExclusiveLockScope&, IndexMap& Index, RwLock::ExclusiveLockScope& IndexLock) { - ZEN_TRACE_CPU("Z$::Disk::Bucket::CompactState"); + ZEN_TRACE_CPU("Z$::Bucket::CompactState"); size_t EntryCount = m_Index.size(); Payloads.reserve(EntryCount); @@ -3833,7 +3842,7 @@ ZenCacheDiskLayer::~ZenCacheDiskLayer() ZenCacheDiskLayer::CacheBucket* ZenCacheDiskLayer::GetOrCreateBucket(std::string_view InBucket) { - ZEN_TRACE_CPU("Z$::Disk::GetOrCreateBucket"); + ZEN_TRACE_CPU("Z$::GetOrCreateBucket"); const auto BucketName = std::string(InBucket); { @@ -3880,7 +3889,7 @@ ZenCacheDiskLayer::GetOrCreateBucket(std::string_view InBucket) bool ZenCacheDiskLayer::Get(std::string_view InBucket, const IoHash& HashKey, ZenCacheValue& OutValue) { - ZEN_TRACE_CPU("Z$::Disk::Get"); + ZEN_TRACE_CPU("Z$::Get"); if (CacheBucket* Bucket = GetOrCreateBucket(InBucket); Bucket != nullptr) { @@ -3896,7 +3905,7 @@ 
ZenCacheDiskLayer::Get(std::string_view InBucket, const IoHash& HashKey, ZenCach void ZenCacheDiskLayer::Put(std::string_view InBucket, const IoHash& HashKey, const ZenCacheValue& Value, std::span References) { - ZEN_TRACE_CPU("Z$::Disk::Put"); + ZEN_TRACE_CPU("Z$::Put"); if (CacheBucket* Bucket = GetOrCreateBucket(InBucket); Bucket != nullptr) { @@ -3908,6 +3917,8 @@ ZenCacheDiskLayer::Put(std::string_view InBucket, const IoHash& HashKey, const Z void ZenCacheDiskLayer::DiscoverBuckets() { + ZEN_TRACE_CPU("Z$::DiscoverBuckets"); + DirectoryContent DirContent; GetDirectoryContent(m_RootDir, DirectoryContent::IncludeDirsFlag, DirContent); @@ -4008,6 +4019,8 @@ ZenCacheDiskLayer::DiscoverBuckets() bool ZenCacheDiskLayer::DropBucket(std::string_view InBucket) { + ZEN_TRACE_CPU("Z$::DropBucket"); + RwLock::ExclusiveLockScope _(m_Lock); auto It = m_Buckets.find(std::string(InBucket)); @@ -4030,6 +4043,8 @@ ZenCacheDiskLayer::DropBucket(std::string_view InBucket) bool ZenCacheDiskLayer::Drop() { + ZEN_TRACE_CPU("Z$::Drop"); + RwLock::ExclusiveLockScope _(m_Lock); std::vector> Buckets; @@ -4051,6 +4066,8 @@ ZenCacheDiskLayer::Drop() void ZenCacheDiskLayer::Flush() { + ZEN_TRACE_CPU("Z$::Flush"); + std::vector Buckets; Stopwatch Timer; const auto _ = MakeGuard([&] { @@ -4092,6 +4109,8 @@ ZenCacheDiskLayer::Flush() void ZenCacheDiskLayer::ScrubStorage(ScrubContext& Ctx) { + ZEN_TRACE_CPU("Z$::ScrubStorage"); + RwLock::SharedLockScope _(m_Lock); { std::vector> Results; @@ -4118,7 +4137,7 @@ ZenCacheDiskLayer::ScrubStorage(ScrubContext& Ctx) void ZenCacheDiskLayer::GatherReferences(GcContext& GcCtx) { - ZEN_TRACE_CPU("Z$::Disk::GatherReferences"); + ZEN_TRACE_CPU("Z$::GatherReferences"); std::vector Buckets; { @@ -4235,7 +4254,7 @@ ZenCacheDiskLayer::GetValueDetails(const std::string_view BucketFilter, const st void ZenCacheDiskLayer::MemCacheTrim() { - ZEN_TRACE_CPU("Z$::Disk::MemCacheTrim"); + ZEN_TRACE_CPU("Z$::MemCacheTrim"); 
ZEN_ASSERT(m_Configuration.MemCacheTargetFootprintBytes != 0); ZEN_ASSERT(m_Configuration.MemCacheMaxAgeSeconds != 0); -- cgit v1.2.3 From 8e4100aaa4e247270c956af286e62d4bc1b01a18 Mon Sep 17 00:00:00 2001 From: Dan Engelbrecht Date: Wed, 13 Dec 2023 17:36:35 -0500 Subject: Don't use copy of Payloads array when fetching memcached payload in GC (#609) * Don't use copy of Payloads array when fetching memcached payload in GC --- src/zenserver/cache/cachedisklayer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/zenserver/cache/cachedisklayer.cpp') diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp index f1aab6093..fc6adb989 100644 --- a/src/zenserver/cache/cachedisklayer.cpp +++ b/src/zenserver/cache/cachedisklayer.cpp @@ -2025,7 +2025,7 @@ ZenCacheDiskLayer::CacheBucket::GatherReferences(GcContext& GcCtx) #endif // CALCULATE_BLOCKING_TIME if (auto It = m_Index.find(Key); It != m_Index.end()) { - const BucketPayload& CachedPayload = Payloads[It->second]; + const BucketPayload& CachedPayload = m_Payloads[It->second]; if (CachedPayload.MemCached) { Buffer = m_MemCachedPayloads[CachedPayload.MemCached].Payload; -- cgit v1.2.3 From b9aa65cfa1495eb5899cecf50d32c6f5ca027ad8 Mon Sep 17 00:00:00 2001 From: Stefan Boberg Date: Tue, 19 Dec 2023 10:23:03 +0100 Subject: fix ChunkIndexToChunkHash indexing (#621) would previously index into a reserved-but-not-sized vector which is bad but not crash-inducing bad --- src/zenserver/cache/cachedisklayer.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/zenserver/cache/cachedisklayer.cpp') diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp index fc6adb989..8d046105d 100644 --- a/src/zenserver/cache/cachedisklayer.cpp +++ b/src/zenserver/cache/cachedisklayer.cpp @@ -2307,7 +2307,7 @@ ZenCacheDiskLayer::CacheBucket::CollectGarbage(GcContext& GcCtx) BlockStoreLocation Location = 
DiskLocation.GetBlockLocation(m_Configuration.PayloadAlignment); size_t ChunkIndex = ChunkLocations.size(); ChunkLocations.push_back(Location); - ChunkIndexToChunkHash[ChunkIndex] = Key; + ChunkIndexToChunkHash.push_back(Key); if (ExpiredCacheKeys.contains(Key)) { continue; -- cgit v1.2.3