about summary refs log tree commit diff
path: root/src/zenstore/cache/cachedisklayer.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'src/zenstore/cache/cachedisklayer.cpp')
-rw-r--r--  src/zenstore/cache/cachedisklayer.cpp  66
1 file changed, 54 insertions, 12 deletions
diff --git a/src/zenstore/cache/cachedisklayer.cpp b/src/zenstore/cache/cachedisklayer.cpp
index 9161905d7..8c93d8a3a 100644
--- a/src/zenstore/cache/cachedisklayer.cpp
+++ b/src/zenstore/cache/cachedisklayer.cpp
@@ -3117,11 +3117,17 @@ ZenCacheDiskLayer::CacheBucket::ReadAttachmentsFromMetaData(uint32_t BlockI
}
bool
-ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLocked, std::vector<IoHash>& OutReferences)
+ZenCacheDiskLayer::CacheBucket::GetReferences(const LoggerRef& Logger,
+ std::atomic_bool& IsCancelledFlag,
+ bool StateIsAlreadyLocked,
+ bool ReadCacheAttachmentMetaData,
+ bool WriteCacheAttachmentMetaData,
+ std::vector<IoHash>& OutReferences,
+ ReferencesStats* OptionalOutReferencesStats)
{
ZEN_TRACE_CPU("Z$::Bucket::GetReferencesLocked");
- auto Log = [&Ctx]() { return Ctx.Logger; };
+ auto Log = [&Logger]() { return Logger; };
auto GetAttachments = [&](MemoryView Data) -> bool {
if (ValidateCompactBinary(Data, CbValidateMode::Default) == CbValidateError::None)
@@ -3148,7 +3154,7 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
}
for (const auto& Entry : m_Index)
{
- if (Ctx.IsCancelledFlag.load())
+ if (IsCancelledFlag.load())
{
return false;
}
@@ -3157,15 +3163,29 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
const BucketPayload& Payload = m_Payloads[EntryIndex];
const DiskLocation& Loc = Payload.Location;
+ if (OptionalOutReferencesStats != nullptr)
+ {
+ OptionalOutReferencesStats->ValueSizes.push_back(Loc.Size());
+ }
+
if (!Loc.IsFlagSet(DiskLocation::kStructured))
{
continue;
}
+ if (OptionalOutReferencesStats)
+ {
+ OptionalOutReferencesStats->StructuredValuesCount++;
+ }
+
const IoHash& Key = Entry.first;
if (Loc.IsFlagSet(DiskLocation::kStandaloneFile))
{
StandaloneKeys.push_back(std::make_pair(Key, Loc));
+ if (OptionalOutReferencesStats)
+ {
+ OptionalOutReferencesStats->StandaloneValuesCount++;
+ }
continue;
}
@@ -3188,21 +3208,19 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
OutReferences.reserve(OutReferences.size() + InlineKeys.size() +
StandaloneKeys.size()); // Make space for at least one attachment per record
- bool UseMetaData = Ctx.Settings.StoreCacheAttachmentMetaData;
-
for (const std::vector<std::size_t>& ChunkIndexes : InlineBlockChunkIndexes)
{
ZEN_ASSERT(!ChunkIndexes.empty());
uint32_t BlockIndex = InlineLocations[ChunkIndexes[0]].BlockIndex;
- if (!UseMetaData || !ReadAttachmentsFromMetaData(BlockIndex, InlineKeys, ChunkIndexes, OutReferences))
+ if (!ReadCacheAttachmentMetaData || !ReadAttachmentsFromMetaData(BlockIndex, InlineKeys, ChunkIndexes, OutReferences))
{
std::vector<IoHash> Keys;
std::vector<uint32_t> AttachmentCounts;
size_t PrecachedReferencesStart = OutReferences.size();
size_t NextPrecachedReferencesStart = PrecachedReferencesStart;
- bool WriteMetaData = UseMetaData && !m_BlockStore.IsWriting(BlockIndex);
+ bool WriteMetaData = WriteCacheAttachmentMetaData && !m_BlockStore.IsWriting(BlockIndex);
if (WriteMetaData)
{
Keys.reserve(InlineLocations.size());
@@ -3230,12 +3248,12 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
[&](size_t ChunkIndex, const void* Data, uint64_t Size) {
ZEN_UNUSED(ChunkIndex);
CaptureAttachments(ChunkIndex, MemoryView(Data, Size));
- return !Ctx.IsCancelledFlag.load();
+ return !IsCancelledFlag.load();
},
[&](size_t ChunkIndex, BlockStoreFile& File, uint64_t Offset, uint64_t Size) {
ZEN_UNUSED(ChunkIndex);
CaptureAttachments(ChunkIndex, File.GetChunk(Offset, Size).GetView());
- return !Ctx.IsCancelledFlag.load();
+ return !IsCancelledFlag.load();
});
if (Continue)
@@ -3260,7 +3278,7 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
return false;
}
}
- if (Ctx.IsCancelledFlag.load())
+ if (IsCancelledFlag.load())
{
return false;
}
@@ -3269,7 +3287,7 @@ ZenCacheDiskLayer::CacheBucket::GetReferences(GcCtx& Ctx, bool StateIsAlreadyLoc
for (const auto& It : StandaloneKeys)
{
- if (Ctx.IsCancelledFlag.load())
+ if (IsCancelledFlag.load())
{
return false;
}
@@ -3326,7 +3344,13 @@ public:
m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_TrackedReferences = std::make_unique<std::vector<IoHash>>(); });
- bool Continue = m_CacheBucket.GetReferences(Ctx, /*StateIsAlreadyLocked*/ false, m_PrecachedReferences);
+ bool Continue = m_CacheBucket.GetReferences(Ctx.Logger,
+ Ctx.IsCancelledFlag,
+ /*StateIsAlreadyLocked*/ false,
+ Ctx.Settings.StoreCacheAttachmentMetaData,
+ Ctx.Settings.StoreCacheAttachmentMetaData,
+ m_PrecachedReferences,
+ /*OptionalOutReferencesStats*/ nullptr);
if (!Continue)
{
m_CacheBucket.m_IndexLock.WithExclusiveLock([&]() { m_CacheBucket.m_TrackedReferences.reset(); });
@@ -4188,6 +4212,24 @@ ZenCacheDiskLayer::GetCapturedBuckets()
}
bool
+ZenCacheDiskLayer::GetContentStats(std::string_view BucketName, CacheContentStats& OutContentStats) const
+{
+ std::atomic_bool CancelFlag = false;
+ if (auto It = m_Buckets.find(std::string(BucketName)); It != m_Buckets.end())
+ {
+ CacheBucket::ReferencesStats BucketStats;
+ if (It->second->GetReferences(Log(), CancelFlag, false, true, false, OutContentStats.Attachments, &BucketStats))
+ {
+ OutContentStats.ValueSizes = std::move(BucketStats.ValueSizes);
+ OutContentStats.StructuredValuesCount = BucketStats.StructuredValuesCount;
+ OutContentStats.StandaloneValuesCount = BucketStats.StandaloneValuesCount;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool
ZenCacheDiskLayer::StartAsyncMemCacheTrim()
{
ZEN_TRACE_CPU("Z$::MemCacheTrim");