aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-04-21 09:22:03 +0200
committerGitHub <[email protected]>2023-04-21 09:22:03 +0200
commitcda7cb764af09d90c5a1e5fd2a4e55f43e59581a (patch)
tree44b29ffd2d15f340d711eadacdc594e1af60492a
parentMerge branch 'main' of https://github.com/EpicGames/zen (diff)
downloadzen-cda7cb764af09d90c5a1e5fd2a4e55f43e59581a.tar.xz
zen-cda7cb764af09d90c5a1e5fd2a4e55f43e59581a.zip
oplog and cache stats (#244)
* basic oplog stats * add GetValueStats to cache store * RwLock::ExclusiveLockScope -> RwLock::SharedLockScope * add rawhash and attachment count to CacheValueStats * added cache-stats and project-stats commands * add cast to make Mac overload detection happy * fix accept type in cache-stats command * Add options to project-stats command * use resource paths for stats in project store * use resource paths for stats in cache * fix cache-info and project-info url discriminator * more control over details$ output * cleanup * changelog
-rw-r--r--CHANGELOG.md27
-rw-r--r--zen/cmds/cache.cpp142
-rw-r--r--zen/cmds/cache.h32
-rw-r--r--zen/cmds/projectstore.cpp147
-rw-r--r--zen/cmds/projectstore.h33
-rw-r--r--zen/zen.cpp9
-rw-r--r--zenserver/cache/structuredcache.cpp268
-rw-r--r--zenserver/cache/structuredcache.h1
-rw-r--r--zenserver/cache/structuredcachestore.cpp100
-rw-r--r--zenserver/cache/structuredcachestore.h63
-rw-r--r--zenserver/projectstore/projectstore.cpp521
-rw-r--r--zenserver/projectstore/projectstore.h11
-rw-r--r--zenserver/zenserver.cpp2
-rw-r--r--zenstore/blockstore.cpp2
-rw-r--r--zenstore/include/zenstore/blockstore.h4
15 files changed, 1319 insertions, 43 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 55f6410e0..c57bc2c6c 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -16,6 +16,33 @@
- `--disablelocalhandlerefs` Force disable local references via duplicated file handles in requests
- `--forceallowpartiallocalref` Force the requests to allow local references for files that are not saved as whole files for requests that allow local refs
- `--disablepartiallocalrefs` Force disable local references for files that are not saved as whole files for requests that allow local refs
+- Feature: Zen command line tool `cache-stats` to report stats about the zen cache
+- Feature: Zen command line tool `project-stats` to report stats about the zen project store
+- Feature: Zen command line tool `cache-details` to report detailed information about the zen cache; defaults to overview information about the cache
+ - `--namespace` Get information about cache values in a namespace
+ - `--bucket` Get information about cache values limited to a specific bucket in a namespace
+ - `--valuekey` Get information about a cache value in a specific bucket in a namespace, valuekey is specified as IoHash hex string
+ - `--details` Get detailed information about each cache record
+ - `--attachmentdetails` Get detailed information about each attachment of each cache record
+ - `--csv` Format the output as a comma-delimited CSV file. If not specified, it defaults to a JSON-style response.
+- Feature: Zen command line tool `project-details` to report detailed information about the zen project store; defaults to overview information about the project store
+ - `--project` The project id to get information about
+ - `--oplog` The oplog id to get information about
+ - `--opid` The op Oid to get information about
+ - `--details` Get detailed information about the op
+ - `--opdetails` Extract the entire op information (not available in CSV output)
+ - `--attachmentdetails` Get detailed information about each attachment of each op
+ - `--csv` Format the output as a comma-delimited CSV file. If not specified, it defaults to a JSON-style response.
+- Feature: New project store stats endpoint `/stats/prj` to get stats info for zen project store
+- Feature: New project store details endpoints `/prj/details$`, `/prj/details$/{project}`, `/prj/details$/{project}/{oplog}`, `/prj/details$/{project}/{oplog}/{op}` to report detailed information about the zen project store; defaults to overview information about the project store items
+ - `details=true` Get detailed information about the op
+ - `opdetails=true` Extract the entire op information
+ - `attachmentdetails=true` Get detailed information about each attachment of each op
+ - `csv=true` Format the output as a comma-delimited CSV file. If not specified, it defaults to a JSON-style response.
+- Feature: New cache detail endpoints `/z$/details$`, `/z$/details$/{namespace}`, `/z$/details$/{namespace}/{bucket}`, `/z$/details$/{namespace}/{bucket}/{key}` has been added
+ - `details=true` Get detailed information about each cache record
+ - `attachmentdetails=true` Get detailed information about each attachment of each cache record
+ - `csv=true` Format the response as a comma-delimited CSV file. If not specified, it defaults to CbObject but can auto-format to JSON
- Feature: `--junit` switch to `xmake test` to generate junit style reports of tests.
- Feature: CI build on GitHub now uploads junit test reports as artifact to the check for PR validation and mainline validation
- Feature: Payloads from zenserver can now be sent using duplicated file handles if caller requests provides client ProcessId (Windows only).
diff --git a/zen/cmds/cache.cpp b/zen/cmds/cache.cpp
index b9e10cbf8..495662d2f 100644
--- a/zen/cmds/cache.cpp
+++ b/zen/cmds/cache.cpp
@@ -131,3 +131,145 @@ CacheInfoCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** argv)
return 1;
}
+
+CacheStatsCommand::CacheStatsCommand()
+{
+ m_Options.add_options()("h,help", "Print help");
+ m_Options.add_option("", "u", "hosturl", "Host URL", cxxopts::value(m_HostName)->default_value("http://localhost:1337"), "<hosturl>");
+}
+
+CacheStatsCommand::~CacheStatsCommand() = default;
+
+int
+CacheStatsCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** argv)
+{
+ ZEN_UNUSED(GlobalOptions);
+
+ if (!ParseOptions(argc, argv))
+ {
+ return 0;
+ }
+
+ cpr::Session Session;
+ Session.SetUrl({fmt::format("{}/stats/z$", m_HostName)});
+ Session.SetHeader(cpr::Header{{"Accept", "application/json"}});
+
+ cpr::Response Result = Session.Get();
+
+ if (zen::IsHttpSuccessCode(Result.status_code))
+ {
+ ZEN_CONSOLE("{}", Result.text);
+
+ return 0;
+ }
+
+ if (Result.status_code)
+ {
+ ZEN_ERROR("Info failed: {}: {} ({})", Result.status_code, Result.reason, Result.text);
+ }
+ else
+ {
+ ZEN_ERROR("Info failed: {}", Result.error.message);
+ }
+
+ return 1;
+}
+
+CacheDetailsCommand::CacheDetailsCommand()
+{
+ m_Options.add_options()("h,help", "Print help");
+ m_Options.add_option("", "u", "hosturl", "Host URL", cxxopts::value(m_HostName)->default_value("http://localhost:1337"), "<hosturl>");
+ m_Options.add_option("", "c", "csv", "Info on csv format", cxxopts::value(m_CSV), "<csv>");
+ m_Options.add_option("", "d", "details", "Get detailed information about records", cxxopts::value(m_Details), "<details>");
+ m_Options.add_option("",
+ "a",
+ "attachmentdetails",
+ "Get detailed information about attachments",
+ cxxopts::value(m_AttachmentDetails),
+ "<attachmentdetails>");
+ m_Options.add_option("", "n", "namespace", "Namespace name to get info for", cxxopts::value(m_Namespace), "<namespace>");
+ m_Options.add_option("", "b", "bucket", "Filter on bucket name", cxxopts::value(m_Bucket), "<bucket>");
+ m_Options.add_option("", "v", "valuekey", "Filter on value key hash string", cxxopts::value(m_ValueKey), "<valuekey>");
+}
+
+CacheDetailsCommand::~CacheDetailsCommand() = default;
+
+int
+CacheDetailsCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** argv)
+{
+ ZEN_UNUSED(GlobalOptions);
+
+ if (!ParseOptions(argc, argv))
+ {
+ return 0;
+ }
+
+ cpr::Session Session;
+ cpr::Parameters Parameters;
+ if (m_Details)
+ {
+ Parameters.Add({"details", "true"});
+ }
+ if (m_AttachmentDetails)
+ {
+ Parameters.Add({"attachmentdetails", "true"});
+ }
+ if (m_CSV)
+ {
+ Parameters.Add({"csv", "true"});
+ }
+ else
+ {
+ Session.SetHeader(cpr::Header{{"Accept", "application/json"}});
+ }
+
+ if (!m_ValueKey.empty())
+ {
+ if (m_Namespace.empty() || m_Bucket.empty())
+ {
+ ZEN_ERROR("Provide namespace and bucket name");
+ ZEN_CONSOLE("{}", m_Options.help({""}).c_str());
+ return 1;
+ }
+ Session.SetUrl({fmt::format("{}/z$/details$/{}/{}/{}", m_HostName, m_Namespace, m_Bucket, m_ValueKey)});
+ }
+ else if (!m_Bucket.empty())
+ {
+ if (m_Namespace.empty())
+ {
+ ZEN_ERROR("Provide namespace name");
+ ZEN_CONSOLE("{}", m_Options.help({""}).c_str());
+ return 1;
+ }
+ Session.SetUrl({fmt::format("{}/z$/details$/{}/{}", m_HostName, m_Namespace, m_Bucket)});
+ }
+ else if (!m_Namespace.empty())
+ {
+ Session.SetUrl({fmt::format("{}/z$/details$/{}", m_HostName, m_Namespace)});
+ }
+ else
+ {
+ Session.SetUrl({fmt::format("{}/z$/details$", m_HostName)});
+ }
+ Session.SetParameters(Parameters);
+
+ cpr::Response Result = Session.Get();
+
+ if (zen::IsHttpSuccessCode(Result.status_code))
+ {
+ ZEN_CONSOLE("{}", Result.text);
+
+ return 0;
+ }
+
+ if (Result.status_code)
+ {
+ ZEN_ERROR("Info failed: {}: {} ({})", Result.status_code, Result.reason, Result.text);
+ }
+ else
+ {
+ ZEN_ERROR("Info failed: {}", Result.error.message);
+ }
+
+ return 1;
+}
diff --git a/zen/cmds/cache.h b/zen/cmds/cache.h
index 915c3d7d3..1f368bdec 100644
--- a/zen/cmds/cache.h
+++ b/zen/cmds/cache.h
@@ -34,3 +34,35 @@ private:
std::string m_NamespaceName;
std::string m_BucketName;
};
+
+class CacheStatsCommand : public ZenCmdBase
+{
+public:
+ CacheStatsCommand();
+ ~CacheStatsCommand();
+ virtual int Run(const ZenCliOptions& GlobalOptions, int argc, char** argv) override;
+ virtual cxxopts::Options& Options() override { return m_Options; }
+
+private:
+ cxxopts::Options m_Options{"cache-stats", "Stats info on cache"};
+ std::string m_HostName;
+};
+
+class CacheDetailsCommand : public ZenCmdBase
+{
+public:
+ CacheDetailsCommand();
+ ~CacheDetailsCommand();
+ virtual int Run(const ZenCliOptions& GlobalOptions, int argc, char** argv) override;
+ virtual cxxopts::Options& Options() override { return m_Options; }
+
+private:
+ cxxopts::Options m_Options{"cache-details", "Detailed info on cache"};
+ std::string m_HostName;
+ bool m_CSV;
+ bool m_Details;
+ bool m_AttachmentDetails;
+ std::string m_Namespace;
+ std::string m_Bucket;
+ std::string m_ValueKey;
+};
diff --git a/zen/cmds/projectstore.cpp b/zen/cmds/projectstore.cpp
index b57277c6b..fe0dd713e 100644
--- a/zen/cmds/projectstore.cpp
+++ b/zen/cmds/projectstore.cpp
@@ -781,3 +781,150 @@ ImportOplogCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** arg
ZEN_CONSOLE("{}", FormatHttpResponse(Response));
return MapHttpToCommandReturnCode(Response);
}
+
+ProjectStatsCommand::ProjectStatsCommand()
+{
+ m_Options.add_options()("h,help", "Print help");
+ m_Options.add_option("", "u", "hosturl", "Host URL", cxxopts::value(m_HostName)->default_value("http://localhost:1337"), "<hosturl>");
+}
+
+ProjectStatsCommand::~ProjectStatsCommand() = default;
+
+int
+ProjectStatsCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** argv)
+{
+ ZEN_UNUSED(GlobalOptions);
+
+ if (!ParseOptions(argc, argv))
+ {
+ return 0;
+ }
+
+ cpr::Session Session;
+ Session.SetUrl({fmt::format("{}/stats/prj", m_HostName)});
+ Session.SetHeader(cpr::Header{{"Accept", "application/json"}});
+
+ cpr::Response Result = Session.Get();
+
+ if (zen::IsHttpSuccessCode(Result.status_code))
+ {
+ ZEN_CONSOLE("{}", Result.text);
+
+ return 0;
+ }
+
+ if (Result.status_code)
+ {
+ ZEN_ERROR("Info failed: {}: {} ({})", Result.status_code, Result.reason, Result.text);
+ }
+ else
+ {
+ ZEN_ERROR("Info failed: {}", Result.error.message);
+ }
+
+ return 1;
+}
+
+ProjectDetailsCommand::ProjectDetailsCommand()
+{
+ m_Options.add_options()("h,help", "Print help");
+ m_Options.add_option("", "u", "hosturl", "Host URL", cxxopts::value(m_HostName)->default_value("http://localhost:1337"), "<hosturl>");
+ m_Options.add_option("", "c", "csv", "Output in CSV format (default is JSon)", cxxopts::value(m_CSV), "<csv>");
+ m_Options.add_option("", "d", "details", "Detailed info on opslog", cxxopts::value(m_Details), "<details>");
+ m_Options.add_option("", "o", "opdetails", "Details info on oplog body", cxxopts::value(m_OpDetails), "<opdetails>");
+ m_Options.add_option("", "p", "project", "Project name to get info from", cxxopts::value(m_ProjectName), "<projectid>");
+ m_Options.add_option("", "l", "oplog", "Oplog name to get info from", cxxopts::value(m_OplogName), "<oplogid>");
+ m_Options.add_option("", "i", "opid", "Oid of a specific op info for", cxxopts::value(m_OpId), "<opid>");
+ m_Options.add_option("",
+ "a",
+ "attachmentdetails",
+ "Get detailed information about attachments",
+ cxxopts::value(m_AttachmentDetails),
+ "<attachmentdetails>");
+}
+
+ProjectDetailsCommand::~ProjectDetailsCommand() = default;
+
+int
+ProjectDetailsCommand::Run(const ZenCliOptions& GlobalOptions, int argc, char** argv)
+{
+ ZEN_UNUSED(GlobalOptions);
+
+ if (!ParseOptions(argc, argv))
+ {
+ return 0;
+ }
+
+ cpr::Session Session;
+ cpr::Parameters Parameters;
+ if (m_OpDetails)
+ {
+ Parameters.Add({"opdetails", "true"});
+ }
+ if (m_Details)
+ {
+ Parameters.Add({"details", "true"});
+ }
+ if (m_AttachmentDetails)
+ {
+ Parameters.Add({"attachmentdetails", "true"});
+ }
+ if (m_CSV)
+ {
+ Parameters.Add({"csv", "true"});
+ }
+ else
+ {
+ Session.SetHeader(cpr::Header{{"Accept", "application/json"}});
+ }
+
+ if (!m_OpId.empty())
+ {
+ if (m_ProjectName.empty() || m_OplogName.empty())
+ {
+ ZEN_ERROR("Provide project and oplog name");
+ ZEN_CONSOLE("{}", m_Options.help({""}).c_str());
+ return 1;
+ }
+ Session.SetUrl({fmt::format("{}/prj/details$/{}/{}/{}", m_HostName, m_ProjectName, m_OplogName, m_OpId)});
+ }
+ else if (!m_OplogName.empty())
+ {
+ if (m_ProjectName.empty())
+ {
+ ZEN_ERROR("Provide project name");
+ ZEN_CONSOLE("{}", m_Options.help({""}).c_str());
+ return 1;
+ }
+ Session.SetUrl({fmt::format("{}/prj/details$/{}/{}", m_HostName, m_ProjectName, m_OplogName)});
+ }
+ else if (!m_ProjectName.empty())
+ {
+ Session.SetUrl({fmt::format("{}/prj/details$/{}", m_HostName, m_ProjectName)});
+ }
+ else
+ {
+ Session.SetUrl({fmt::format("{}/prj/details$", m_HostName)});
+ }
+ Session.SetParameters(Parameters);
+
+ cpr::Response Result = Session.Get();
+
+ if (zen::IsHttpSuccessCode(Result.status_code))
+ {
+ ZEN_CONSOLE("{}", Result.text);
+
+ return 0;
+ }
+
+ if (Result.status_code)
+ {
+ ZEN_ERROR("Info failed: {}: {} ({})", Result.status_code, Result.reason, Result.text);
+ }
+ else
+ {
+ ZEN_ERROR("Info failed: {}", Result.error.message);
+ }
+
+ return 1;
+}
diff --git a/zen/cmds/projectstore.h b/zen/cmds/projectstore.h
index 53ba14825..10927a546 100644
--- a/zen/cmds/projectstore.h
+++ b/zen/cmds/projectstore.h
@@ -145,3 +145,36 @@ private:
std::string m_FileDirectoryPath;
std::string m_FileName;
};
+
+class ProjectStatsCommand : public ZenCmdBase
+{
+public:
+ ProjectStatsCommand();
+ ~ProjectStatsCommand();
+ virtual int Run(const ZenCliOptions& GlobalOptions, int argc, char** argv) override;
+ virtual cxxopts::Options& Options() override { return m_Options; }
+
+private:
+ cxxopts::Options m_Options{"project-stats", "Stats info on project store"};
+ std::string m_HostName;
+};
+
+class ProjectDetailsCommand : public ZenCmdBase
+{
+public:
+ ProjectDetailsCommand();
+ ~ProjectDetailsCommand();
+ virtual int Run(const ZenCliOptions& GlobalOptions, int argc, char** argv) override;
+ virtual cxxopts::Options& Options() override { return m_Options; }
+
+private:
+ cxxopts::Options m_Options{"project-details", "Detail info on project store"};
+ std::string m_HostName;
+ bool m_Details;
+ bool m_OpDetails;
+ bool m_AttachmentDetails;
+ bool m_CSV;
+ std::string m_ProjectName;
+ std::string m_OplogName;
+ std::string m_OpId;
+};
diff --git a/zen/zen.cpp b/zen/zen.cpp
index e72619fdc..5a34ffa80 100644
--- a/zen/zen.cpp
+++ b/zen/zen.cpp
@@ -212,7 +212,10 @@ main(int argc, char** argv)
TopCommand TopCmd;
UpCommand UpCmd;
VersionCommand VersionCmd;
-
+ CacheStatsCommand CacheStatsCmd;
+ CacheDetailsCommand CacheDetailsCmd;
+ ProjectStatsCommand ProjectStatsCmd;
+ ProjectDetailsCommand ProjectDetailsCmd;
#if ZEN_WITH_TESTS
RunTestsCommand RunTestsCmd;
#endif
@@ -249,6 +252,10 @@ main(int argc, char** argv)
{"top", &TopCmd, "Monitor zen server activity"},
{"up", &UpCmd, "Bring zen server up"},
{"version", &VersionCmd, "Get zen server version"},
+ {"cache-stats", &CacheStatsCmd, "Stats on cache"},
+ {"cache-details", &CacheDetailsCmd, "Details on cache"},
+ {"project-stats", &ProjectStatsCmd, "Stats on project store"},
+ {"project-details", &ProjectDetailsCmd, "Details on project store"},
#if ZEN_WITH_TESTS
{"runtests", &RunTestsCmd, "Run zen tests"},
#endif
diff --git a/zenserver/cache/structuredcache.cpp b/zenserver/cache/structuredcache.cpp
index 8539d9c16..90e905bf6 100644
--- a/zenserver/cache/structuredcache.cpp
+++ b/zenserver/cache/structuredcache.cpp
@@ -83,6 +83,7 @@ namespace {
static constinit std::string_view HttpZCacheUtilStartRecording = "exec$/start-recording"sv;
static constinit std::string_view HttpZCacheUtilStopRecording = "exec$/stop-recording"sv;
static constinit std::string_view HttpZCacheUtilReplayRecording = "exec$/replay-recording"sv;
+ static constinit std::string_view HttpZCacheDetailsPrefix = "details$"sv;
struct HttpRequestData
{
@@ -366,6 +367,224 @@ HttpStructuredCacheService::Scrub(ScrubContext& Ctx)
}
void
+HttpStructuredCacheService::HandleDetailsRequest(HttpServerRequest& Request)
+{
+ std::string_view Key = Request.RelativeUri();
+ std::vector<std::string> Tokens;
+ uint32_t TokenCount = ForEachStrTok(Key, '/', [&Tokens](std::string_view Token) {
+ Tokens.push_back(std::string(Token));
+ return true;
+ });
+ std::string FilterNamespace;
+ std::string FilterBucket;
+ std::string FilterValue;
+ switch (TokenCount)
+ {
+ case 1:
+ break;
+ case 2:
+ {
+ FilterNamespace = Tokens[1];
+ if (FilterNamespace.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ }
+ break;
+ case 3:
+ {
+ FilterNamespace = Tokens[1];
+ if (FilterNamespace.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ FilterBucket = Tokens[2];
+ if (FilterBucket.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ }
+ break;
+ case 4:
+ {
+ FilterNamespace = Tokens[1];
+ if (FilterNamespace.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ FilterBucket = Tokens[2];
+ if (FilterBucket.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ FilterValue = Tokens[3];
+ if (FilterValue.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+ }
+ break;
+ default:
+ return Request.WriteResponse(HttpResponseCode::BadRequest); // invalid URL
+ }
+
+ HttpServerRequest::QueryParams Params = Request.GetQueryParams();
+ bool CSV = Params.GetValue("csv") == "true";
+ bool Details = Params.GetValue("details") == "true";
+ bool AttachmentDetails = Params.GetValue("attachmentdetails") == "true";
+
+ std::chrono::seconds NowSeconds = std::chrono::duration_cast<std::chrono::seconds>(GcClock::Now().time_since_epoch());
+ CacheValueDetails ValueDetails = m_CacheStore.GetValueDetails(FilterNamespace, FilterBucket, FilterValue);
+
+ if (CSV)
+ {
+ ExtendableStringBuilder<4096> CSVWriter;
+ if (AttachmentDetails)
+ {
+ CSVWriter << "Namespace, Bucket, Key, Cid, Size";
+ }
+ else if (Details)
+ {
+ CSVWriter << "Namespace, Bucket, Key, Size, RawSize, RawHash, ContentType, Age, AttachmentsCount, AttachmentsSize";
+ }
+ else
+ {
+ CSVWriter << "Namespace, Bucket, Key";
+ }
+ for (const auto& NamespaceIt : ValueDetails.Namespaces)
+ {
+ const std::string& Namespace = NamespaceIt.first;
+ for (const auto& BucketIt : NamespaceIt.second.Buckets)
+ {
+ const std::string& Bucket = BucketIt.first;
+ for (const auto& ValueIt : BucketIt.second.Values)
+ {
+ if (AttachmentDetails)
+ {
+ for (const IoHash& Hash : ValueIt.second.Attachments)
+ {
+ IoBuffer Payload = m_CidStore.FindChunkByCid(Hash);
+ CSVWriter << "\r\n"
+ << Namespace << "," << Bucket << "," << ValueIt.first.ToHexString() << ", " << Hash.ToHexString()
+ << ", " << gsl::narrow<uint64_t>(Payload.GetSize());
+ }
+ }
+ else if (Details)
+ {
+ std::chrono::seconds LastAccessedSeconds = std::chrono::duration_cast<std::chrono::seconds>(
+ GcClock::TimePointFromTick(ValueIt.second.LastAccess).time_since_epoch());
+ CSVWriter << "\r\n"
+ << Namespace << "," << Bucket << "," << ValueIt.first.ToHexString() << ", " << ValueIt.second.Size << ","
+ << ValueIt.second.RawSize << "," << ValueIt.second.RawHash.ToHexString() << ", "
+ << ToString(ValueIt.second.ContentType) << ", " << (NowSeconds.count() - LastAccessedSeconds.count())
+ << ", " << gsl::narrow<uint64_t>(ValueIt.second.Attachments.size());
+ size_t AttachmentsSize = 0;
+ for (const IoHash& Hash : ValueIt.second.Attachments)
+ {
+ IoBuffer Payload = m_CidStore.FindChunkByCid(Hash);
+ AttachmentsSize += Payload.GetSize();
+ }
+ CSVWriter << ", " << gsl::narrow<uint64_t>(AttachmentsSize);
+ }
+ else
+ {
+ CSVWriter << "\r\n" << Namespace << "," << Bucket << "," << ValueIt.first.ToHexString();
+ }
+ }
+ }
+ }
+ return Request.WriteResponse(HttpResponseCode::OK, HttpContentType::kText, CSVWriter.ToView());
+ }
+ else
+ {
+ CbObjectWriter Cbo;
+ Cbo.BeginArray("namespaces");
+ {
+ for (const auto& NamespaceIt : ValueDetails.Namespaces)
+ {
+ const std::string& Namespace = NamespaceIt.first;
+ Cbo.BeginObject();
+ {
+ Cbo.AddString("name", Namespace);
+ Cbo.BeginArray("buckets");
+ {
+ for (const auto& BucketIt : NamespaceIt.second.Buckets)
+ {
+ const std::string& Bucket = BucketIt.first;
+ Cbo.BeginObject();
+ {
+ Cbo.AddString("name", Bucket);
+ Cbo.BeginArray("values");
+ {
+ for (const auto& ValueIt : BucketIt.second.Values)
+ {
+ std::chrono::seconds LastAccessedSeconds = std::chrono::duration_cast<std::chrono::seconds>(
+ GcClock::TimePointFromTick(ValueIt.second.LastAccess).time_since_epoch());
+ Cbo.BeginObject();
+ {
+ Cbo.AddHash("key", ValueIt.first);
+ if (Details)
+ {
+ Cbo.AddInteger("size", ValueIt.second.Size);
+ if (ValueIt.second.Size > 0 && ValueIt.second.RawSize != 0 &&
+ ValueIt.second.RawSize != ValueIt.second.Size)
+ {
+ Cbo.AddInteger("rawsize", ValueIt.second.RawSize);
+ Cbo.AddHash("rawhash", ValueIt.second.RawHash);
+ }
+ Cbo.AddString("contenttype", ToString(ValueIt.second.ContentType));
+ Cbo.AddInteger("age", NowSeconds.count() - LastAccessedSeconds.count());
+ if (ValueIt.second.Attachments.size() > 0)
+ {
+ if (AttachmentDetails)
+ {
+ Cbo.BeginArray("attachments");
+ {
+ for (const IoHash& Hash : ValueIt.second.Attachments)
+ {
+ Cbo.BeginObject();
+ Cbo.AddHash("cid", Hash);
+ IoBuffer Payload = m_CidStore.FindChunkByCid(Hash);
+ Cbo.AddInteger("size", gsl::narrow<uint64_t>(Payload.GetSize()));
+ Cbo.EndObject();
+ }
+ }
+ Cbo.EndArray();
+ }
+ else
+ {
+ Cbo.AddInteger("attachmentcount",
+ gsl::narrow<uint64_t>(ValueIt.second.Attachments.size()));
+ size_t AttachmentsSize = 0;
+ for (const IoHash& Hash : ValueIt.second.Attachments)
+ {
+ IoBuffer Payload = m_CidStore.FindChunkByCid(Hash);
+ AttachmentsSize += Payload.GetSize();
+ }
+ Cbo.AddInteger("attachmentssize", gsl::narrow<uint64_t>(AttachmentsSize));
+ }
+ }
+ }
+ }
+ Cbo.EndObject();
+ }
+ }
+ Cbo.EndArray();
+ }
+ Cbo.EndObject();
+ }
+ }
+ Cbo.EndArray();
+ }
+ Cbo.EndObject();
+ }
+ }
+ Cbo.EndArray();
+ Request.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+ }
+}
+
+void
HttpStructuredCacheService::HandleRequest(HttpServerRequest& Request)
{
metrics::OperationTiming::Scope $(m_HttpRequests);
@@ -409,6 +628,11 @@ HttpStructuredCacheService::HandleRequest(HttpServerRequest& Request)
Request.WriteResponse(HttpResponseCode::OK);
return;
}
+ if (Key.starts_with(HttpZCacheDetailsPrefix))
+ {
+ HandleDetailsRequest(Request);
+ return;
+ }
HttpRequestData RequestData;
if (!HttpRequestParseRelativeUri(Key, RequestData))
@@ -2784,27 +3008,39 @@ HttpStructuredCacheService::HandleStatsRequest(HttpServerRequest& Request)
const GcStorageSize CacheSize = m_CacheStore.StorageSize();
Cbo.BeginObject("cache");
- Cbo.BeginObject("size");
- Cbo << "disk" << CacheSize.DiskSize;
- Cbo << "memory" << CacheSize.MemorySize;
- Cbo.EndObject();
- Cbo << "upstream_ratio" << (HitCount > 0 ? (double(UpstreamHitCount) / double(HitCount)) : 0.0);
- Cbo << "hits" << HitCount << "misses" << MissCount;
- Cbo << "hit_ratio" << (TotalCount > 0 ? (double(HitCount) / double(TotalCount)) : 0.0);
- Cbo << "upstream_hits" << m_CacheStats.UpstreamHitCount;
- Cbo << "upstream_ratio" << (HitCount > 0 ? (double(UpstreamHitCount) / double(HitCount)) : 0.0);
+ {
+ Cbo.BeginObject("size");
+ {
+ Cbo << "disk" << CacheSize.DiskSize;
+ Cbo << "memory" << CacheSize.MemorySize;
+ }
+ Cbo.EndObject();
+
+ Cbo << "upstream_ratio" << (HitCount > 0 ? (double(UpstreamHitCount) / double(HitCount)) : 0.0);
+ Cbo << "hits" << HitCount << "misses" << MissCount;
+ Cbo << "hit_ratio" << (TotalCount > 0 ? (double(HitCount) / double(TotalCount)) : 0.0);
+ Cbo << "upstream_hits" << m_CacheStats.UpstreamHitCount;
+ Cbo << "upstream_ratio" << (HitCount > 0 ? (double(UpstreamHitCount) / double(HitCount)) : 0.0);
+ }
Cbo.EndObject();
+
Cbo.BeginObject("upstream");
- m_UpstreamCache.GetStatus(Cbo);
+ {
+ m_UpstreamCache.GetStatus(Cbo);
+ }
Cbo.EndObject();
Cbo.BeginObject("cid");
- Cbo.BeginObject("size");
- Cbo << "tiny" << CidSize.TinySize;
- Cbo << "small" << CidSize.SmallSize;
- Cbo << "large" << CidSize.LargeSize;
- Cbo << "total" << CidSize.TotalSize;
- Cbo.EndObject();
+ {
+ Cbo.BeginObject("size");
+ {
+ Cbo << "tiny" << CidSize.TinySize;
+ Cbo << "small" << CidSize.SmallSize;
+ Cbo << "large" << CidSize.LargeSize;
+ Cbo << "total" << CidSize.TotalSize;
+ }
+ Cbo.EndObject();
+ }
Cbo.EndObject();
Request.WriteResponse(HttpResponseCode::OK, Cbo.Save());
diff --git a/zenserver/cache/structuredcache.h b/zenserver/cache/structuredcache.h
index e9c58c3d6..4e7b98ac9 100644
--- a/zenserver/cache/structuredcache.h
+++ b/zenserver/cache/structuredcache.h
@@ -110,6 +110,7 @@ private:
void HandleGetCacheChunk(HttpServerRequest& Request, const CacheRef& Ref, CachePolicy PolicyFromUrl);
void HandlePutCacheChunk(HttpServerRequest& Request, const CacheRef& Ref, CachePolicy PolicyFromUrl);
void HandleRpcRequest(HttpServerRequest& Request);
+ void HandleDetailsRequest(HttpServerRequest& Request);
CbPackage HandleRpcPutCacheRecords(const CbPackage& BatchRequest);
CbPackage HandleRpcGetCacheRecords(CbObjectView BatchRequest);
diff --git a/zenserver/cache/structuredcachestore.cpp b/zenserver/cache/structuredcachestore.cpp
index 55af85ade..44574ae19 100644
--- a/zenserver/cache/structuredcachestore.cpp
+++ b/zenserver/cache/structuredcachestore.cpp
@@ -421,6 +421,12 @@ ZenCacheNamespace::GetBucketInfo(std::string_view Bucket) const
return Info;
}
+CacheValueDetails::NamespaceDetails
+ZenCacheNamespace::GetValueDetails(const std::string_view BucketFilter, const std::string_view ValueFilter) const
+{
+ return m_DiskLayer.GetValueDetails(BucketFilter, ValueFilter);
+}
+
//////////////////////////////////////////////////////////////////////////
ZenCacheMemoryLayer::ZenCacheMemoryLayer()
@@ -589,7 +595,7 @@ ZenCacheMemoryLayer::GetInfo() const
std::optional<ZenCacheMemoryLayer::BucketInfo>
ZenCacheMemoryLayer::GetBucketInfo(std::string_view Bucket) const
{
- RwLock::ExclusiveLockScope _(m_Lock);
+ RwLock::SharedLockScope _(m_Lock);
if (auto It = m_Buckets.find(std::string(Bucket)); It != m_Buckets.end())
{
@@ -1195,7 +1201,7 @@ ZenCacheDiskLayer::CacheBucket::OpenLog(const bool IsNew)
}
void
-ZenCacheDiskLayer::CacheBucket::BuildPath(PathBuilderBase& Path, const IoHash& HashKey)
+ZenCacheDiskLayer::CacheBucket::BuildPath(PathBuilderBase& Path, const IoHash& HashKey) const
{
char HexString[sizeof(HashKey.Hash) * 2];
ToHexBytes(HashKey.Hash, sizeof HashKey.Hash, HexString);
@@ -1212,7 +1218,7 @@ ZenCacheDiskLayer::CacheBucket::BuildPath(PathBuilderBase& Path, const IoHash& H
}
IoBuffer
-ZenCacheDiskLayer::CacheBucket::GetInlineCacheValue(const DiskLocation& Loc)
+ZenCacheDiskLayer::CacheBucket::GetInlineCacheValue(const DiskLocation& Loc) const
{
BlockStoreLocation Location = Loc.GetBlockLocation(m_PayloadAlignment);
@@ -1226,7 +1232,7 @@ ZenCacheDiskLayer::CacheBucket::GetInlineCacheValue(const DiskLocation& Loc)
}
IoBuffer
-ZenCacheDiskLayer::CacheBucket::GetStandaloneCacheValue(const DiskLocation& Loc, const IoHash& HashKey)
+ZenCacheDiskLayer::CacheBucket::GetStandaloneCacheValue(const DiskLocation& Loc, const IoHash& HashKey) const
{
ExtendablePathBuilder<256> DataFilePath;
BuildPath(DataFilePath, HashKey);
@@ -2036,6 +2042,50 @@ ZenCacheDiskLayer::CacheBucket::EntryCount() const
return static_cast<uint64_t>(m_Index.size());
}
+CacheValueDetails::ValueDetails
+ZenCacheDiskLayer::CacheBucket::GetValueDetails(const IoHash& Key, size_t Index) const
+{
+ std::vector<IoHash> Attachments;
+ const BucketPayload& Payload = m_Payloads[Index];
+ if (Payload.Location.IsFlagSet(DiskLocation::kStructured))
+ {
+ IoBuffer Value = Payload.Location.IsFlagSet(DiskLocation::kStandaloneFile) ? GetStandaloneCacheValue(Payload.Location, Key)
+ : GetInlineCacheValue(Payload.Location);
+ CbObject Obj(SharedBuffer{Value});
+ Obj.IterateAttachments([&Attachments](CbFieldView Field) { Attachments.emplace_back(Field.AsAttachment()); });
+ }
+ return CacheValueDetails::ValueDetails{.Size = Payload.Location.Size(),
+ .RawSize = Payload.RawSize,
+ .RawHash = Payload.RawHash,
+ .LastAccess = m_AccessTimes[Index],
+ .Attachments = std::move(Attachments),
+ .ContentType = Payload.Location.GetContentType()};
+}
+
+CacheValueDetails::BucketDetails
+ZenCacheDiskLayer::CacheBucket::GetValueDetails(const std::string_view ValueFilter) const
+{
+ CacheValueDetails::BucketDetails Details;
+ RwLock::SharedLockScope _(m_IndexLock);
+ if (ValueFilter.empty())
+ {
+ Details.Values.reserve(m_Index.size());
+ for (const auto& It : m_Index)
+ {
+ Details.Values.insert_or_assign(It.first, GetValueDetails(It.first, It.second));
+ }
+ }
+ else
+ {
+ IoHash Key = IoHash::FromHexString(ValueFilter);
+ if (auto It = m_Index.find(Key); It != m_Index.end())
+ {
+ Details.Values.insert_or_assign(It->first, GetValueDetails(It->first, It->second));
+ }
+ }
+ return Details;
+}
+
void
ZenCacheDiskLayer::CollectGarbage(GcContext& GcCtx)
{
@@ -2505,7 +2555,7 @@ ZenCacheDiskLayer::GetInfo() const
std::optional<ZenCacheDiskLayer::BucketInfo>
ZenCacheDiskLayer::GetBucketInfo(std::string_view Bucket) const
{
- RwLock::ExclusiveLockScope _(m_Lock);
+ RwLock::SharedLockScope _(m_Lock);
if (auto It = m_Buckets.find(std::string(Bucket)); It != m_Buckets.end())
{
@@ -2514,6 +2564,26 @@ ZenCacheDiskLayer::GetBucketInfo(std::string_view Bucket) const
return {};
}
+CacheValueDetails::NamespaceDetails
+ZenCacheDiskLayer::GetValueDetails(const std::string_view BucketFilter, const std::string_view ValueFilter) const
+{
+ RwLock::SharedLockScope _(m_Lock);
+ CacheValueDetails::NamespaceDetails Details;
+ if (BucketFilter.empty())
+ {
+ Details.Buckets.reserve(BucketFilter.empty() ? m_Buckets.size() : 1);
+ for (auto& Kv : m_Buckets)
+ {
+ Details.Buckets[Kv.first] = Kv.second->GetValueDetails(ValueFilter);
+ }
+ }
+ else if (auto It = m_Buckets.find(std::string(BucketFilter)); It != m_Buckets.end())
+ {
+ Details.Buckets[It->first] = It->second->GetValueDetails(ValueFilter);
+ }
+ return Details;
+}
+
//////////////////////////// ZenCacheStore
static constexpr std::string_view UE4DDCNamespaceName = "ue4.ddc";
@@ -2568,6 +2638,7 @@ ZenCacheStore::Get(std::string_view Namespace, std::string_view Bucket, const Io
return Store->Get(Bucket, HashKey, OutValue);
}
ZEN_WARN("request for unknown namespace '{}' in ZenCacheStore::Get, bucket '{}', key '{}'", Namespace, Bucket, HashKey.ToHexString());
+
return false;
}
@@ -2619,6 +2690,25 @@ ZenCacheStore::Scrub(ScrubContext& Ctx)
IterateNamespaces([&](std::string_view, ZenCacheNamespace& Store) { Store.Scrub(Ctx); });
}
+CacheValueDetails
+ZenCacheStore::GetValueDetails(const std::string_view NamespaceFilter,
+ const std::string_view BucketFilter,
+ const std::string_view ValueFilter) const
+{
+ CacheValueDetails Details;
+ if (NamespaceFilter.empty())
+ {
+ IterateNamespaces([&](std::string_view Namespace, ZenCacheNamespace& Store) {
+ Details.Namespaces[std::string(Namespace)] = Store.GetValueDetails(BucketFilter, ValueFilter);
+ });
+ }
+ else if (const ZenCacheNamespace* Store = FindNamespace(NamespaceFilter); Store != nullptr)
+ {
+ Details.Namespaces[std::string(NamespaceFilter)] = Store->GetValueDetails(BucketFilter, ValueFilter);
+ }
+ return Details;
+}
+
ZenCacheNamespace*
ZenCacheStore::GetNamespace(std::string_view Namespace)
{
diff --git a/zenserver/cache/structuredcachestore.h b/zenserver/cache/structuredcachestore.h
index fe83f3c97..2ef4c4dcb 100644
--- a/zenserver/cache/structuredcachestore.h
+++ b/zenserver/cache/structuredcachestore.h
@@ -67,6 +67,31 @@ struct ZenCacheValue
IoHash RawHash = IoHash::Zero;
};
+struct CacheValueDetails
+{
+ struct ValueDetails
+ {
+ uint64_t Size;
+ uint64_t RawSize;
+ IoHash RawHash;
+ GcClock::Tick LastAccess{};
+ std::vector<IoHash> Attachments;
+ ZenContentType ContentType;
+ };
+
+ struct BucketDetails
+ {
+ std::unordered_map<IoHash, ValueDetails, IoHash::Hasher> Values;
+ };
+
+ struct NamespaceDetails
+ {
+ std::unordered_map<std::string, BucketDetails> Buckets;
+ };
+
+ std::unordered_map<std::string, NamespaceDetails> Namespaces;
+};
+
//////////////////////////////////////////////////////////////////////////
#pragma pack(push)
@@ -301,6 +326,8 @@ public:
Info GetInfo() const;
std::optional<BucketInfo> GetBucketInfo(std::string_view Bucket) const;
+ CacheValueDetails::NamespaceDetails GetValueDetails(const std::string_view BucketFilter, const std::string_view ValueFilter) const;
+
private:
/** A cache bucket manages a single directory containing
metadata and data for that bucket
@@ -323,6 +350,8 @@ private:
inline uint64_t TotalSize() const { return m_TotalStandaloneSize.load(std::memory_order::relaxed) + m_BlockStore.TotalSize(); }
uint64_t EntryCount() const;
+ CacheValueDetails::BucketDetails GetValueDetails(const std::string_view ValueFilter) const;
+
private:
const uint64_t MaxBlockSize = 1ull << 30;
uint64_t m_PayloadAlignment = 1ull << 4;
@@ -360,18 +389,18 @@ private:
std::atomic_uint64_t m_TotalStandaloneSize{};
- void BuildPath(PathBuilderBase& Path, const IoHash& HashKey);
- void PutStandaloneCacheValue(const IoHash& HashKey, const ZenCacheValue& Value);
- IoBuffer GetStandaloneCacheValue(const DiskLocation& Loc, const IoHash& HashKey);
- void PutInlineCacheValue(const IoHash& HashKey, const ZenCacheValue& Value);
- IoBuffer GetInlineCacheValue(const DiskLocation& Loc);
- void MakeIndexSnapshot();
- uint64_t ReadIndexFile(const std::filesystem::path& IndexPath, uint32_t& OutVersion);
- uint64_t ReadLogV2(const std::filesystem::path& LogPath, uint64_t LogPosition);
- uint64_t ReadLog(const std::filesystem::path& LogPath, uint64_t LogPosition);
- void OpenLog(const bool IsNew);
- void SaveManifest();
-
+ void BuildPath(PathBuilderBase& Path, const IoHash& HashKey) const;
+ void PutStandaloneCacheValue(const IoHash& HashKey, const ZenCacheValue& Value);
+ IoBuffer GetStandaloneCacheValue(const DiskLocation& Loc, const IoHash& HashKey) const;
+ void PutInlineCacheValue(const IoHash& HashKey, const ZenCacheValue& Value);
+ IoBuffer GetInlineCacheValue(const DiskLocation& Loc) const;
+ void MakeIndexSnapshot();
+ uint64_t ReadIndexFile(const std::filesystem::path& IndexPath, uint32_t& OutVersion);
+ uint64_t ReadLogV2(const std::filesystem::path& LogPath, uint64_t LogPosition);
+ uint64_t ReadLog(const std::filesystem::path& LogPath, uint64_t LogPosition);
+ void OpenLog(const bool IsNew);
+ void SaveManifest();
+ CacheValueDetails::ValueDetails GetValueDetails(const IoHash& Key, size_t Index) const;
// These locks are here to avoid contention on file creation, therefore it's sufficient
// that we take the same lock for the same hash
//
@@ -379,8 +408,8 @@ private:
// but we don't currently access them at particularly high frequency so it should not be
// an issue in practice
- RwLock m_ShardedLocks[256];
- inline RwLock& LockForHash(const IoHash& Hash) { return m_ShardedLocks[Hash.Hash[19]]; }
+ mutable RwLock m_ShardedLocks[256];
+ inline RwLock& LockForHash(const IoHash& Hash) const { return m_ShardedLocks[Hash.Hash[19]]; }
};
std::filesystem::path m_RootDir;
@@ -429,6 +458,8 @@ public:
Info GetInfo() const;
std::optional<BucketInfo> GetBucketInfo(std::string_view Bucket) const;
+ CacheValueDetails::NamespaceDetails GetValueDetails(const std::string_view BucketFilter, const std::string_view ValueFilter) const;
+
private:
std::filesystem::path m_RootDir;
ZenCacheMemoryLayer m_MemLayer;
@@ -476,6 +507,10 @@ public:
void Flush();
void Scrub(ScrubContext& Ctx);
+ CacheValueDetails GetValueDetails(const std::string_view NamespaceFilter,
+ const std::string_view BucketFilter,
+ const std::string_view ValueFilter) const;
+
GcStorageSize StorageSize() const;
// const Configuration& GetConfiguration() const { return m_Configuration; }
diff --git a/zenserver/projectstore/projectstore.cpp b/zenserver/projectstore/projectstore.cpp
index 5aa8cad26..db5cae503 100644
--- a/zenserver/projectstore/projectstore.cpp
+++ b/zenserver/projectstore/projectstore.cpp
@@ -201,6 +201,204 @@ namespace {
: fmt::format("{}. Reason: '{}'", Result.Text, Result.Reason)};
}
+ void CSVHeader(bool Details, bool AttachmentDetails, StringBuilderBase& CSVWriter)
+ {
+ if (AttachmentDetails)
+ {
+ CSVWriter << "Project, Oplog, LSN, Key, Cid, Size";
+ }
+ else if (Details)
+ {
+ CSVWriter << "Project, Oplog, LSN, Key, Size, AttachmentCount, AttachmentsSize";
+ }
+ else
+ {
+ CSVWriter << "Project, Oplog, Key";
+ }
+ }
+
+ void CSVWriteOp(CidStore& CidStore,
+ std::string_view ProjectId,
+ std::string_view OplogId,
+ bool Details,
+ bool AttachmentDetails,
+ int LSN,
+ const Oid& Key,
+ CbObject Op,
+ StringBuilderBase& CSVWriter)
+ {
+ StringBuilder<32> KeyStringBuilder;
+ Key.ToString(KeyStringBuilder);
+ const std::string_view KeyString = KeyStringBuilder.ToView();
+
+ SharedBuffer Buffer = Op.GetBuffer();
+ if (AttachmentDetails)
+ {
+ Op.IterateAttachments([&CidStore, &CSVWriter, &ProjectId, &OplogId, LSN, &KeyString](CbFieldView FieldView) {
+ const IoHash AttachmentHash = FieldView.AsAttachment();
+ IoBuffer Attachment = CidStore.FindChunkByCid(AttachmentHash);
+ CSVWriter << "\r\n"
+ << ProjectId << ", " << OplogId << ", " << LSN << ", " << KeyString << ", " << AttachmentHash.ToHexString()
+ << ", " << gsl::narrow<uint64_t>(Attachment.GetSize());
+ });
+ }
+ else if (Details)
+ {
+ uint64_t AttachmentCount = 0;
+ size_t AttachmentsSize = 0;
+ Op.IterateAttachments([&CidStore, &AttachmentCount, &AttachmentsSize](CbFieldView FieldView) {
+ const IoHash AttachmentHash = FieldView.AsAttachment();
+ AttachmentCount++;
+ IoBuffer Attachment = CidStore.FindChunkByCid(AttachmentHash);
+ AttachmentsSize += Attachment.GetSize();
+ });
+ CSVWriter << "\r\n"
+ << ProjectId << ", " << OplogId << ", " << LSN << ", " << KeyString << ", " << gsl::narrow<uint64_t>(Buffer.GetSize())
+ << ", " << AttachmentCount << ", " << gsl::narrow<uint64_t>(AttachmentsSize);
+ }
+ else
+ {
+ CSVWriter << "\r\n" << ProjectId << ", " << OplogId << ", " << KeyString;
+ }
+ };
+
+ void CbWriteOp(CidStore& CidStore,
+ bool Details,
+ bool OpDetails,
+ bool AttachmentDetails,
+ int LSN,
+ const Oid& Key,
+ CbObject Op,
+ CbObjectWriter& CbWriter)
+ {
+ CbWriter.BeginObject();
+ {
+ SharedBuffer Buffer = Op.GetBuffer();
+ CbWriter.AddObjectId("key", Key);
+ if (Details)
+ {
+ CbWriter.AddInteger("lsn", LSN);
+ CbWriter.AddInteger("size", gsl::narrow<uint64_t>(Buffer.GetSize()));
+ }
+ if (AttachmentDetails)
+ {
+ CbWriter.BeginArray("attachments");
+ Op.IterateAttachments([&CidStore, &CbWriter](CbFieldView FieldView) {
+ const IoHash AttachmentHash = FieldView.AsAttachment();
+ CbWriter.BeginObject();
+ {
+ IoBuffer Attachment = CidStore.FindChunkByCid(AttachmentHash);
+ CbWriter.AddString("cid", AttachmentHash.ToHexString());
+ CbWriter.AddInteger("size", gsl::narrow<uint64_t>(Attachment.GetSize()));
+ }
+ CbWriter.EndObject();
+ });
+ CbWriter.EndArray();
+ }
+ else if (Details)
+ {
+ uint64_t AttachmentCount = 0;
+ size_t AttachmentsSize = 0;
+ Op.IterateAttachments([&CidStore, &AttachmentCount, &AttachmentsSize](CbFieldView FieldView) {
+ const IoHash AttachmentHash = FieldView.AsAttachment();
+ AttachmentCount++;
+ IoBuffer Attachment = CidStore.FindChunkByCid(AttachmentHash);
+ AttachmentsSize += Attachment.GetSize();
+ });
+ if (AttachmentCount > 0)
+ {
+ CbWriter.AddInteger("attachments", AttachmentCount);
+ CbWriter.AddInteger("attachmentssize", gsl::narrow<uint64_t>(AttachmentsSize));
+ }
+ }
+ if (OpDetails)
+ {
+ CbWriter.BeginObject("op");
+ for (const CbFieldView& Field : Op)
+ {
+ if (!Field.HasName())
+ {
+ CbWriter.AddField(Field);
+ continue;
+ }
+ std::string_view FieldName = Field.GetName();
+ CbWriter.AddField(FieldName, Field);
+ }
+ CbWriter.EndObject();
+ }
+ }
+ CbWriter.EndObject();
+ };
+
+ void CbWriteOplogOps(CidStore& CidStore,
+ ProjectStore::Oplog& Oplog,
+ bool Details,
+ bool OpDetails,
+ bool AttachmentDetails,
+ CbObjectWriter& Cbo)
+ {
+ Cbo.BeginArray("ops");
+ {
+ Oplog.IterateOplogWithKey([&Cbo, &CidStore, Details, OpDetails, AttachmentDetails](int LSN, const Oid& Key, CbObject Op) {
+ CbWriteOp(CidStore, Details, OpDetails, AttachmentDetails, LSN, Key, Op, Cbo);
+ });
+ }
+ Cbo.EndArray();
+ }
+
+ void CbWriteOplog(CidStore& CidStore,
+ ProjectStore::Oplog& Oplog,
+ bool Details,
+ bool OpDetails,
+ bool AttachmentDetails,
+ CbObjectWriter& Cbo)
+ {
+ Cbo.BeginObject();
+ {
+ Cbo.AddString("name", Oplog.OplogId());
+ CbWriteOplogOps(CidStore, Oplog, Details, OpDetails, AttachmentDetails, Cbo);
+ }
+ Cbo.EndObject();
+ }
+
+ void CbWriteOplogs(CidStore& CidStore,
+ ProjectStore::Project& Project,
+ std::vector<std::string> OpLogs,
+ bool Details,
+ bool OpDetails,
+ bool AttachmentDetails,
+ CbObjectWriter& Cbo)
+ {
+ Cbo.BeginArray("oplogs");
+ {
+ for (const std::string& OpLogId : OpLogs)
+ {
+ ProjectStore::Oplog* Oplog = Project.OpenOplog(OpLogId);
+ if (Oplog != nullptr)
+ {
+ CbWriteOplog(CidStore, *Oplog, Details, OpDetails, AttachmentDetails, Cbo);
+ }
+ }
+ }
+ Cbo.EndArray();
+ }
+
+ void CbWriteProject(CidStore& CidStore,
+ ProjectStore::Project& Project,
+ std::vector<std::string> OpLogs,
+ bool Details,
+ bool OpDetails,
+ bool AttachmentDetails,
+ CbObjectWriter& Cbo)
+ {
+ Cbo.BeginObject();
+ {
+ Cbo.AddString("name", Project.Identifier);
+ CbWriteOplogs(CidStore, Project, OpLogs, Details, OpDetails, AttachmentDetails, Cbo);
+ }
+ Cbo.EndObject();
+ }
+
} // namespace
//////////////////////////////////////////////////////////////////////////
@@ -681,6 +879,69 @@ ProjectStore::Oplog::IterateOplog(std::function<void(CbObject)>&& Handler)
m_Storage->ReplayLog(Entries, [&](CbObject Op) { Handler(Op); });
}
+void
+ProjectStore::Oplog::IterateOplogWithKey(std::function<void(int, const Oid&, CbObject)>&& Handler)
+{
+ RwLock::SharedLockScope _(m_OplogLock);
+ if (!m_Storage)
+ {
+ return;
+ }
+
+ std::vector<size_t> EntryIndexes;
+ std::vector<OplogEntryAddress> Entries;
+ std::vector<Oid> Keys;
+ std::vector<int> LSNs;
+ Entries.reserve(m_LatestOpMap.size());
+ EntryIndexes.reserve(m_LatestOpMap.size());
+ Keys.reserve(m_LatestOpMap.size());
+ LSNs.reserve(m_LatestOpMap.size());
+
+ for (const auto& Kv : m_LatestOpMap)
+ {
+ const auto AddressEntry = m_OpAddressMap.find(Kv.second);
+ ZEN_ASSERT(AddressEntry != m_OpAddressMap.end());
+
+ Entries.push_back(AddressEntry->second);
+ Keys.push_back(Kv.first);
+ LSNs.push_back(Kv.second);
+ EntryIndexes.push_back(EntryIndexes.size());
+ }
+
+ std::sort(EntryIndexes.begin(), EntryIndexes.end(), [&Entries](const size_t& Lhs, const size_t& Rhs) {
+ const OplogEntryAddress& LhsEntry = Entries[Lhs];
+ const OplogEntryAddress& RhsEntry = Entries[Rhs];
+ return LhsEntry.Offset < RhsEntry.Offset;
+ });
+ std::vector<OplogEntryAddress> SortedEntries;
+ SortedEntries.reserve(EntryIndexes.size());
+ for (size_t Index : EntryIndexes)
+ {
+ SortedEntries.push_back(Entries[Index]);
+ }
+
+ size_t EntryIndex = 0;
+ m_Storage->ReplayLog(SortedEntries, [&](CbObject Op) {
+ Handler(LSNs[EntryIndex], Keys[EntryIndex], Op);
+ EntryIndex++;
+ });
+}
+
+int
+ProjectStore::Oplog::GetOpIndexByKey(const Oid& Key)
+{
+ RwLock::SharedLockScope _(m_OplogLock);
+ if (!m_Storage)
+ {
+ return {};
+ }
+ if (const auto LatestOp = m_LatestOpMap.find(Key); LatestOp != m_LatestOpMap.end())
+ {
+ return LatestOp->second;
+ }
+ return -1;
+}
+
std::optional<CbObject>
ProjectStore::Oplog::GetOpByKey(const Oid& Key)
{
@@ -2240,14 +2501,17 @@ ProjectStore::Import(ProjectStore::Project& Project, ProjectStore::Oplog& Oplog,
//////////////////////////////////////////////////////////////////////////
-HttpProjectService::HttpProjectService(CidStore& Store, ProjectStore* Projects, AuthMgr& AuthMgr)
+HttpProjectService::HttpProjectService(CidStore& Store, ProjectStore* Projects, HttpStatsService& StatsService, AuthMgr& AuthMgr)
: m_Log(logging::Get("project"))
, m_CidStore(Store)
, m_ProjectStore(Projects)
+, m_StatsService(StatsService)
, m_AuthMgr(AuthMgr)
{
using namespace std::literals;
+ m_StatsService.RegisterHandler("prj", *this);
+
m_Router.AddPattern("project", "([[:alnum:]_.]+)");
m_Router.AddPattern("log", "([[:alnum:]_.]+)");
m_Router.AddPattern("op", "([[:digit:]]+?)");
@@ -3174,10 +3438,231 @@ HttpProjectService::HttpProjectService(CidStore& Store, ProjectStore* Projects,
m_ProjectStore->Rpc(HttpReq, ProjectId, OplogId, std::move(Payload), m_AuthMgr);
},
HttpVerb::kPost);
+
+ m_Router.RegisterRoute(
+ "details\\$",
+ [this](HttpRouterRequest& Req) {
+ HttpServerRequest& HttpReq = Req.ServerRequest();
+
+ HttpServerRequest::QueryParams Params = HttpReq.GetQueryParams();
+ bool CSV = Params.GetValue("csv") == "true";
+ bool Details = Params.GetValue("details") == "true";
+ bool OpDetails = Params.GetValue("opdetails") == "true";
+ bool AttachmentDetails = Params.GetValue("attachmentdetails") == "true";
+
+ if (CSV)
+ {
+ ExtendableStringBuilder<4096> CSVWriter;
+ CSVHeader(Details, AttachmentDetails, CSVWriter);
+
+ m_ProjectStore->IterateProjects([&](ProjectStore::Project& Project) {
+ Project.IterateOplogs([&](ProjectStore::Oplog& Oplog) {
+ Oplog.IterateOplogWithKey(
+ [this, &Project, &Oplog, &CSVWriter, Details, AttachmentDetails](int LSN, const Oid& Key, CbObject Op) {
+ CSVWriteOp(m_CidStore,
+ Project.Identifier,
+ Oplog.OplogId(),
+ Details,
+ AttachmentDetails,
+ LSN,
+ Key,
+ Op,
+ CSVWriter);
+ });
+ });
+ });
+
+ HttpReq.WriteResponse(HttpResponseCode::OK, HttpContentType::kText, CSVWriter.ToView());
+ }
+ else
+ {
+ CbObjectWriter Cbo;
+ Cbo.BeginArray("projects");
+ {
+ m_ProjectStore->DiscoverProjects();
+
+ m_ProjectStore->IterateProjects([&](ProjectStore::Project& Project) {
+ std::vector<std::string> OpLogs = Project.ScanForOplogs();
+ CbWriteProject(m_CidStore, Project, OpLogs, Details, OpDetails, AttachmentDetails, Cbo);
+ });
+ }
+ Cbo.EndArray();
+ HttpReq.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+ }
+ },
+ HttpVerb::kGet);
+
+ m_Router.RegisterRoute(
+ "details\\$/{project}",
+ [this](HttpRouterRequest& Req) {
+ HttpServerRequest& HttpReq = Req.ServerRequest();
+ const auto& ProjectId = Req.GetCapture(1);
+
+ HttpServerRequest::QueryParams Params = HttpReq.GetQueryParams();
+ bool CSV = Params.GetValue("csv") == "true";
+ bool Details = Params.GetValue("details") == "true";
+ bool OpDetails = Params.GetValue("opdetails") == "true";
+ bool AttachmentDetails = Params.GetValue("attachmentdetails") == "true";
+
+ Ref<ProjectStore::Project> FoundProject = m_ProjectStore->OpenProject(ProjectId);
+ if (!FoundProject)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+ ProjectStore::Project& Project = *FoundProject.Get();
+ if (CSV)
+ {
+ ExtendableStringBuilder<4096> CSVWriter;
+ CSVHeader(Details, AttachmentDetails, CSVWriter);
+
+ FoundProject->IterateOplogs([&](ProjectStore::Oplog& Oplog) {
+ Oplog.IterateOplogWithKey([this, &Project, &Oplog, &CSVWriter, Details, AttachmentDetails](int LSN,
+ const Oid& Key,
+ CbObject Op) {
+ CSVWriteOp(m_CidStore, Project.Identifier, Oplog.OplogId(), Details, AttachmentDetails, LSN, Key, Op, CSVWriter);
+ });
+ });
+ HttpReq.WriteResponse(HttpResponseCode::OK, HttpContentType::kText, CSVWriter.ToView());
+ }
+ else
+ {
+ CbObjectWriter Cbo;
+ std::vector<std::string> OpLogs = FoundProject->ScanForOplogs();
+ Cbo.BeginArray("projects");
+ {
+ CbWriteProject(m_CidStore, Project, OpLogs, Details, OpDetails, AttachmentDetails, Cbo);
+ }
+ Cbo.EndArray();
+ HttpReq.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+ }
+ },
+ HttpVerb::kGet);
+
+ m_Router.RegisterRoute(
+ "details\\$/{project}/{log}",
+ [this](HttpRouterRequest& Req) {
+ HttpServerRequest& HttpReq = Req.ServerRequest();
+ const auto& ProjectId = Req.GetCapture(1);
+ const auto& OplogId = Req.GetCapture(2);
+
+ HttpServerRequest::QueryParams Params = HttpReq.GetQueryParams();
+ bool CSV = Params.GetValue("csv") == "true";
+ bool Details = Params.GetValue("details") == "true";
+ bool OpDetails = Params.GetValue("opdetails") == "true";
+ bool AttachmentDetails = Params.GetValue("attachmentdetails") == "true";
+
+ Ref<ProjectStore::Project> FoundProject = m_ProjectStore->OpenProject(ProjectId);
+ if (!FoundProject)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+ ProjectStore::Oplog* FoundLog = FoundProject->OpenOplog(OplogId);
+
+ if (!FoundLog)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+
+ ProjectStore::Project& Project = *FoundProject.Get();
+ ProjectStore::Oplog& Oplog = *FoundLog;
+ if (CSV)
+ {
+ ExtendableStringBuilder<4096> CSVWriter;
+ CSVHeader(Details, AttachmentDetails, CSVWriter);
+
+ Oplog.IterateOplogWithKey(
+ [this, &Project, &Oplog, &CSVWriter, Details, AttachmentDetails](int LSN, const Oid& Key, CbObject Op) {
+ CSVWriteOp(m_CidStore, Project.Identifier, Oplog.OplogId(), Details, AttachmentDetails, LSN, Key, Op, CSVWriter);
+ });
+ HttpReq.WriteResponse(HttpResponseCode::OK, HttpContentType::kText, CSVWriter.ToView());
+ }
+ else
+ {
+ CbObjectWriter Cbo;
+ Cbo.BeginArray("oplogs");
+ {
+ CbWriteOplog(m_CidStore, Oplog, Details, OpDetails, AttachmentDetails, Cbo);
+ }
+ Cbo.EndArray();
+ HttpReq.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+ }
+ },
+ HttpVerb::kGet);
+
+ m_Router.RegisterRoute(
+ "details\\$/{project}/{log}/{chunk}",
+ [this](HttpRouterRequest& Req) {
+ HttpServerRequest& HttpReq = Req.ServerRequest();
+ const auto& ProjectId = Req.GetCapture(1);
+ const auto& OplogId = Req.GetCapture(2);
+ const auto& ChunkId = Req.GetCapture(3);
+
+ HttpServerRequest::QueryParams Params = HttpReq.GetQueryParams();
+ bool CSV = Params.GetValue("csv") == "true";
+ bool Details = Params.GetValue("details") == "true";
+ bool OpDetails = Params.GetValue("opdetails") == "true";
+ bool AttachmentDetails = Params.GetValue("attachmentdetails") == "true";
+
+ Ref<ProjectStore::Project> FoundProject = m_ProjectStore->OpenProject(ProjectId);
+ if (!FoundProject)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+ ProjectStore::Oplog* FoundLog = FoundProject->OpenOplog(OplogId);
+
+ if (!FoundLog)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+
+ if (ChunkId.size() != 2 * sizeof(Oid::OidBits))
+ {
+ return HttpReq.WriteResponse(
+ HttpResponseCode::BadRequest,
+ HttpContentType::kText,
+ fmt::format("Chunk info request for invalid chunk id '{}/{}'/'{}'", ProjectId, OplogId, ChunkId));
+ }
+
+ const Oid ObjId = Oid::FromHexString(ChunkId);
+ ProjectStore::Project& Project = *FoundProject.Get();
+ ProjectStore::Oplog& Oplog = *FoundLog;
+
+ int LSN = Oplog.GetOpIndexByKey(ObjId);
+ if (LSN == -1)
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+ std::optional<CbObject> Op = Oplog.GetOpByIndex(LSN);
+ if (!Op.has_value())
+ {
+ return HttpReq.WriteResponse(HttpResponseCode::NotFound);
+ }
+
+ if (CSV)
+ {
+ ExtendableStringBuilder<4096> CSVWriter;
+ CSVHeader(Details, AttachmentDetails, CSVWriter);
+
+ CSVWriteOp(m_CidStore, Project.Identifier, Oplog.OplogId(), Details, AttachmentDetails, LSN, ObjId, Op.value(), CSVWriter);
+ HttpReq.WriteResponse(HttpResponseCode::OK, HttpContentType::kText, CSVWriter.ToView());
+ }
+ else
+ {
+ CbObjectWriter Cbo;
+ Cbo.BeginArray("ops");
+ {
+ CbWriteOp(m_CidStore, Details, OpDetails, AttachmentDetails, LSN, ObjId, Op.value(), Cbo);
+ }
+ Cbo.EndArray();
+ HttpReq.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+ }
+ },
+ HttpVerb::kGet);
}
HttpProjectService::~HttpProjectService()
{
+ m_StatsService.UnregisterHandler("prj", *this);
}
const char*
@@ -3195,6 +3680,40 @@ HttpProjectService::HandleRequest(HttpServerRequest& Request)
}
}
+void
+HttpProjectService::HandleStatsRequest(HttpServerRequest& HttpReq)
+{
+ const GcStorageSize StoreSize = m_ProjectStore->StorageSize();
+ const CidStoreSize CidSize = m_CidStore.TotalSize();
+
+ CbObjectWriter Cbo;
+ Cbo.BeginObject("store");
+ {
+ Cbo.BeginObject("size");
+ {
+ Cbo << "disk" << StoreSize.DiskSize;
+ Cbo << "memory" << StoreSize.MemorySize;
+ }
+ Cbo.EndObject();
+ }
+ Cbo.EndObject();
+
+ Cbo.BeginObject("cid");
+ {
+ Cbo.BeginObject("size");
+ {
+ Cbo << "tiny" << CidSize.TinySize;
+ Cbo << "small" << CidSize.SmallSize;
+ Cbo << "large" << CidSize.LargeSize;
+ Cbo << "total" << CidSize.TotalSize;
+ }
+ Cbo.EndObject();
+ }
+ Cbo.EndObject();
+
+ return HttpReq.WriteResponse(HttpResponseCode::OK, Cbo.Save());
+}
+
//////////////////////////////////////////////////////////////////////////
#if ZEN_WITH_TESTS
diff --git a/zenserver/projectstore/projectstore.h b/zenserver/projectstore/projectstore.h
index 928a74f59..e4f664b85 100644
--- a/zenserver/projectstore/projectstore.h
+++ b/zenserver/projectstore/projectstore.h
@@ -7,6 +7,8 @@
#include <zenhttp/httpserver.h>
#include <zenstore/gc.h>
+#include "monitoring/httpstats.h"
+
ZEN_THIRD_PARTY_INCLUDES_START
#include <tsl/robin_map.h>
ZEN_THIRD_PARTY_INCLUDES_END
@@ -80,8 +82,10 @@ public:
void IterateFileMap(std::function<void(const Oid&, const std::string_view& ServerPath, const std::string_view& ClientPath)>&& Fn);
void IterateOplog(std::function<void(CbObject)>&& Fn);
+ void IterateOplogWithKey(std::function<void(int, const Oid&, CbObject)>&& Fn);
std::optional<CbObject> GetOpByKey(const Oid& Key);
std::optional<CbObject> GetOpByIndex(int Index);
+ int GetOpIndexByKey(const Oid& Key);
IoBuffer FindChunk(Oid ChunkId);
@@ -341,15 +345,17 @@ private:
// refs:
//
-class HttpProjectService : public HttpService
+class HttpProjectService : public HttpService, public IHttpStatsProvider
{
public:
- HttpProjectService(CidStore& Store, ProjectStore* InProjectStore, AuthMgr& AuthMgr);
+ HttpProjectService(CidStore& Store, ProjectStore* InProjectStore, HttpStatsService& StatsService, AuthMgr& AuthMgr);
~HttpProjectService();
virtual const char* BaseUri() const override;
virtual void HandleRequest(HttpServerRequest& Request) override;
+ virtual void HandleStatsRequest(HttpServerRequest& Request) override;
+
private:
inline spdlog::logger& Log() { return m_Log; }
@@ -357,6 +363,7 @@ private:
CidStore& m_CidStore;
HttpRequestRouter m_Router;
Ref<ProjectStore> m_ProjectStore;
+ HttpStatsService& m_StatsService;
AuthMgr& m_AuthMgr;
};
diff --git a/zenserver/zenserver.cpp b/zenserver/zenserver.cpp
index e3b364ea1..749f762f0 100644
--- a/zenserver/zenserver.cpp
+++ b/zenserver/zenserver.cpp
@@ -263,7 +263,7 @@ public:
ZEN_INFO("instantiating project service");
m_ProjectStore = new zen::ProjectStore(*m_CidStore, m_DataRoot / "projects", m_GcManager);
- m_HttpProjectService.reset(new zen::HttpProjectService{*m_CidStore, m_ProjectStore, *m_AuthMgr});
+ m_HttpProjectService.reset(new zen::HttpProjectService{*m_CidStore, m_ProjectStore, m_StatsService, *m_AuthMgr});
#if ZEN_WITH_COMPUTE_SERVICES
if (ServerOptions.ComputeServiceEnabled)
diff --git a/zenstore/blockstore.cpp b/zenstore/blockstore.cpp
index d743c431f..0e1c5b2c7 100644
--- a/zenstore/blockstore.cpp
+++ b/zenstore/blockstore.cpp
@@ -280,7 +280,7 @@ BlockStore::GetReclaimSnapshotState()
}
IoBuffer
-BlockStore::TryGetChunk(const BlockStoreLocation& Location)
+BlockStore::TryGetChunk(const BlockStoreLocation& Location) const
{
RwLock::SharedLockScope InsertLock(m_InsertLock);
if (auto BlockIt = m_ChunkBlocks.find(Location.BlockIndex); BlockIt != m_ChunkBlocks.end())
diff --git a/zenstore/include/zenstore/blockstore.h b/zenstore/include/zenstore/blockstore.h
index 5ef2d4694..857ccae38 100644
--- a/zenstore/include/zenstore/blockstore.h
+++ b/zenstore/include/zenstore/blockstore.h
@@ -132,7 +132,7 @@ public:
void WriteChunk(const void* Data, uint64_t Size, uint64_t Alignment, const WriteChunkCallback& Callback);
- IoBuffer TryGetChunk(const BlockStoreLocation& Location);
+ IoBuffer TryGetChunk(const BlockStoreLocation& Location) const;
void Flush();
ReclaimSnapshotState GetReclaimSnapshotState();
@@ -157,7 +157,7 @@ public:
private:
std::unordered_map<uint32_t, Ref<BlockStoreFile>> m_ChunkBlocks;
- RwLock m_InsertLock; // used to serialize inserts
+ mutable RwLock m_InsertLock; // used to serialize inserts
Ref<BlockStoreFile> m_WriteBlock;
std::uint64_t m_CurrentInsertOffset = 0;
std::atomic_uint32_t m_WriteBlockIndex{};