aboutsummaryrefslogtreecommitdiff
path: root/src/zenserver/frontend/html/indexer
diff options
context:
space:
mode:
authorMartin Ridgers <[email protected]>2024-11-26 08:53:06 +0100
committerGitHub Enterprise <[email protected]>2024-11-26 08:53:06 +0100
commit642ef2d3606b6a89d0750a88d0f40585965b989d (patch)
tree253f58083014ee902016e42cfe7bda93ea778200 /src/zenserver/frontend/html/indexer
parent5.5.14-pre1 (diff)
downloadzen-642ef2d3606b6a89d0750a88d0f40585965b989d.tar.xz
zen-642ef2d3606b6a89d0750a88d0f40585965b989d.zip
Dashboard: display package data sizes in oplog entry and tree views. (#232)
* Wrong divisor for friendly giga-values * We want Explorer style for kilo/kibi units; round up * var -> const - zero idea if this matters * Include sum of an entry's package data sizes in index * Method to enumerate all properties of a loaded oplog index * Include bulkdata size in an oplog index * Found a space that was missing * Show package data sizes when viewing an oplog entry * Navigating a component tree would error out at the end of the chain * Parameterise friendly rounding * Added size and rawsize columns to oplog tree view * Sort of parameterised indexer's worker count and page size * Right-align size columns on entry view page * Updated frontend .zip archive * A changelog update
Diffstat (limited to 'src/zenserver/frontend/html/indexer')
-rw-r--r--src/zenserver/frontend/html/indexer/indexer.js12
-rw-r--r--src/zenserver/frontend/html/indexer/worker.js45
2 files changed, 46 insertions, 11 deletions
diff --git a/src/zenserver/frontend/html/indexer/indexer.js b/src/zenserver/frontend/html/indexer/indexer.js
index 5bbb7c352..4412e3a57 100644
--- a/src/zenserver/frontend/html/indexer/indexer.js
+++ b/src/zenserver/frontend/html/indexer/indexer.js
@@ -55,6 +55,13 @@ class Indexer
for (const [_, name] of page)
yield name;
}
+
+ *enum_all()
+ {
+ for (const page of this._pages)
+ for (const [_, name, size, raw_size] of page)
+ yield [name, size|0, raw_size|0];
+ }
}
@@ -90,14 +97,13 @@ async function save(progress_cb, oplog_info, pages)
}
////////////////////////////////////////////////////////////////////////////////
-async function build(progress_cb, oplog_info)
+async function build(progress_cb, oplog_info, max_workers=6, page_size=48 << 10)
{
const project_id = oplog_info["project"];
const oplog = oplog_info["id"];
const init_msg = Message.create(Message.Init, project_id, oplog);
- const worker_n = Math.min(navigator.hardwareConcurrency / 2, 6);
- const page_size = 48 << 10;
+ const worker_n = Math.min(navigator.hardwareConcurrency / 2, max_workers);
const stride = page_size * worker_n;
const end = oplog_info["opcount"];
var entry_count = 0;
diff --git a/src/zenserver/frontend/html/indexer/worker.js b/src/zenserver/frontend/html/indexer/worker.js
index b8183cc6f..25c8d7671 100644
--- a/src/zenserver/frontend/html/indexer/worker.js
+++ b/src/zenserver/frontend/html/indexer/worker.js
@@ -31,7 +31,7 @@ async function map_id_to_key(project_id, oplog, start, end, page_size, stride)
.resource(uri)
.param("start", index)
.param("count", page_size)
- .param("fieldfilter", "packagedata,key")
+ .param("fieldfilter", "packagedata,bulkdata,key")
.cbo()
const entry_count = Math.min(page_size, -(index - end));
@@ -66,34 +66,63 @@ async function map_id_to_key(project_id, oplog, start, end, page_size, stride)
var key = undefined;
var pkg_data = undefined;
+ var bulk_data = undefined;
for (const field of entry)
{
- if (field.is_named("key")) key = field;
+ if (field.is_named("key")) key = field;
else if (field.is_named("packagedata")) pkg_data = field;
+ else if (field.is_named("bulkdata")) bulk_data = field;
}
if (key == undefined || pkg_data == undefined)
continue;
var id = 0n;
- for (var item of pkg_data.as_array())
+ var size = 0;
+ var raw_size = 0;
+ for (const item of pkg_data.as_array())
{
- var pkg_id = item.as_object().find("id");
- if (pkg_id == undefined)
+ var found = 0, pkg_id;
+ for (const field of item.as_object())
+ {
+ if (!id && field.is_named("id")) pkg_id = field.as_value();
+ else if (field.is_named("size")) size += field.as_value();
+ else if (field.is_named("rawsize")) raw_size += field.as_value();
+ else continue;
+ if (found++ >= 3)
+ break;
+ }
+
+ if (pkg_id === undefined)
continue;
- pkg_id = pkg_id.as_value().subarray(0, 8);
+ pkg_id = pkg_id.subarray(0, 8);
for (var i = 7; i >= 0; --i)
{
id <<= 8n;
id |= BigInt(pkg_id[i]);
}
- break;
+ }
+
+ if (bulk_data)
+ {
+ for (const item of bulk_data.as_array())
+ {
+ var found = 0;
+ for (const field of item.as_object())
+ {
+ if (field.is_named("size")) size += field.as_value();
+ else if (field.is_named("rawsize")) raw_size += field.as_value();
+ else continue;
+ if (found++ >= 2)
+ break;
+ }
+ }
}
if (id == 0)
continue;
- result[count] = [id, key.as_value()];
+ result[count] = [id, key.as_value(), size, raw_size];
count++;
}