aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorStefan Boberg <[email protected]>2026-04-11 12:46:01 +0200
committerGitHub Enterprise <[email protected]>2026-04-11 12:46:01 +0200
commitdc742b88d908d23e0c5c5d1d95994637658db2b2 (patch)
tree6fb25b88b64c92c503c239cf3cef497ed18ee172
parentReduce short-lived heap allocations in zenhttp (diff)
parenthub deprovision all (#938) (diff)
downloadzen-sb/reduce-allocs.tar.xz
zen-sb/reduce-allocs.zip
Merge branch 'main' into sb/reduce-allocssb/reduce-allocs
-rw-r--r--CHANGELOG.md7
-rw-r--r--src/zenremotestore/builds/buildstorageoperations.cpp5
-rw-r--r--src/zenserver-test/hub-tests.cpp33
-rw-r--r--src/zenserver/frontend/html/pages/cache.js5
-rw-r--r--src/zenserver/frontend/html/pages/hub.js66
-rw-r--r--src/zenserver/frontend/html/pages/projects.js5
-rw-r--r--src/zenserver/frontend/html/pages/start.js10
-rw-r--r--src/zenserver/frontend/html/util/widgets.js64
-rw-r--r--src/zenserver/frontend/html/zen.css47
-rw-r--r--src/zenserver/hub/httphubservice.cpp80
-rw-r--r--src/zenserver/hub/httphubservice.h1
-rw-r--r--thirdparty/VERSIONS.md1
-rw-r--r--thirdparty/rpmalloc/rpmalloc.c221
-rw-r--r--thirdparty/rpmalloc/rpmalloc.h84
-rw-r--r--thirdparty/xmake.lua6
15 files changed, 528 insertions, 107 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2beee79d4..01f340f59 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -7,6 +7,13 @@
- Single-byte ranges (`bytes=N-N`) are now correctly accepted (were previously rejected)
- Range byte positions widened from 32-bit to 64-bit; RFC 7233 imposes no size limit on byte range values
- Build store binary GET requests with a Range header now return 206 Partial Content with `Content-Range` (previously returned 200 OK without it)
+- Improvement: Updated rpmalloc to develop branch commit 262c698d7019 (2026-04-10), which fixes memory ordering on weak architectures and avoids assert on mmap failure with callback
+- Improvement: Increased rpmalloc page decommit thresholds to reduce commit/decommit churn under high allocation turnover
+- Improvement: Disk full error message for `builds download` now shows human-readable sizes and available free space
+- Improvement: Dashboard paginated lists now include a search input that jumps to the page containing the first match and highlights the row
+- Improvement: Dashboard paginated lists show a loading indicator while fetching data
+- Improvement: Hub dashboard navigates to and highlights newly provisioned instances
+- Feature: Hub bulk deprovision endpoint (`POST /hub/deprovision`) tears down all provisioned and hibernated modules in a single request
- Bugfix: Added logic to shared memory instance state management to ensure unclean shutdown followed by restart with identical pid doesn't lead to errors. Particularly likely to happen when running on k8s
## 5.8.3
diff --git a/src/zenremotestore/builds/buildstorageoperations.cpp b/src/zenremotestore/builds/buildstorageoperations.cpp
index c8cf3212c..1f8b96cc4 100644
--- a/src/zenremotestore/builds/buildstorageoperations.cpp
+++ b/src/zenremotestore/builds/buildstorageoperations.cpp
@@ -3036,7 +3036,10 @@ BuildsOperationUpdateFolder::CheckRequiredDiskSpace(const tsl::robin_map<std::st
if (Space.Free < (RequiredSpace + 16u * 1024u * 1024u))
{
throw std::runtime_error(
- fmt::format("Not enough free space for target path '{}', {} of free space is needed", m_Path, RequiredSpace));
+ fmt::format("Not enough free space for target path '{}', {} of free space is needed but only {} is available",
+ m_Path,
+ NiceBytes(RequiredSpace),
+ NiceBytes(Space.Free)));
}
}
diff --git a/src/zenserver-test/hub-tests.cpp b/src/zenserver-test/hub-tests.cpp
index 487e22b4b..35a840e5d 100644
--- a/src/zenserver-test/hub-tests.cpp
+++ b/src/zenserver-test/hub-tests.cpp
@@ -329,17 +329,36 @@ TEST_CASE("hub.lifecycle.children")
CHECK_EQ(Result.AsText(), "GhijklmNop"sv);
}
- Result = Client.Post("modules/abc/deprovision");
+ // Deprovision all modules at once
+ Result = Client.Post("deprovision");
REQUIRE(Result);
+ CHECK_EQ(Result.StatusCode, HttpResponseCode::Accepted);
+ {
+ CbObject Body = Result.AsObject();
+ CbArrayView AcceptedArr = Body["Accepted"].AsArrayView();
+ CHECK_EQ(AcceptedArr.Num(), 2u);
+ bool FoundAbc = false;
+ bool FoundDef = false;
+ for (CbFieldView F : AcceptedArr)
+ {
+ if (F.AsString() == "abc"sv)
+ {
+ FoundAbc = true;
+ }
+ else if (F.AsString() == "def"sv)
+ {
+ FoundDef = true;
+ }
+ }
+ CHECK(FoundAbc);
+ CHECK(FoundDef);
+ }
REQUIRE(WaitForModuleGone(Client, "abc"));
+ REQUIRE(WaitForModuleGone(Client, "def"));
{
HttpClient ModClient(fmt::format("http://localhost:{}", AbcPort), kFastTimeout);
CHECK(WaitForPortUnreachable(ModClient));
}
-
- Result = Client.Post("modules/def/deprovision");
- REQUIRE(Result);
- REQUIRE(WaitForModuleGone(Client, "def"));
{
HttpClient ModClient(fmt::format("http://localhost:{}", DefPort), kFastTimeout);
CHECK(WaitForPortUnreachable(ModClient));
@@ -349,6 +368,10 @@ TEST_CASE("hub.lifecycle.children")
Result = Client.Get("status");
REQUIRE(Result);
CHECK_EQ(Result.AsObject()["modules"].AsArrayView().Num(), 0u);
+
+ // Deprovision-all with no modules
+ Result = Client.Post("deprovision");
+ CHECK(Result);
}
static bool
diff --git a/src/zenserver/frontend/html/pages/cache.js b/src/zenserver/frontend/html/pages/cache.js
index 93059b81c..c6567f0be 100644
--- a/src/zenserver/frontend/html/pages/cache.js
+++ b/src/zenserver/frontend/html/pages/cache.js
@@ -56,7 +56,8 @@ export class Page extends ZenPage
this._cache_table = section.add_widget(Table, columns, Table.Flag_FitLeft|Table.Flag_PackRight|Table.Flag_AlignNumeric);
- this._cache_pager = new Pager(section, 25, () => this._render_cache_page());
+ this._cache_pager = new Pager(section, 25, () => this._render_cache_page(),
+ Pager.make_search_fn(() => this._cache_data, item => item.namespace));
const cache_drop_link = document.createElement("span");
cache_drop_link.className = "dropall zen_action";
cache_drop_link.style.position = "static";
@@ -64,6 +65,7 @@ export class Page extends ZenPage
cache_drop_link.addEventListener("click", () => this.drop_all());
this._cache_pager.prepend(cache_drop_link);
+ const loading = Pager.loading(section);
const zcache_info = await new Fetcher().resource("/z$/").json();
const namespaces = zcache_info["Namespaces"] || [];
const results = await Promise.allSettled(
@@ -75,6 +77,7 @@ export class Page extends ZenPage
.sort((a, b) => a.namespace.localeCompare(b.namespace));
this._cache_pager.set_total(this._cache_data.length);
this._render_cache_page();
+ loading.remove();
// Namespace detail area (inside namespaces section so it collapses together)
this._namespace_host = section;
diff --git a/src/zenserver/frontend/html/pages/hub.js b/src/zenserver/frontend/html/pages/hub.js
index 7ae1deb5c..3cbfe6092 100644
--- a/src/zenserver/frontend/html/pages/hub.js
+++ b/src/zenserver/frontend/html/pages/hub.js
@@ -6,6 +6,7 @@ import { ZenPage } from "./page.js"
import { Fetcher } from "../util/fetcher.js"
import { Friendly } from "../util/friendly.js"
import { Modal } from "../util/modal.js"
+import { flash_highlight } from "../util/widgets.js"
////////////////////////////////////////////////////////////////////////////////
const STABLE_STATES = new Set(["provisioned", "hibernated", "crashed"]);
@@ -159,8 +160,36 @@ export class Page extends ZenPage
this._btn_next.addEventListener("click", () => this._go_page(this._page + 1));
this._btn_provision = _make_bulk_btn("+", "Provision", () => this._show_provision_modal());
this._btn_obliterate = _make_bulk_btn("\uD83D\uDD25", "Obliterate", () => this._show_obliterate_modal());
+ this._search_input = document.createElement("input");
+ this._search_input.type = "text";
+ this._search_input.className = "module-pager-search";
+ this._search_input.placeholder = "Search module\u2026";
+ this._search_input.addEventListener("keydown", (e) =>
+ {
+ if (e.key === "Enter")
+ {
+ const term = this._search_input.value.trim().toLowerCase();
+ if (!term) { return; }
+ const idx = this._modules_data.findIndex(m =>
+ (m.moduleId || "").toLowerCase().includes(term)
+ );
+ if (idx >= 0)
+ {
+ const id = this._modules_data[idx].moduleId;
+ this._navigate_to_module(id);
+ this._flash_module(id);
+ }
+ else
+ {
+ this._search_input.style.outline = "2px solid var(--theme_fail)";
+ setTimeout(() => { this._search_input.style.outline = ""; }, 1000);
+ }
+ }
+ });
+
pager.appendChild(this._btn_provision);
pager.appendChild(this._btn_obliterate);
+ pager.appendChild(this._search_input);
pager.appendChild(this._btn_prev);
pager.appendChild(this._pager_label);
pager.appendChild(this._btn_next);
@@ -173,8 +202,11 @@ export class Page extends ZenPage
this._row_cache = new Map(); // moduleId → row refs, for in-place DOM updates
this._updating = false;
this._page = 0;
- this._page_size = 50;
+ this._page_size = 25;
this._expanded = new Set(); // moduleIds with open metrics panel
+ this._pending_highlight = null; // moduleId to navigate+flash after next poll
+ this._pending_highlight_timer = null;
+ this._loading = mod_section.tag().classify("pager-loading").text("Loading\u2026").inner();
await this._update();
this._poll_timer = setInterval(() => this._update(), 2000);
@@ -193,6 +225,15 @@ export class Page extends ZenPage
this._render_capacity(stats);
this._render_modules(status);
+ if (this._loading) { this._loading.remove(); this._loading = null; }
+ if (this._pending_highlight && this._module_map.has(this._pending_highlight))
+ {
+ const id = this._pending_highlight;
+ this._pending_highlight = null;
+ clearTimeout(this._pending_highlight_timer);
+ this._navigate_to_module(id);
+ this._flash_module(id);
+ }
}
catch (e) { /* service unavailable */ }
finally { this._updating = false; }
@@ -844,14 +885,19 @@ export class Page extends ZenPage
submit_label: "Provision",
on_submit: async (moduleId) => {
const resp = await fetch(`/hub/modules/${encodeURIComponent(moduleId)}/provision`, { method: "POST" });
- if (resp.ok)
+ if (!resp.ok)
{
- this._navigate_to_module(moduleId);
- return true;
+ const msg = await resp.text();
+ error_div.textContent = msg || ("HTTP " + resp.status);
+ return false;
}
- const msg = await resp.text();
- error_div.textContent = msg || ("HTTP " + resp.status);
- return false;
+ // Endpoint returns compact binary (CbObjectWriter), not text
+ if (resp.status === 200 || resp.status === 202)
+ {
+ this._pending_highlight = moduleId;
+ this._pending_highlight_timer = setTimeout(() => { this._pending_highlight = null; }, 5000);
+ }
+ return true;
}
});
}
@@ -885,4 +931,10 @@ export class Page extends ZenPage
}
}
+ _flash_module(id)
+ {
+ const cached = this._row_cache.get(id);
+ if (cached) { flash_highlight(cached.tr); }
+ }
+
}
diff --git a/src/zenserver/frontend/html/pages/projects.js b/src/zenserver/frontend/html/pages/projects.js
index 52d5dbb88..e613086a9 100644
--- a/src/zenserver/frontend/html/pages/projects.js
+++ b/src/zenserver/frontend/html/pages/projects.js
@@ -49,7 +49,8 @@ export class Page extends ZenPage
this._project_table = section.add_widget(Table, columns, Table.Flag_FitLeft|Table.Flag_PackRight|Table.Flag_Sortable|Table.Flag_AlignNumeric);
- this._project_pager = new Pager(section, 25, () => this._render_projects_page());
+ this._project_pager = new Pager(section, 25, () => this._render_projects_page(),
+ Pager.make_search_fn(() => this._projects_data, p => p.Id));
const drop_link = document.createElement("span");
drop_link.className = "dropall zen_action";
drop_link.style.position = "static";
@@ -57,10 +58,12 @@ export class Page extends ZenPage
drop_link.addEventListener("click", () => this.drop_all());
this._project_pager.prepend(drop_link);
+ const loading = Pager.loading(section);
this._projects_data = await new Fetcher().resource("/prj/list").json();
this._projects_data.sort((a, b) => a.Id.localeCompare(b.Id));
this._project_pager.set_total(this._projects_data.length);
this._render_projects_page();
+ loading.remove();
// Project detail area (inside projects section so it collapses together)
this._project_host = section;
diff --git a/src/zenserver/frontend/html/pages/start.js b/src/zenserver/frontend/html/pages/start.js
index 14ec4bd4a..9a3eb6de3 100644
--- a/src/zenserver/frontend/html/pages/start.js
+++ b/src/zenserver/frontend/html/pages/start.js
@@ -62,7 +62,8 @@ export class Page extends ZenPage
];
this._project_table = section.add_widget(Table, columns);
- this._project_pager = new Pager(section, 25, () => this._render_projects_page());
+ this._project_pager = new Pager(section, 25, () => this._render_projects_page(),
+ Pager.make_search_fn(() => this._projects_data, p => p.Id));
const drop_link = document.createElement("span");
drop_link.className = "dropall zen_action";
drop_link.style.position = "static";
@@ -70,10 +71,12 @@ export class Page extends ZenPage
drop_link.addEventListener("click", () => this.drop_all("projects"));
this._project_pager.prepend(drop_link);
+ const prj_loading = Pager.loading(section);
this._projects_data = await new Fetcher().resource("/prj/list").json();
this._projects_data.sort((a, b) => a.Id.localeCompare(b.Id));
this._project_pager.set_total(this._projects_data.length);
this._render_projects_page();
+ prj_loading.remove();
}
// cache
@@ -92,7 +95,8 @@ export class Page extends ZenPage
];
this._cache_table = section.add_widget(Table, columns, Table.Flag_FitLeft|Table.Flag_PackRight);
- this._cache_pager = new Pager(section, 25, () => this._render_cache_page());
+ this._cache_pager = new Pager(section, 25, () => this._render_cache_page(),
+ Pager.make_search_fn(() => this._cache_data, item => item.namespace));
const cache_drop_link = document.createElement("span");
cache_drop_link.className = "dropall zen_action";
cache_drop_link.style.position = "static";
@@ -100,6 +104,7 @@ export class Page extends ZenPage
cache_drop_link.addEventListener("click", () => this.drop_all("z$"));
this._cache_pager.prepend(cache_drop_link);
+ const cache_loading = Pager.loading(section);
const zcache_info = await new Fetcher().resource("/z$/").json();
const namespaces = zcache_info["Namespaces"] || [];
const results = await Promise.allSettled(
@@ -111,6 +116,7 @@ export class Page extends ZenPage
.sort((a, b) => a.namespace.localeCompare(b.namespace));
this._cache_pager.set_total(this._cache_data.length);
this._render_cache_page();
+ cache_loading.remove();
}
// version
diff --git a/src/zenserver/frontend/html/util/widgets.js b/src/zenserver/frontend/html/util/widgets.js
index 33d6755ac..b8fc720c1 100644
--- a/src/zenserver/frontend/html/util/widgets.js
+++ b/src/zenserver/frontend/html/util/widgets.js
@@ -6,6 +6,14 @@ import { Component } from "./component.js"
import { Friendly } from "../util/friendly.js"
////////////////////////////////////////////////////////////////////////////////
+export function flash_highlight(element)
+{
+ if (!element) { return; }
+ element.classList.add("pager-search-highlight");
+ setTimeout(() => { element.classList.remove("pager-search-highlight"); }, 1500);
+}
+
+////////////////////////////////////////////////////////////////////////////////
class Widget extends Component
{
}
@@ -404,12 +412,14 @@ export class ProgressBar extends Widget
////////////////////////////////////////////////////////////////////////////////
export class Pager
{
- constructor(section, page_size, on_change)
+ constructor(section, page_size, on_change, search_fn)
{
this._page = 0;
this._page_size = page_size;
this._total = 0;
this._on_change = on_change;
+ this._search_fn = search_fn || null;
+ this._search_input = null;
const pager = section.tag().classify("module-pager").inner();
this._btn_prev = document.createElement("button");
@@ -422,6 +432,23 @@ export class Pager
this._btn_next.className = "module-pager-btn";
this._btn_next.textContent = "Next \u2192";
this._btn_next.addEventListener("click", () => this._go_page(this._page + 1));
+
+ if (this._search_fn)
+ {
+ this._search_input = document.createElement("input");
+ this._search_input.type = "text";
+ this._search_input.className = "module-pager-search";
+ this._search_input.placeholder = "Search\u2026";
+ this._search_input.addEventListener("keydown", (e) =>
+ {
+ if (e.key === "Enter")
+ {
+ this._do_search(this._search_input.value.trim());
+ }
+ });
+ pager.appendChild(this._search_input);
+ }
+
pager.appendChild(this._btn_prev);
pager.appendChild(this._label);
pager.appendChild(this._btn_next);
@@ -432,7 +459,8 @@ export class Pager
prepend(element)
{
- this._pager.insertBefore(element, this._btn_prev);
+ const ref = this._search_input || this._btn_prev;
+ this._pager.insertBefore(element, ref);
}
set_total(n)
@@ -461,6 +489,23 @@ export class Pager
this._on_change();
}
+ _do_search(term)
+ {
+ if (!term || !this._search_fn)
+ {
+ return;
+ }
+ const result = this._search_fn(term);
+ if (!result)
+ {
+ this._search_input.style.outline = "2px solid var(--theme_fail)";
+ setTimeout(() => { this._search_input.style.outline = ""; }, 1000);
+ return;
+ }
+ this._go_page(Math.floor(result.index / this._page_size));
+ flash_highlight(this._pager.parentNode.querySelector(`[zs_name="${CSS.escape(result.name)}"]`));
+ }
+
_update_ui()
{
const total = this._total;
@@ -474,6 +519,21 @@ export class Pager
? "No items"
: `${start}\u2013${end} of ${total}`;
}
+
+ static make_search_fn(get_data, get_key)
+ {
+ return (term) => {
+ const t = term.toLowerCase();
+ const data = get_data();
+ const i = data.findIndex(item => get_key(item).toLowerCase().includes(t));
+ return i < 0 ? null : { index: i, name: get_key(data[i]) };
+ };
+ }
+
+ static loading(section)
+ {
+ return section.tag().classify("pager-loading").text("Loading\u2026").inner();
+ }
}
diff --git a/src/zenserver/frontend/html/zen.css b/src/zenserver/frontend/html/zen.css
index ca577675b..8d4e60472 100644
--- a/src/zenserver/frontend/html/zen.css
+++ b/src/zenserver/frontend/html/zen.css
@@ -1749,6 +1749,53 @@ tr:last-child td {
text-align: center;
}
+.module-pager-search {
+ font-size: 12px;
+ padding: 4px 8px;
+ width: 14em;
+ border: 1px solid var(--theme_g2);
+ border-radius: 4px;
+ background: var(--theme_g4);
+ color: var(--theme_g0);
+ outline: none;
+ transition: border-color 0.15s, outline 0.3s;
+}
+
+.module-pager-search:focus {
+ border-color: var(--theme_p0);
+}
+
+.module-pager-search::placeholder {
+ color: var(--theme_g1);
+}
+
+@keyframes pager-search-flash {
+ from { box-shadow: inset 0 0 0 100px var(--theme_p2); }
+ to { box-shadow: inset 0 0 0 100px transparent; }
+}
+
+.zen_table > .pager-search-highlight > div {
+ animation: pager-search-flash 1s linear forwards;
+}
+
+.module-table .pager-search-highlight td {
+ animation: pager-search-flash 1s linear forwards;
+}
+
+@keyframes pager-loading-pulse {
+ 0%, 100% { opacity: 0.6; }
+ 50% { opacity: 0.2; }
+}
+
+.pager-loading {
+ color: var(--theme_g1);
+ font-style: italic;
+ font-size: 14px;
+ font-weight: 600;
+ padding: 12px 0;
+ animation: pager-loading-pulse 1.5s ease-in-out infinite;
+}
+
.module-table td, .module-table th {
padding-top: 4px;
padding-bottom: 4px;
diff --git a/src/zenserver/hub/httphubservice.cpp b/src/zenserver/hub/httphubservice.cpp
index e6a900066..e4b0c28d0 100644
--- a/src/zenserver/hub/httphubservice.cpp
+++ b/src/zenserver/hub/httphubservice.cpp
@@ -121,6 +121,11 @@ HttpHubService::HttpHubService(Hub& Hub, HttpProxyHandler& Proxy, HttpStatsServi
HttpVerb::kGet);
m_Router.RegisterRoute(
+ "deprovision",
+ [this](HttpRouterRequest& Req) { HandleDeprovisionAll(Req.ServerRequest()); },
+ HttpVerb::kPost);
+
+ m_Router.RegisterRoute(
"modules/{moduleid}",
[this](HttpRouterRequest& Req) {
std::string_view ModuleId = Req.GetCapture(1);
@@ -371,6 +376,81 @@ HttpHubService::GetActivityCounter()
}
void
+HttpHubService::HandleDeprovisionAll(HttpServerRequest& Request)
+{
+ std::vector<std::string> ModulesToDeprovision;
+ m_Hub.EnumerateModules([&ModulesToDeprovision](std::string_view ModuleId, const Hub::InstanceInfo& InstanceInfo) {
+ if (InstanceInfo.State == HubInstanceState::Provisioned || InstanceInfo.State == HubInstanceState::Hibernated)
+ {
+ ModulesToDeprovision.push_back(std::string(ModuleId));
+ }
+ });
+
+ if (ModulesToDeprovision.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::OK);
+ }
+ std::vector<std::string> Rejected;
+ std::vector<std::string> Accepted;
+ std::vector<std::string> Completed;
+ for (const std::string& ModuleId : ModulesToDeprovision)
+ {
+ Hub::Response Response = m_Hub.Deprovision(ModuleId);
+ switch (Response.ResponseCode)
+ {
+ case Hub::EResponseCode::NotFound:
+ // Ignore
+ break;
+ case Hub::EResponseCode::Rejected:
+ Rejected.push_back(ModuleId);
+ break;
+ case Hub::EResponseCode::Accepted:
+ Accepted.push_back(ModuleId);
+ break;
+ case Hub::EResponseCode::Completed:
+ Completed.push_back(ModuleId);
+ break;
+ }
+ }
+ if (Rejected.empty() && Accepted.empty() && Completed.empty())
+ {
+ return Request.WriteResponse(HttpResponseCode::OK);
+ }
+ HttpResponseCode Response = HttpResponseCode::OK;
+ CbObjectWriter Writer;
+ if (!Completed.empty())
+ {
+ Writer.BeginArray("Completed");
+ for (const std::string& ModuleId : Completed)
+ {
+ Writer.AddString(ModuleId);
+ }
+ Writer.EndArray(); // Completed
+ }
+ if (!Accepted.empty())
+ {
+ Writer.BeginArray("Accepted");
+ for (const std::string& ModuleId : Accepted)
+ {
+ Writer.AddString(ModuleId);
+ }
+ Writer.EndArray(); // Accepted
+ Response = HttpResponseCode::Accepted;
+ }
+ if (!Rejected.empty())
+ {
+ Writer.BeginArray("Rejected");
+ for (const std::string& ModuleId : Rejected)
+ {
+ Writer.AddString(ModuleId);
+ }
+ Writer.EndArray(); // Rejected
+ Response = HttpResponseCode::Conflict;
+ }
+ Request.WriteResponse(Response, Writer.Save());
+}
+
+void
HttpHubService::HandleModuleGet(HttpServerRequest& Request, std::string_view ModuleId)
{
Hub::InstanceInfo InstanceInfo;
diff --git a/src/zenserver/hub/httphubservice.h b/src/zenserver/hub/httphubservice.h
index ff2cb0029..f4d1b0b89 100644
--- a/src/zenserver/hub/httphubservice.h
+++ b/src/zenserver/hub/httphubservice.h
@@ -53,6 +53,7 @@ private:
HttpStatsService& m_StatsService;
HttpStatusService& m_StatusService;
+ void HandleDeprovisionAll(HttpServerRequest& Request);
void HandleModuleGet(HttpServerRequest& Request, std::string_view ModuleId);
void HandleModuleDelete(HttpServerRequest& Request, std::string_view ModuleId);
diff --git a/thirdparty/VERSIONS.md b/thirdparty/VERSIONS.md
index 38a1415d3..9b3dcd103 100644
--- a/thirdparty/VERSIONS.md
+++ b/thirdparty/VERSIONS.md
@@ -19,6 +19,7 @@ dependency.
* doctest - v2.4.12 from https://github.com/doctest/doctest/releases/download/v2.4.12/doctest.h
* fmt - v12.0.0 from https://github.com/fmtlib/fmt/archive/refs/tags/12.0.0.tar.gz
* robin-map - v1.4.0 from https://github.com/Tessil/robin-map/archive/refs/tags/v1.4.0.tar.gz
+* rpmalloc - 1.5.0-dev (develop branch commit 262c698d7019, 2026-04-10) from https://github.com/mjansson/rpmalloc (`global_page_free_overflow` and `global_page_free_retain` manually tweaked)
* ryml - v0.5.0 from https://github.com/biojppm/rapidyaml (note that there are submodules here which have also been fetched, after stripping all `.git` metadata, for future updates it's probably easier to just grab the .zip/.tar.gz since it includes all submodules)
* sol2 - v3.5.0 from https://github.com/ThePhD/sol2/archive/refs/tags/v3.5.0.tar.gz (single/single.py generates the headers)
* spdlog - v1.16.0 from https://github.com/gabime/spdlog/releases/tag/v1.16.0.zip
diff --git a/thirdparty/rpmalloc/rpmalloc.c b/thirdparty/rpmalloc/rpmalloc.c
index 08cefe6dd..b8fe16a0a 100644
--- a/thirdparty/rpmalloc/rpmalloc.c
+++ b/thirdparty/rpmalloc/rpmalloc.c
@@ -57,6 +57,9 @@
#endif
#if PLATFORM_WINDOWS
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
#include <windows.h>
#include <fibersapi.h>
static DWORD fls_key;
@@ -184,6 +187,12 @@ madvise(caddr_t, size_t, int);
#define SPAN_SIZE (256 * 1024 * 1024)
#define SPAN_MASK (~((uintptr_t)(SPAN_SIZE - 1)))
+#if ENABLE_VALIDATE_ARGS
+//! Maximum allocation size to avoid integer overflow
+#undef MAX_ALLOC_SIZE
+#define MAX_ALLOC_SIZE (((size_t)-1) - SPAN_SIZE)
+#endif
+
////////////
///
/// Utility macros
@@ -258,13 +267,13 @@ static inline size_t
rpmalloc_clz(uintptr_t x) {
#if ARCH_64BIT
#if defined(_MSC_VER) && !defined(__clang__)
- return (size_t)_lzcnt_u64(x);
+ return (size_t)__lzcnt64(x);
#else
return (size_t)__builtin_clzll(x);
#endif
#else
#if defined(_MSC_VER) && !defined(__clang__)
- return (size_t)_lzcnt_u32(x);
+ return (size_t)__lzcnt32(x);
#else
return (size_t)__builtin_clzl(x);
#endif
@@ -279,9 +288,9 @@ wait_spin(void) {
#else
_mm_pause();
#endif
-#elif defined(__x86_64__) || defined(__i386__)
+#elif (defined(__x86_64__) || defined(__i386__)) && !defined(_M_ARM64EC)
__asm__ volatile("pause" ::: "memory");
-#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7)
+#elif defined(__aarch64__) || (defined(__arm__) && __ARM_ARCH >= 7) || defined(_M_ARM64EC)
__asm__ volatile("yield" ::: "memory");
#elif defined(__powerpc__) || defined(__powerpc64__)
// No idea if ever been compiled in such archs but ... as precaution
@@ -468,6 +477,9 @@ struct heap_t {
uint32_t offset;
//! Memory map size
size_t mapped_size;
+#if RPMALLOC_HEAP_STATISTICS
+ struct rpmalloc_heap_statistics_t stats;
+#endif
};
_Static_assert(sizeof(page_t) <= PAGE_HEADER_SIZE, "Invalid page header size");
@@ -530,10 +542,10 @@ static const size_class_t global_size_class[SIZE_CLASS_COUNT] = {
LCLASS(262144), LCLASS(327680), LCLASS(393216), LCLASS(458752), LCLASS(524288)};
//! Threshold number of pages for when free pages are decommitted
-static uint32_t global_page_free_overflow[4] = {16, 8, 2, 0};
+static uint32_t global_page_free_overflow[4] = {64, 16, 4, 0};
//! Number of pages to retain when free page threshold overflows
-static uint32_t global_page_free_retain[4] = {4, 2, 1, 0};
+static uint32_t global_page_free_retain[4] = {16, 4, 2, 0};
//! OS huge page support
static int os_huge_pages;
@@ -719,6 +731,8 @@ os_mmap(size_t size, size_t alignment, size_t* offset, size_t* mapped_size) {
// page to avoid saturating the OS commit limit
#if ENABLE_DECOMMIT
DWORD do_commit = 0;
+ if (global_config.disable_decommit)
+ do_commit = MEM_COMMIT;
#else
DWORD do_commit = MEM_COMMIT;
#endif
@@ -788,35 +802,29 @@ os_mmap(size_t size, size_t alignment, size_t* offset, size_t* mapped_size) {
page_mapped_current, memory_order_relaxed, memory_order_relaxed))
break;
}
-#if ENABLE_DECOMMIT
- size_t page_active_current =
- atomic_fetch_add_explicit(&global_statistics.page_active, page_count, memory_order_relaxed) + page_count;
- size_t page_active_peak = atomic_load_explicit(&global_statistics.page_active_peak, memory_order_relaxed);
- while (page_active_current > page_active_peak) {
- if (atomic_compare_exchange_weak_explicit(&global_statistics.page_active_peak, &page_active_peak,
- page_active_current, memory_order_relaxed, memory_order_relaxed))
- break;
- }
-#endif
#endif
return ptr;
}
-static void
+static int
os_mcommit(void* address, size_t size) {
#if ENABLE_DECOMMIT
- if (global_config.disable_decommit)
- return;
+ if (global_config.disable_decommit) {
+ return 0;
+ }
#if PLATFORM_WINDOWS
if (!VirtualAlloc(address, size, MEM_COMMIT, PAGE_READWRITE)) {
+ if (global_memory_interface->map_fail_callback && global_memory_interface->map_fail_callback(size))
+ return os_mcommit(address, size);
rpmalloc_assert(0, "Failed to commit virtual memory block");
+ return 1;
}
#else
- /*
- if (mprotect(address, size, PROT_READ | PROT_WRITE)) {
- rpmalloc_assert(0, "Failed to commit virtual memory block");
- }
- */
+ /*
+ if (mprotect(address, size, PROT_READ | PROT_WRITE)) {
+ rpmalloc_assert(0, "Failed to commit virtual memory block");
+ }
+ */
#endif
#if ENABLE_STATISTICS
size_t page_count = size / global_config.page_size;
@@ -833,23 +841,25 @@ os_mcommit(void* address, size_t size) {
#endif
(void)sizeof(address);
(void)sizeof(size);
+ return 0;
}
-static void
+static int
os_mdecommit(void* address, size_t size) {
#if ENABLE_DECOMMIT
if (global_config.disable_decommit)
- return;
+ return 1;
#if PLATFORM_WINDOWS
if (!VirtualFree(address, size, MEM_DECOMMIT)) {
rpmalloc_assert(0, "Failed to decommit virtual memory block");
+ return 1;
}
#else
- /*
- if (mprotect(address, size, PROT_NONE)) {
- rpmalloc_assert(0, "Failed to decommit virtual memory block");
- }
- */
+ /*
+ if (mprotect(address, size, PROT_NONE)) {
+ rpmalloc_assert(0, "Failed to decommit virtual memory block");
+ }
+ */
#if defined(MADV_DONTNEED)
if (madvise(address, size, MADV_DONTNEED)) {
#elif defined(MADV_FREE_REUSABLE)
@@ -865,6 +875,7 @@ os_mdecommit(void* address, size_t size) {
if (posix_madvise(address, size, POSIX_MADV_DONTNEED)) {
#endif
rpmalloc_assert(0, "Failed to decommit virtual memory block");
+ return 1;
}
#endif
#if ENABLE_STATISTICS
@@ -879,6 +890,7 @@ os_mdecommit(void* address, size_t size) {
(void)sizeof(address);
(void)sizeof(size);
#endif
+ return 0;
}
static void
@@ -986,19 +998,29 @@ page_decommit_memory_pages(page_t* page) {
return;
void* extra_page = pointer_offset(page, global_config.page_size);
size_t extra_page_size = page_get_size(page) - global_config.page_size;
- global_memory_interface->memory_decommit(extra_page, extra_page_size);
+ if (global_memory_interface->memory_decommit(extra_page, extra_page_size) != 0)
+ return;
+#if RPMALLOC_HEAP_STATISTICS && ENABLE_DECOMMIT
+ if (page->heap)
+ page->heap->stats.committed_size -= extra_page_size;
+#endif
page->is_decommitted = 1;
}
-static inline void
+static inline int
page_commit_memory_pages(page_t* page) {
if (!page->is_decommitted)
- return;
+ return 0;
void* extra_page = pointer_offset(page, global_config.page_size);
size_t extra_page_size = page_get_size(page) - global_config.page_size;
- global_memory_interface->memory_commit(extra_page, extra_page_size);
+ if (global_memory_interface->memory_commit(extra_page, extra_page_size) != 0)
+ return 1;
page->is_decommitted = 0;
#if ENABLE_DECOMMIT
+#if RPMALLOC_HEAP_STATISTICS
+ if (page->heap)
+ page->heap->stats.committed_size += extra_page_size;
+#endif
#if !defined(__APPLE__)
// When page is recommitted, the blocks in the second memory page and forward
// will be zeroed out by OS - take advantage in zalloc/calloc calls and make sure
@@ -1008,6 +1030,7 @@ page_commit_memory_pages(page_t* page) {
page->is_zero = 1;
#endif
#endif
+ return 0;
}
static void
@@ -1090,7 +1113,7 @@ static NOINLINE void
page_adopt_thread_free_block_list(page_t* page) {
if (page->local_free)
return;
- unsigned long long thread_free = atomic_load_explicit(&page->thread_free, memory_order_acquire);
+ unsigned long long thread_free = atomic_load_explicit(&page->thread_free, memory_order_relaxed);
if (thread_free != 0) {
// Other threads can only replace with another valid list head, this will never change to 0 in other threads
while (!atomic_compare_exchange_weak_explicit(&page->thread_free, &thread_free, 0, memory_order_acquire,
@@ -1243,8 +1266,13 @@ span_allocate_page(span_t* span) {
#if ENABLE_DECOMMIT
// The first page is always committed on initial span map of memory
- if (span->page_initialized)
- global_memory_interface->memory_commit(page, span->page_size);
+ if (span->page_initialized) {
+ if (global_memory_interface->memory_commit(page, span->page_size) != 0)
+ return 0;
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.committed_size += span->page_size;
+#endif
+ }
#endif
++span->page_initialized;
@@ -1268,6 +1296,16 @@ span_allocate_page(span_t* span) {
static NOINLINE void
span_deallocate_block(span_t* span, page_t* page, void* block) {
if (UNEXPECTED(page->page_type == PAGE_HUGE)) {
+#if RPMALLOC_HEAP_STATISTICS
+ if (span->heap) {
+ span->heap->stats.mapped_size -= span->mapped_size;
+#if ENABLE_DECOMMIT
+ span->heap->stats.committed_size -= span->page_count * span->page_size;
+#else
+        span->heap->stats.committed_size -= span->mapped_size;
+#endif
+ }
+#endif
global_memory_interface->memory_unmap(span, span->offset, span->mapped_size);
return;
}
@@ -1303,6 +1341,16 @@ block_deallocate(block_t* block) {
page_t* page = span_get_page_from_block(span, block);
const int is_thread_local = page_is_thread_heap(page);
+#if RPMALLOC_HEAP_STATISTICS
+ heap_t* heap = span->heap;
+ if (heap) {
+ if (span->page_type <= PAGE_LARGE)
+ heap->stats.allocated_size -= page->block_size;
+ else
+ heap->stats.allocated_size -= ((size_t)span->page_size * (size_t)span->page_count);
+ }
+#endif
+
// Optimized path for thread local free with non-huge block in page
// that has no aligned blocks
if (EXPECTED(is_thread_local != 0)) {
@@ -1373,7 +1421,8 @@ heap_allocate_new(void) {
size_t mapped_size = 0;
block_t* block = global_memory_interface->memory_map(heap_size, 0, &offset, &mapped_size);
#if ENABLE_DECOMMIT
- global_memory_interface->memory_commit(block, heap_size);
+ if (global_memory_interface->memory_commit(block, heap_size) != 0)
+ return 0;
#endif
heap_t* heap = heap_initialize((void*)block);
heap->offset = (uint32_t)offset;
@@ -1442,7 +1491,7 @@ heap_page_free_decommit(heap_t* heap, uint32_t page_type, uint32_t page_retain_c
}
}
-static inline void
+static inline int
heap_make_free_page_available(heap_t* heap, uint32_t size_class, page_t* page) {
page->size_class = size_class;
page->block_size = global_size_class[size_class].block_size;
@@ -1463,8 +1512,9 @@ heap_make_free_page_available(heap_t* heap, uint32_t size_class, page_t* page) {
if (head)
head->prev = page;
heap->page_available[size_class] = page;
- if (page->is_decommitted)
- page_commit_memory_pages(page);
+ if (page->is_decommitted != 0)
+ return page_commit_memory_pages(page);
+ return 0;
}
//! Find or allocate a span for the given page type with the given size class
@@ -1478,6 +1528,9 @@ heap_get_span(heap_t* heap, page_type_t page_type) {
size_t offset = 0;
size_t mapped_size = 0;
span_t* span = global_memory_interface->memory_map(SPAN_SIZE, SPAN_SIZE, &offset, &mapped_size);
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.mapped_size += mapped_size;
+#endif
if (EXPECTED(span != 0)) {
uint32_t page_count = 0;
uint32_t page_size = 0;
@@ -1496,7 +1549,15 @@ heap_get_span(heap_t* heap, page_type_t page_type) {
page_address_mask = LARGE_PAGE_MASK;
}
#if ENABLE_DECOMMIT
- global_memory_interface->memory_commit(span, page_size);
+ if (global_memory_interface->memory_commit(span, page_size) != 0)
+ return 0;
+#endif
+#if RPMALLOC_HEAP_STATISTICS
+#if ENABLE_DECOMMIT
+ heap->stats.committed_size += page_size;
+#else
+ heap->stats.committed_size += mapped_size;
+#endif
#endif
span->heap = heap;
span->page_type = page_type;
@@ -1523,9 +1584,9 @@ heap_get_page_generic(heap_t* heap, uint32_t size_class) {
page_type_t page_type = get_page_type(size_class);
// Check if there is a free page from multithreaded deallocations
- uintptr_t block_mt = atomic_load_explicit(&heap->thread_free[page_type], memory_order_acquire);
+ uintptr_t block_mt = atomic_load_explicit(&heap->thread_free[page_type], memory_order_relaxed);
if (UNEXPECTED(block_mt != 0)) {
- while (!atomic_compare_exchange_weak_explicit(&heap->thread_free[page_type], &block_mt, 0, memory_order_release,
+ while (!atomic_compare_exchange_weak_explicit(&heap->thread_free[page_type], &block_mt, 0, memory_order_acquire,
memory_order_relaxed)) {
wait_spin();
}
@@ -1547,7 +1608,8 @@ heap_get_page_generic(heap_t* heap, uint32_t size_class) {
rpmalloc_assert(heap->page_free_commit_count[page_type] > 0, "Free committed page count out of sync");
--heap->page_free_commit_count[page_type];
}
- heap_make_free_page_available(heap, size_class, page);
+ if (heap_make_free_page_available(heap, size_class, page) != 0)
+ return 0;
return page;
}
rpmalloc_assert(heap->page_free_commit_count[page_type] == 0, "Free committed page count out of sync");
@@ -1565,7 +1627,8 @@ heap_get_page_generic(heap_t* heap, uint32_t size_class) {
span_t* span = heap_get_span(heap, page_type);
if (EXPECTED(span != 0)) {
page = span_allocate_page(span);
- heap_make_free_page_available(page->heap, size_class, page);
+ if (heap_make_free_page_available(page->heap, size_class, page) != 0)
+ return 0;
}
return page;
@@ -1604,6 +1667,7 @@ heap_allocate_block_small_to_large(heap_t* heap, uint32_t size_class, unsigned i
static NOINLINE RPMALLOC_ALLOCATOR void*
heap_allocate_block_huge(heap_t* heap, size_t size, unsigned int zero) {
if (heap->id == 0) {
+ // Thread has not yet initialized, assign heap and try again
rpmalloc_initialize(0);
heap = get_thread_heap();
}
@@ -1614,7 +1678,16 @@ heap_allocate_block_huge(heap_t* heap, size_t size, unsigned int zero) {
if (block) {
span_t* span = block;
#if ENABLE_DECOMMIT
- global_memory_interface->memory_commit(span, alloc_size);
+ if (global_memory_interface->memory_commit(span, alloc_size) != 0)
+ return 0;
+#endif
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.mapped_size += mapped_size;
+#if ENABLE_DECOMMIT
+ heap->stats.committed_size += alloc_size;
+#else
+ heap->stats.committed_size += mapped_size;
+#endif
#endif
span->heap = heap;
span->page_type = PAGE_HUGE;
@@ -1635,6 +1708,9 @@ heap_allocate_block_huge(heap_t* heap, size_t size, unsigned int zero) {
void* ptr = pointer_offset(block, SPAN_HEADER_SIZE);
if (zero)
memset(ptr, 0, size);
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.allocated_size += size;
+#endif
return ptr;
}
return 0;
@@ -1644,6 +1720,10 @@ static RPMALLOC_ALLOCATOR NOINLINE void*
heap_allocate_block_generic(heap_t* heap, size_t size, unsigned int zero) {
uint32_t size_class = get_size_class(size);
if (EXPECTED(size_class < SIZE_CLASS_COUNT)) {
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.allocated_size += global_size_class[size_class].block_size;
+#endif
+
block_t* block = heap_pop_local_free(heap, size_class);
if (EXPECTED(block != 0)) {
// Fast track with small block available in heap level local free list
@@ -1668,6 +1748,9 @@ heap_allocate_block(heap_t* heap, size_t size, unsigned int zero) {
// Fast track with small block available in heap level local free list
if (zero)
memset(block, 0, global_size_class[size_class].block_size);
+#if RPMALLOC_HEAP_STATISTICS
+ heap->stats.allocated_size += global_size_class[size_class].block_size;
+#endif
return block;
}
}
@@ -1901,7 +1984,7 @@ rprealloc(void* ptr, size_t size) {
extern RPMALLOC_ALLOCATOR void*
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
- if ((size + alignment < size) || (alignment > _memory_page_size)) {
+ if ((size + alignment < size) || (alignment > SMALL_PAGE_SIZE)) {
errno = EINVAL;
return 0;
}
@@ -2210,6 +2293,21 @@ rpmalloc_dump_statistics(void* file) {
#endif
}
+void
+rpmalloc_global_statistics(rpmalloc_global_statistics_t* stats) {
+#if ENABLE_STATISTICS
+ stats->mapped = global_config.page_size * atomic_load_explicit(&global_statistics.page_mapped, memory_order_relaxed);
+ stats->mapped_peak = global_config.page_size * atomic_load_explicit(&global_statistics.page_mapped_peak, memory_order_relaxed);
+ stats->committed = global_config.page_size * atomic_load_explicit(&global_statistics.page_commit, memory_order_relaxed);
+ stats->decommitted = global_config.page_size * atomic_load_explicit(&global_statistics.page_decommit, memory_order_relaxed);
+ stats->active = global_config.page_size * atomic_load_explicit(&global_statistics.page_active, memory_order_relaxed);
+ stats->active_peak = global_config.page_size * atomic_load_explicit(&global_statistics.page_active_peak, memory_order_relaxed);
+ stats->heap_count = atomic_load_explicit(&global_statistics.heap_count, memory_order_relaxed);
+#else
+ memset(stats, 0, sizeof(rpmalloc_global_statistics_t));
+#endif
+}
+
#if RPMALLOC_FIRST_CLASS_HEAPS
rpmalloc_heap_t*
@@ -2253,6 +2351,17 @@ rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size
}
RPMALLOC_ALLOCATOR void*
+rpmalloc_heap_aligned_zalloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) {
+#if ENABLE_VALIDATE_ARGS
+ if (size >= MAX_ALLOC_SIZE) {
+ errno = EINVAL;
+ return 0;
+ }
+#endif
+ return heap_allocate_block_aligned(heap, alignment, size, 1);
+}
+
+RPMALLOC_ALLOCATOR void*
rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) {
size_t total;
#if ENABLE_VALIDATE_ARGS
@@ -2312,7 +2421,7 @@ rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned in
RPMALLOC_ALLOCATOR void*
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size, unsigned int flags) {
#if ENABLE_VALIDATE_ARGS
- if ((size + alignment < size) || (alignment > _memory_page_size)) {
+ if ((size + alignment < size) || (alignment > SMALL_PAGE_SIZE)) {
errno = EINVAL;
return 0;
}
@@ -2332,6 +2441,18 @@ rpmalloc_heap_free_all(rpmalloc_heap_t* heap) {
heap_free_all(heap);
}
+struct rpmalloc_heap_statistics_t
+rpmalloc_heap_statistics(rpmalloc_heap_t* heap) {
+#if RPMALLOC_HEAP_STATISTICS
+ if (heap) {
+ return heap->stats;
+ }
+#endif
+ (void)sizeof(heap);
+ struct rpmalloc_heap_statistics_t stats = {0};
+ return stats;
+}
+
extern inline void
rpmalloc_heap_thread_set_current(rpmalloc_heap_t* heap) {
heap_t* prev_heap = get_thread_heap();
diff --git a/thirdparty/rpmalloc/rpmalloc.h b/thirdparty/rpmalloc/rpmalloc.h
index d11292fb1..ea7d18e23 100644
--- a/thirdparty/rpmalloc/rpmalloc.h
+++ b/thirdparty/rpmalloc/rpmalloc.h
@@ -54,11 +54,16 @@ extern "C" {
#define RPMALLOC_MAX_ALIGNMENT (256 * 1024)
-//! Define RPMALLOC_FIRST_CLASS_HEAPS to enable heap based API (rpmalloc_heap_* functions).
+//! Define RPMALLOC_FIRST_CLASS_HEAPS to non-zero to enable heap based API (rpmalloc_heap_* functions).
#ifndef RPMALLOC_FIRST_CLASS_HEAPS
#define RPMALLOC_FIRST_CLASS_HEAPS 0
#endif
+//! Define RPMALLOC_HEAP_STATISTICS to non-zero to enable first class heap statistics gathering.
+#ifndef RPMALLOC_HEAP_STATISTICS
+#define RPMALLOC_HEAP_STATISTICS 0
+#endif
+
//! Flag to rpaligned_realloc to not preserve content in reallocation
#define RPMALLOC_NO_PRESERVE 1
//! Flag to rpaligned_realloc to fail and return null pointer if grow cannot be done in-place,
@@ -72,18 +77,16 @@ typedef struct rpmalloc_global_statistics_t {
size_t mapped;
//! Peak amount of virtual memory mapped, all of which might not have been committed (only if ENABLE_STATISTICS=1)
size_t mapped_peak;
- //! Current amount of memory in global caches for small and medium sizes (<32KiB)
- size_t cached;
- //! Current amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by
- //! default (only if ENABLE_STATISTICS=1)
- size_t huge_alloc;
- //! Peak amount of memory allocated in huge allocations, i.e larger than LARGE_SIZE_LIMIT which is 2MiB by default
- //! (only if ENABLE_STATISTICS=1)
- size_t huge_alloc_peak;
- //! Total amount of memory mapped since initialization (only if ENABLE_STATISTICS=1)
- size_t mapped_total;
- //! Total amount of memory unmapped since initialization (only if ENABLE_STATISTICS=1)
- size_t unmapped_total;
+ //! Running counter of total amount of memory committed (only if ENABLE_STATISTICS=1)
+ size_t committed;
+ //! Running counter of total amount of memory decommitted (only if ENABLE_STATISTICS=1)
+ size_t decommitted;
+ //! Current amount of virtual memory active and committed (only if ENABLE_STATISTICS=1)
+ size_t active;
+ //! Peak amount of virtual memory active and committed (only if ENABLE_STATISTICS=1)
+ size_t active_peak;
+ //! Current heap count (only if ENABLE_STATISTICS=1)
+ size_t heap_count;
} rpmalloc_global_statistics_t;
typedef struct rpmalloc_thread_statistics_t {
@@ -147,10 +150,10 @@ typedef struct rpmalloc_interface_t {
//! set a memory_unmap function or else the default implementation will be used for both. This function must be
//! thread safe, it can be called by multiple threads simultaneously.
void* (*memory_map)(size_t size, size_t alignment, size_t* offset, size_t* mapped_size);
- //! Commit a range of memory pages
- void (*memory_commit)(void* address, size_t size);
- //! Decommit a range of memory pages
- void (*memory_decommit)(void* address, size_t size);
+ //! Commit a range of memory pages. Return non-zero if the operation failed and the address range could not be committed.
+ int (*memory_commit)(void* address, size_t size);
+ //! Decommit a range of memory pages. Return non-zero if the operation failed and the address range could not be decommitted.
+ int (*memory_decommit)(void* address, size_t size);
//! Unmap the memory pages starting at address and spanning the given number of bytes. If you set a memory_unmap
//! function, you must also set a memory_map function or else the default implementation will be used for both. This
//! function must be thread safe, it can be called by multiple threads simultaneously.
@@ -260,44 +263,38 @@ rprealloc(void* ptr, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_S
//! Reallocate the given block to at least the given size and alignment,
// with optional control flags (see RPMALLOC_NO_PRESERVE).
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_realloc(void* ptr, size_t alignment, size_t size, size_t oldsize, unsigned int flags) RPMALLOC_ATTRIB_MALLOC
RPMALLOC_ATTRIB_ALLOC_SIZE(3);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_alloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_zalloc(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment, and zero initialize it.
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpaligned_calloc(size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE2(2, 3);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmemalign(size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(2);
//! Allocate a memory block of at least the given size and alignment.
// Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB)
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT int
rpposix_memalign(void** memptr, size_t alignment, size_t size);
@@ -336,12 +333,18 @@ rpmalloc_heap_alloc(rpmalloc_heap_t* heap, size_t size) RPMALLOC_ATTRIB_MALLOC R
//! Allocate a memory block of at least the given size using the given heap. The returned
// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
-// and should ideally be less than memory page size. A caveat of rpmalloc
-// internals is that this must also be strictly less than the span size (default 64KiB).
+// and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmalloc_heap_aligned_alloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC
RPMALLOC_ATTRIB_ALLOC_SIZE(3);
+//! Allocate a zero initialized memory block of at least the given size using the given heap. The returned
+// block will have the requested alignment. Alignment must be a power of two and a multiple of sizeof(void*),
+// and should ideally be less than memory page size.
+RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
+rpmalloc_heap_aligned_zalloc(rpmalloc_heap_t* heap, size_t alignment, size_t size) RPMALLOC_ATTRIB_MALLOC
+ RPMALLOC_ATTRIB_ALLOC_SIZE(3);
+
//! Allocate a memory block of at least the given size using the given heap and zero initialize it.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC
@@ -349,8 +352,7 @@ rpmalloc_heap_calloc(rpmalloc_heap_t* heap, size_t num, size_t size) RPMALLOC_AT
//! Allocate a memory block of at least the given size using the given heap and zero initialize it. The returned
// block will have the requested alignment. Alignment must either be zero, or a power of two and a multiple of
-// sizeof(void*), and should ideally be less than memory page size. A caveat of rpmalloc internals is that this must
-// also be strictly less than the span size (default 64KiB).
+// sizeof(void*), and should ideally be less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmalloc_heap_aligned_calloc(rpmalloc_heap_t* heap, size_t alignment, size_t num, size_t size) RPMALLOC_ATTRIB_MALLOC
RPMALLOC_ATTRIB_ALLOC_SIZE2(3, 4);
@@ -364,8 +366,7 @@ rpmalloc_heap_realloc(rpmalloc_heap_t* heap, void* ptr, size_t size, unsigned in
//! Reallocate the given block to at least the given size. The memory block MUST be allocated
// by the same heap given to this function. The returned block will have the requested alignment.
// Alignment must be either zero, or a power of two and a multiple of sizeof(void*), and should ideally be
-// less than memory page size. A caveat of rpmalloc internals is that this must also be strictly less than
-// the span size (default 64KiB).
+// less than memory page size.
RPMALLOC_EXPORT RPMALLOC_ALLOCATOR void*
rpmalloc_heap_aligned_realloc(rpmalloc_heap_t* heap, void* ptr, size_t alignment, size_t size,
unsigned int flags) RPMALLOC_ATTRIB_MALLOC RPMALLOC_ATTRIB_ALLOC_SIZE(4);
@@ -379,6 +380,19 @@ rpmalloc_heap_free(rpmalloc_heap_t* heap, void* ptr);
RPMALLOC_EXPORT void
rpmalloc_heap_free_all(rpmalloc_heap_t* heap);
+struct rpmalloc_heap_statistics_t {
+ // Number of bytes allocated
+ size_t allocated_size;
+ // Number of bytes committed
+ size_t committed_size;
+ // Number of bytes mapped
+ size_t mapped_size;
+};
+
+//! Get heap statistics (if enabled in build)
+RPMALLOC_EXPORT struct rpmalloc_heap_statistics_t
+rpmalloc_heap_statistics(rpmalloc_heap_t* heap);
+
//! Set the given heap as the current heap for the calling thread. A heap MUST only be current heap
// for a single thread, a heap can never be shared between multiple threads. The previous
// current heap for the calling thread is released to be reused by other threads.
diff --git a/thirdparty/xmake.lua b/thirdparty/xmake.lua
index 1f5902fdf..ea861fc55 100644
--- a/thirdparty/xmake.lua
+++ b/thirdparty/xmake.lua
@@ -32,9 +32,9 @@ target('ue-trace')
add_includedirs("trace", {public=true})
add_headerfiles("trace/**.h")
--- rpmalloc 1.5.0-dev.20250810
--- Vendored from develop branch commit 6b34d956911b (2025-08-10)
--- https://github.com/mjansson/rpmalloc/commit/6b34d956911b
+-- rpmalloc 1.5.0-dev.20260410
+-- Vendored from develop branch commit 262c698d7019 (2026-04-10)
+-- https://github.com/mjansson/rpmalloc/commit/262c698d7019
target('rpmalloc')
set_kind("static")
set_group('thirdparty')