aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
Diffstat (limited to 'src')
-rw-r--r--src/zencore/include/zencore/blockingqueue.h27
-rw-r--r--src/zencore/include/zencore/workthreadpool.h8
-rw-r--r--src/zencore/workthreadpool.cpp230
-rw-r--r--src/zenhttp/httpserver.cpp1
-rw-r--r--src/zenhttp/include/zenhttp/httpserver.h1
-rw-r--r--src/zenhttp/servers/httpsys.cpp58
-rw-r--r--src/zenhttp/servers/httpsys.h1
-rw-r--r--src/zenhttp/servers/iothreadpool.cpp275
-rw-r--r--src/zenhttp/servers/iothreadpool.h92
-rw-r--r--src/zenhttp/servers/wshttpsys.cpp13
-rw-r--r--src/zenhttp/servers/wshttpsys.h9
-rw-r--r--src/zenserver/config/config.cpp10
-rw-r--r--src/zenserver/main.cpp2
-rw-r--r--src/zenutil/include/zenutil/zenserverprocess.h2
-rw-r--r--src/zenutil/zenserverprocess.cpp11
15 files changed, 651 insertions, 89 deletions
diff --git a/src/zencore/include/zencore/blockingqueue.h b/src/zencore/include/zencore/blockingqueue.h
index b6c93e937..6ac43b1ac 100644
--- a/src/zencore/include/zencore/blockingqueue.h
+++ b/src/zencore/include/zencore/blockingqueue.h
@@ -5,6 +5,7 @@
#include <zencore/zencore.h> // For ZEN_ASSERT
#include <atomic>
+#include <chrono>
#include <condition_variable>
#include <deque>
#include <mutex>
@@ -50,6 +51,32 @@ public:
return true;
}
+ // Returns: true if item dequeued, false on timeout or completion
+ template<typename Rep, typename Period>
+ bool WaitAndDequeueFor(T& Item, std::chrono::duration<Rep, Period> Timeout)
+ {
+ std::unique_lock Lock(m_Lock);
+ if (m_Queue.empty())
+ {
+ if (m_CompleteAdding)
+ {
+ return false;
+ }
+ if (!m_NewItemSignal.wait_for(Lock, Timeout, [this]() { return !m_Queue.empty() || m_CompleteAdding; }))
+ {
+ return false; // Timed out
+ }
+ if (m_Queue.empty())
+ {
+ ZEN_ASSERT(m_CompleteAdding);
+ return false;
+ }
+ }
+ Item = std::move(m_Queue.front());
+ m_Queue.pop_front();
+ return true;
+ }
+
void CompleteAdding()
{
std::unique_lock Lock(m_Lock);
diff --git a/src/zencore/include/zencore/workthreadpool.h b/src/zencore/include/zencore/workthreadpool.h
index 4c38dd651..cb0b8f491 100644
--- a/src/zencore/include/zencore/workthreadpool.h
+++ b/src/zencore/include/zencore/workthreadpool.h
@@ -27,8 +27,9 @@ private:
class WorkerThreadPool
{
public:
- explicit WorkerThreadPool(int InThreadCount);
- WorkerThreadPool(int InThreadCount, std::string_view WorkerThreadBaseName);
+ explicit WorkerThreadPool(int InThreadCount, bool UseExplicitThreads = true);
+ WorkerThreadPool(int InThreadCount, std::string_view WorkerThreadBaseName, bool UseExplicitThreads = true);
+ WorkerThreadPool(int InMinThreadCount, int InMaxThreadCount, std::string_view WorkerThreadBaseName, bool UseExplicitThreads = true);
~WorkerThreadPool();
// Decides what to do if there are no free workers in the pool when the work is submitted
@@ -48,6 +49,9 @@ private:
struct Impl;
struct ThreadStartInfo;
+ friend struct WinTpImpl;
+ friend struct ExplicitImpl;
+
std::unique_ptr<Impl> m_Impl;
};
diff --git a/src/zencore/workthreadpool.cpp b/src/zencore/workthreadpool.cpp
index 1cb338c66..b179527d7 100644
--- a/src/zencore/workthreadpool.cpp
+++ b/src/zencore/workthreadpool.cpp
@@ -11,6 +11,7 @@
#include <zencore/thread.h>
#include <zencore/trace.h>
+#include <algorithm>
#include <thread>
#include <vector>
@@ -18,9 +19,7 @@ ZEN_THIRD_PARTY_INCLUDES_START
#include <gsl/gsl-lite.hpp>
ZEN_THIRD_PARTY_INCLUDES_END
-#define ZEN_USE_WINDOWS_THREADPOOL 1
-
-#if ZEN_PLATFORM_WINDOWS && ZEN_USE_WINDOWS_THREADPOOL
+#if ZEN_PLATFORM_WINDOWS
# include <zencore/windows.h>
#endif
@@ -38,17 +37,26 @@ namespace detail {
//////////////////////////////////////////////////////////////////////////
-#if ZEN_USE_WINDOWS_THREADPOOL && ZEN_PLATFORM_WINDOWS
+struct WorkerThreadPool::Impl
+{
+ virtual ~Impl() = default;
+ [[nodiscard]] virtual Ref<IWork> ScheduleWork(Ref<IWork> Work, WorkerThreadPool::EMode Mode) = 0;
+};
+
+//////////////////////////////////////////////////////////////////////////
+
+#if ZEN_PLATFORM_WINDOWS
namespace {
thread_local bool t_IsThreadNamed{false};
}
-struct WorkerThreadPool::Impl
+struct WinTpImpl : WorkerThreadPool::Impl
{
- const int m_ThreadCount = 0;
- PTP_POOL m_ThreadPool = nullptr;
- PTP_CLEANUP_GROUP m_CleanupGroup = nullptr;
+ const int m_MinThreadCount = 0;
+ const int m_MaxThreadCount = 0;
+ PTP_POOL m_ThreadPool = nullptr;
+ PTP_CLEANUP_GROUP m_CleanupGroup = nullptr;
TP_CALLBACK_ENVIRON m_CallbackEnvironment;
PTP_WORK m_Work = nullptr;
@@ -59,10 +67,11 @@ struct WorkerThreadPool::Impl
mutable RwLock m_QueueLock;
std::deque<Ref<IWork>> m_WorkQueue;
- Impl(int InThreadCount, std::string_view WorkerThreadBaseName)
- : m_ThreadCount(InThreadCount)
+ WinTpImpl(int InMinThreadCount, int InMaxThreadCount, std::string_view WorkerThreadBaseName)
+ : m_MinThreadCount(InMinThreadCount)
+ , m_MaxThreadCount(InMaxThreadCount < InMinThreadCount ? InMinThreadCount : InMaxThreadCount)
, m_WorkerThreadBaseName(WorkerThreadBaseName)
- , m_FreeWorkerCount(m_ThreadCount)
+ , m_FreeWorkerCount(m_MinThreadCount)
{
// Thread pool setup
@@ -72,11 +81,11 @@ struct WorkerThreadPool::Impl
ThrowLastError("CreateThreadpool failed");
}
- if (!SetThreadpoolThreadMinimum(m_ThreadPool, (DWORD)m_ThreadCount))
+ if (!SetThreadpoolThreadMinimum(m_ThreadPool, (DWORD)m_MinThreadCount))
{
ThrowLastError("SetThreadpoolThreadMinimum failed");
}
- SetThreadpoolThreadMaximum(m_ThreadPool, (DWORD)m_ThreadCount);
+ SetThreadpoolThreadMaximum(m_ThreadPool, (DWORD)m_MaxThreadCount);
InitializeThreadpoolEnvironment(&m_CallbackEnvironment);
@@ -96,14 +105,14 @@ struct WorkerThreadPool::Impl
}
}
- ~Impl()
+ ~WinTpImpl() override
{
WaitForThreadpoolWorkCallbacks(m_Work, /* CancelPendingCallbacks */ TRUE);
CloseThreadpoolWork(m_Work);
CloseThreadpool(m_ThreadPool);
}
- [[nodiscard]] Ref<IWork> ScheduleWork(Ref<IWork> Work, WorkerThreadPool::EMode Mode)
+ [[nodiscard]] Ref<IWork> ScheduleWork(Ref<IWork> Work, WorkerThreadPool::EMode Mode) override
{
if (Mode == WorkerThreadPool::EMode::DisableBacklog)
{
@@ -130,7 +139,7 @@ struct WorkerThreadPool::Impl
static VOID CALLBACK WorkCallback(_Inout_ PTP_CALLBACK_INSTANCE Instance, _Inout_opt_ PVOID Context, _Inout_ PTP_WORK Work)
{
ZEN_UNUSED(Instance, Work);
- Impl* ThisPtr = reinterpret_cast<Impl*>(Context);
+ WinTpImpl* ThisPtr = reinterpret_cast<WinTpImpl*>(Context);
ThisPtr->DoWork();
}
@@ -175,7 +184,9 @@ struct WorkerThreadPool::Impl
}
};
-#else
+#endif
+
+//////////////////////////////////////////////////////////////////////////
struct WorkerThreadPool::ThreadStartInfo
{
@@ -183,42 +194,54 @@ struct WorkerThreadPool::ThreadStartInfo
zen::Latch* Latch;
};
-struct WorkerThreadPool::Impl
+struct ExplicitImpl : WorkerThreadPool::Impl
{
- const int m_ThreadCount = 0;
- void WorkerThreadFunction(ThreadStartInfo Info);
- std::string m_WorkerThreadBaseName;
- std::vector<std::thread> m_WorkerThreads;
- BlockingQueue<Ref<IWork>> m_WorkQueue;
- std::atomic<int> m_FreeWorkerCount{0};
-
- Impl(int InThreadCount, std::string_view WorkerThreadBaseName)
- : m_ThreadCount(InThreadCount)
+ const int m_MinThreads;
+ const int m_MaxThreads;
+ std::atomic<int> m_TotalThreads{0};
+ std::atomic<int> m_ActiveCount{0};
+ void WorkerThreadFunction(WorkerThreadPool::ThreadStartInfo Info);
+ void SpawnWorkerThread();
+ void PruneExitedThreads();
+ std::string m_WorkerThreadBaseName;
+ RwLock m_ThreadListLock;
+ std::vector<std::thread> m_WorkerThreads;
+ std::vector<std::thread::id> m_ExitedThreadIds;
+ BlockingQueue<Ref<IWork>> m_WorkQueue;
+ std::atomic<int> m_FreeWorkerCount{0};
+
+ bool ScalingEnabled() const { return m_MinThreads != m_MaxThreads; }
+
+ ExplicitImpl(int InMinThreadCount, int InMaxThreadCount, std::string_view WorkerThreadBaseName)
+ : m_MinThreads(InMinThreadCount)
+ , m_MaxThreads(InMaxThreadCount < InMinThreadCount ? InMinThreadCount : InMaxThreadCount)
, m_WorkerThreadBaseName(WorkerThreadBaseName)
- , m_FreeWorkerCount(m_ThreadCount)
+ , m_FreeWorkerCount(InMinThreadCount)
{
-# if ZEN_WITH_TRACE
+#if ZEN_WITH_TRACE
trace::ThreadGroupBegin(m_WorkerThreadBaseName.c_str());
-# endif
+#endif
- zen::Latch WorkerLatch{m_ThreadCount};
+ zen::Latch WorkerLatch{m_MinThreads};
- for (int i = 0; i < m_ThreadCount; ++i)
+ for (int i = 0; i < m_MinThreads; ++i)
{
- m_WorkerThreads.emplace_back(&Impl::WorkerThreadFunction, this, ThreadStartInfo{i + 1, &WorkerLatch});
+ m_TotalThreads.fetch_add(1, std::memory_order::relaxed);
+ m_WorkerThreads.emplace_back(&ExplicitImpl::WorkerThreadFunction, this, WorkerThreadPool::ThreadStartInfo{i + 1, &WorkerLatch});
}
WorkerLatch.Wait();
-# if ZEN_WITH_TRACE
+#if ZEN_WITH_TRACE
trace::ThreadGroupEnd();
-# endif
+#endif
}
- ~Impl()
+ ~ExplicitImpl() override
{
m_WorkQueue.CompleteAdding();
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
for (std::thread& Thread : m_WorkerThreads)
{
if (Thread.joinable())
@@ -230,7 +253,7 @@ struct WorkerThreadPool::Impl
m_WorkerThreads.clear();
}
- [[nodiscard]] Ref<IWork> ScheduleWork(Ref<IWork> Work, WorkerThreadPool::EMode Mode)
+ [[nodiscard]] Ref<IWork> ScheduleWork(Ref<IWork> Work, WorkerThreadPool::EMode Mode) override
{
if (Mode == WorkerThreadPool::EMode::DisableBacklog)
{
@@ -245,24 +268,97 @@ struct WorkerThreadPool::Impl
}
}
m_WorkQueue.Enqueue(std::move(Work));
+
+ // Scale up: if all workers are busy and we haven't hit the max, spawn a new thread
+ if (ScalingEnabled())
+ {
+ const int Active = m_ActiveCount.load(std::memory_order::acquire);
+ const int Total = m_TotalThreads.load(std::memory_order::acquire);
+ if (Active >= Total && Total < m_MaxThreads)
+ {
+ int Expected = Total;
+ if (m_TotalThreads.compare_exchange_strong(Expected, Total + 1, std::memory_order::acq_rel))
+ {
+ ZEN_DEBUG("scaling up worker thread pool '{}', {} -> {} threads", m_WorkerThreadBaseName, Total, Total + 1);
+ SpawnWorkerThread();
+ }
+ }
+ }
+
return {};
}
};
void
-WorkerThreadPool::Impl::WorkerThreadFunction(ThreadStartInfo Info)
+ExplicitImpl::PruneExitedThreads()
+{
+ // Must be called under m_ThreadListLock
+ if (m_ExitedThreadIds.empty())
+ {
+ return;
+ }
+
+ for (auto It = m_WorkerThreads.begin(); It != m_WorkerThreads.end();)
+ {
+ auto IdIt = std::find(m_ExitedThreadIds.begin(), m_ExitedThreadIds.end(), It->get_id());
+ if (IdIt != m_ExitedThreadIds.end())
+ {
+ It->join();
+ It = m_WorkerThreads.erase(It);
+ m_ExitedThreadIds.erase(IdIt);
+ }
+ else
+ {
+ ++It;
+ }
+ }
+}
+
+void
+ExplicitImpl::SpawnWorkerThread()
+{
+ static std::atomic<int> s_DynamicThreadIndex{0};
+ const int ThreadNumber = ++s_DynamicThreadIndex;
+
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+ PruneExitedThreads();
+ m_WorkerThreads.emplace_back(&ExplicitImpl::WorkerThreadFunction, this, WorkerThreadPool::ThreadStartInfo{ThreadNumber, nullptr});
+}
+
+void
+ExplicitImpl::WorkerThreadFunction(WorkerThreadPool::ThreadStartInfo Info)
{
SetCurrentThreadName(fmt::format("{}_{}", m_WorkerThreadBaseName, Info.ThreadNumber));
- Info.Latch->CountDown();
+ if (Info.Latch)
+ {
+ Info.Latch->CountDown();
+ }
+
+ static constexpr auto kIdleTimeout = std::chrono::seconds(15);
do
{
Ref<IWork> Work;
- if (m_WorkQueue.WaitAndDequeue(Work))
+
+ bool Dequeued;
+ if (ScalingEnabled())
+ {
+ Dequeued = m_WorkQueue.WaitAndDequeueFor(Work, kIdleTimeout);
+ }
+ else
+ {
+ Dequeued = m_WorkQueue.WaitAndDequeue(Work);
+ }
+
+ if (Dequeued)
{
m_FreeWorkerCount--;
- auto _ = MakeGuard([&]() { m_FreeWorkerCount++; });
+ m_ActiveCount.fetch_add(1, std::memory_order::acq_rel);
+ auto _ = MakeGuard([&]() {
+ m_ActiveCount.fetch_sub(1, std::memory_order::release);
+ m_FreeWorkerCount++;
+ });
try
{
@@ -281,25 +377,65 @@ WorkerThreadPool::Impl::WorkerThreadFunction(ThreadStartInfo Info)
ZEN_ERROR("Caught exception in worker thread: {}", e.what());
}
}
+ else if (ScalingEnabled())
+ {
+ // Timed out - consider scaling down
+ const int CurrentTotal = m_TotalThreads.load(std::memory_order::acquire);
+ if (CurrentTotal > m_MinThreads)
+ {
+ int Expected = CurrentTotal;
+ if (m_TotalThreads.compare_exchange_strong(Expected, CurrentTotal - 1, std::memory_order::acq_rel))
+ {
+ ZEN_DEBUG("scaling down worker thread pool '{}' (idle timeout), {} threads remaining",
+ m_WorkerThreadBaseName,
+ CurrentTotal - 1);
+ m_FreeWorkerCount--;
+ {
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+ m_ExitedThreadIds.push_back(std::this_thread::get_id());
+ }
+ return; // Thread exits
+ }
+ }
+ // CAS failed or at min threads - continue waiting
+ }
else
{
+ // CompleteAdding was called - exit
return;
}
} while (true);
}
-#endif
-
//////////////////////////////////////////////////////////////////////////
-WorkerThreadPool::WorkerThreadPool(int InThreadCount) : WorkerThreadPool(InThreadCount, "workerthread")
+WorkerThreadPool::WorkerThreadPool(int InThreadCount, bool UseExplicitThreads)
+: WorkerThreadPool(InThreadCount, "workerthread", UseExplicitThreads)
+{
+}
+
+WorkerThreadPool::WorkerThreadPool(int InThreadCount, std::string_view WorkerThreadBaseName, bool UseExplicitThreads)
+: WorkerThreadPool(InThreadCount, InThreadCount, WorkerThreadBaseName, UseExplicitThreads)
{
}
-WorkerThreadPool::WorkerThreadPool(int InThreadCount, std::string_view WorkerThreadBaseName)
+WorkerThreadPool::WorkerThreadPool(int InMinThreadCount,
+ int InMaxThreadCount,
+ std::string_view WorkerThreadBaseName,
+ bool UseExplicitThreads)
{
- if (InThreadCount > 0)
+ if (InMinThreadCount > 0)
{
- m_Impl = std::make_unique<Impl>(InThreadCount, WorkerThreadBaseName);
+#if ZEN_PLATFORM_WINDOWS
+ if (!UseExplicitThreads)
+ {
+ m_Impl = std::make_unique<WinTpImpl>(InMinThreadCount, InMaxThreadCount, WorkerThreadBaseName);
+ }
+ else
+#endif
+ {
+ ZEN_UNUSED(UseExplicitThreads);
+ m_Impl = std::make_unique<ExplicitImpl>(InMinThreadCount, InMaxThreadCount, WorkerThreadBaseName);
+ }
}
}
diff --git a/src/zenhttp/httpserver.cpp b/src/zenhttp/httpserver.cpp
index 4d98e9650..7bb02cb0f 100644
--- a/src/zenhttp/httpserver.cpp
+++ b/src/zenhttp/httpserver.cpp
@@ -1049,6 +1049,7 @@ CreateHttpServerClass(const std::string_view ServerClass, const HttpServerConfig
.IsRequestLoggingEnabled = Config.HttpSys.IsRequestLoggingEnabled,
.IsDedicatedServer = Config.IsDedicatedServer,
.ForceLoopback = Config.ForceLoopback,
+ .UseExplicitIoThreadPool = Config.HttpSys.UseExplicitIoThreadPool,
.HttpsPort = Config.HttpSys.HttpsPort,
.CertThumbprint = Config.HttpSys.CertThumbprint,
.CertStoreName = Config.HttpSys.CertStoreName,
diff --git a/src/zenhttp/include/zenhttp/httpserver.h b/src/zenhttp/include/zenhttp/httpserver.h
index 2a8b2ca94..c90e20aef 100644
--- a/src/zenhttp/include/zenhttp/httpserver.h
+++ b/src/zenhttp/include/zenhttp/httpserver.h
@@ -340,6 +340,7 @@ struct HttpServerConfig
unsigned int AsyncWorkThreadCount = 0;
bool IsAsyncResponseEnabled = true;
bool IsRequestLoggingEnabled = false;
+ bool UseExplicitIoThreadPool = false;
int HttpsPort = 0; // 0 = HTTPS disabled
std::string CertThumbprint; // Hex SHA-1 (40 chars) for auto SSL binding
std::string CertStoreName = "MY"; // Windows certificate store name
diff --git a/src/zenhttp/servers/httpsys.cpp b/src/zenhttp/servers/httpsys.cpp
index f8fb1c9be..67a71c0cc 100644
--- a/src/zenhttp/servers/httpsys.cpp
+++ b/src/zenhttp/servers/httpsys.cpp
@@ -145,6 +145,7 @@ private:
bool m_IsAsyncResponseEnabled = true;
std::unique_ptr<WinIoThreadPool> m_IoThreadPool;
+ bool m_IoThreadPoolIsWinTp = true;
RwLock m_AsyncWorkPoolInitLock;
std::atomic<WorkerThreadPool*> m_AsyncWorkPool = nullptr;
@@ -403,7 +404,8 @@ public:
void IssueInitialRequest(std::error_code& ErrorCode);
bool IssueNextRequest(HttpSysRequestHandler* NewCompletionHandler);
- PTP_IO Iocp();
+ void StartIo();
+ void CancelIo();
HANDLE RequestQueueHandle();
inline OVERLAPPED* Overlapped() { return &m_IoContext.Overlapped; }
inline HttpSysServer& Server() { return m_HttpServer; }
@@ -646,9 +648,8 @@ HttpMessageResponseRequest::IssueRequest(std::error_code& ErrorCode)
ZEN_TRACE_CPU("httpsys::Response::IssueRequest");
HttpSysTransaction& Tx = Transaction();
HTTP_REQUEST* const HttpReq = Tx.HttpRequest();
- PTP_IO const Iocp = Tx.Iocp();
- StartThreadpoolIo(Iocp);
+ Tx.StartIo();
// Split payload into batches to play well with the underlying API
@@ -863,7 +864,7 @@ HttpMessageResponseRequest::IssueRequest(std::error_code& ErrorCode)
// An error occurred, no completion will be posted to IOCP
- CancelThreadpoolIo(Iocp);
+ Tx.CancelIo();
// Emit diagnostics
@@ -1045,7 +1046,8 @@ HttpSysServer::HttpSysServer(const HttpSysConfig& InConfig)
MaxThreadCount *= 2;
}
- m_IoThreadPool = std::make_unique<WinIoThreadPool>(MinThreadCount, MaxThreadCount);
+ m_IoThreadPoolIsWinTp = !m_InitialConfig.UseExplicitIoThreadPool;
+ m_IoThreadPool = WinIoThreadPool::Create(!m_IoThreadPoolIsWinTp, MinThreadCount, MaxThreadCount);
if (m_InitialConfig.AsyncWorkThreadCount == 0)
{
@@ -1062,10 +1064,11 @@ HttpSysServer::HttpSysServer(const HttpSysConfig& InConfig)
m_IsHttpInitialized = true;
m_IsOk = true;
- ZEN_INFO("http.sys server started in {} mode, using {}-{} I/O threads and {} async worker threads",
+ ZEN_INFO("http.sys server started in {} mode, using {}-{} I/O threads ({}) and {} async worker threads",
m_InitialConfig.IsDedicatedServer ? "DEDICATED" : "NORMAL",
MinThreadCount,
MaxThreadCount,
+ m_InitialConfig.UseExplicitIoThreadPool ? "explicit IOCP" : "Windows Thread Pool",
m_InitialConfig.AsyncWorkThreadCount);
}
@@ -1664,7 +1667,8 @@ HttpSysServer::WorkPool()
if (!m_AsyncWorkPool.load(std::memory_order_relaxed))
{
- m_AsyncWorkPool.store(new WorkerThreadPool(m_InitialConfig.AsyncWorkThreadCount, "http_async"), std::memory_order_release);
+ m_AsyncWorkPool =
+ new WorkerThreadPool(m_InitialConfig.AsyncWorkThreadCount, "http_async", m_InitialConfig.UseExplicitIoThreadPool);
}
}
@@ -1839,10 +1843,16 @@ HttpSysTransaction::~HttpSysTransaction()
{
}
-PTP_IO
-HttpSysTransaction::Iocp()
+void
+HttpSysTransaction::StartIo()
+{
+ m_HttpServer.m_IoThreadPool->StartIo();
+}
+
+void
+HttpSysTransaction::CancelIo()
{
- return m_HttpServer.m_IoThreadPool->Iocp();
+ m_HttpServer.m_IoThreadPool->CancelIo();
}
HANDLE
@@ -1863,7 +1873,6 @@ static std::atomic<int> HttpSysThreadIndex = 0;
static void
NameCurrentHttpSysThread()
{
- t_IsHttpSysThreadNamed = true;
const int ThreadIndex = ++HttpSysThreadIndex;
zen::ExtendableStringBuilder<16> ThreadName;
ThreadName << "httpio_" << ThreadIndex;
@@ -1880,13 +1889,6 @@ HttpSysTransaction::IoCompletionCallback(PTP_CALLBACK_INSTANCE Instance,
{
ZEN_UNUSED(Io, Instance);
- // Assign names to threads for context
-
- if (!t_IsHttpSysThreadNamed)
- {
- NameCurrentHttpSysThread();
- }
-
// Note that for a given transaction we may be in this completion function on more
// than one thread at any given moment. This means we need to be careful about what
// happens in here
@@ -1909,6 +1911,19 @@ HttpSysTransaction::IoCompletionCallback(PTP_CALLBACK_INSTANCE Instance,
HttpSysTransaction* Transaction = CONTAINING_RECORD(IoContext, HttpSysTransaction, m_IoContext);
+ // Assign names to threads for context (only needed when using Windows' native
+ // thread pool)
+
+ if (Transaction->Server().m_IoThreadPoolIsWinTp)
+ {
+ if (!t_IsHttpSysThreadNamed)
+ {
+ t_IsHttpSysThreadNamed = true;
+
+ NameCurrentHttpSysThread();
+ }
+ }
+
if (Transaction->HandleCompletion(IoResult, NumberOfBytesTransferred) == HttpSysTransaction::Status::kDone)
{
delete Transaction;
@@ -2391,10 +2406,9 @@ InitialRequestHandler::IssueRequest(std::error_code& ErrorCode)
ZEN_TRACE_CPU("httpsys::Request::IssueRequest");
HttpSysTransaction& Tx = Transaction();
- PTP_IO Iocp = Tx.Iocp();
HTTP_REQUEST* HttpReq = Tx.HttpRequest();
- StartThreadpoolIo(Iocp);
+ Tx.StartIo();
ULONG HttpApiResult;
@@ -2428,7 +2442,7 @@ InitialRequestHandler::IssueRequest(std::error_code& ErrorCode)
if (HttpApiResult != ERROR_IO_PENDING && HttpApiResult != NO_ERROR)
{
- CancelThreadpoolIo(Iocp);
+ Tx.CancelIo();
ErrorCode = MakeErrorCode(HttpApiResult);
@@ -2574,7 +2588,7 @@ InitialRequestHandler::HandleCompletion(ULONG IoResult, ULONG_PTR NumberOfBytesT
Ref<WsHttpSysConnection> WsConn(new WsHttpSysConnection(RequestQueueHandle,
RequestId,
*WsHandler,
- Transaction().Iocp(),
+ *Transaction().Server().m_IoThreadPool,
&Transaction().Server()));
Ref<WebSocketConnection> WsConnRef(WsConn.Get());
diff --git a/src/zenhttp/servers/httpsys.h b/src/zenhttp/servers/httpsys.h
index ca465ad00..0685b42b2 100644
--- a/src/zenhttp/servers/httpsys.h
+++ b/src/zenhttp/servers/httpsys.h
@@ -22,6 +22,7 @@ struct HttpSysConfig
bool IsRequestLoggingEnabled = false;
bool IsDedicatedServer = false;
bool ForceLoopback = false;
+ bool UseExplicitIoThreadPool = false;
int HttpsPort = 0; // 0 = HTTPS disabled
std::string CertThumbprint; // Hex SHA-1 (40 chars) for auto SSL binding
std::string CertStoreName = "MY"; // Windows certificate store name
diff --git a/src/zenhttp/servers/iothreadpool.cpp b/src/zenhttp/servers/iothreadpool.cpp
index e941606e2..1053f0b0c 100644
--- a/src/zenhttp/servers/iothreadpool.cpp
+++ b/src/zenhttp/servers/iothreadpool.cpp
@@ -3,12 +3,36 @@
#include "iothreadpool.h"
#include <zencore/except.h>
+#include <zencore/logging.h>
+#include <zencore/thread.h>
#if ZEN_PLATFORM_WINDOWS
+# include <algorithm>
+# include <thread>
+
namespace zen {
-WinIoThreadPool::WinIoThreadPool(int InThreadCount, int InMaxThreadCount)
+//////////////////////////////////////////////////////////////////////////
+// Factory
+
+std::unique_ptr<WinIoThreadPool>
+WinIoThreadPool::Create(bool UseExplicitThreads, int MinThreads, int MaxThreads)
+{
+ if (UseExplicitThreads)
+ {
+ return std::make_unique<ExplicitIoThreadPool>(MinThreads, MaxThreads);
+ }
+ else
+ {
+ return std::make_unique<WinTpIoThreadPool>(MinThreads, MaxThreads);
+ }
+}
+
+//////////////////////////////////////////////////////////////////////////
+// WinTpIoThreadPool - Windows Thread Pool implementation
+
+WinTpIoThreadPool::WinTpIoThreadPool(int InThreadCount, int InMaxThreadCount)
{
ZEN_ASSERT(InThreadCount);
@@ -31,7 +55,7 @@ WinIoThreadPool::WinIoThreadPool(int InThreadCount, int InMaxThreadCount)
SetThreadpoolCallbackCleanupGroup(&m_CallbackEnvironment, m_CleanupGroup, NULL);
}
-WinIoThreadPool::~WinIoThreadPool()
+WinTpIoThreadPool::~WinTpIoThreadPool()
{
// this will wait for all callbacks to complete and tear down the `CreateThreadpoolIo`
// object and release all related objects
@@ -42,7 +66,7 @@ WinIoThreadPool::~WinIoThreadPool()
}
void
-WinIoThreadPool::CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode)
+WinTpIoThreadPool::CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode)
{
ZEN_ASSERT(!m_ThreadPoolIo);
@@ -54,6 +78,251 @@ WinIoThreadPool::CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, voi
}
}
+void
+WinTpIoThreadPool::StartIo()
+{
+ StartThreadpoolIo(m_ThreadPoolIo);
+}
+
+void
+WinTpIoThreadPool::CancelIo()
+{
+ CancelThreadpoolIo(m_ThreadPoolIo);
+}
+
+//////////////////////////////////////////////////////////////////////////
+// ExplicitIoThreadPool - Raw IOCP + std::thread with load-based scaling
+
+static LoggerRef
+ExplicitIoPoolLog()
+{
+ static LoggerRef s_Log = logging::Get("iopool");
+ return s_Log;
+}
+
+ExplicitIoThreadPool::ExplicitIoThreadPool(int InMinThreadCount, int InMaxThreadCount)
+: m_MinThreads(InMinThreadCount)
+, m_MaxThreads(InMaxThreadCount)
+{
+ ZEN_ASSERT(InMinThreadCount > 0);
+
+ if (m_MaxThreads < m_MinThreads)
+ {
+ m_MaxThreads = m_MinThreads;
+ }
+
+ m_Iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0);
+
+ if (!m_Iocp)
+ {
+ ZEN_LOG_ERROR(ExplicitIoPoolLog(), "failed to create I/O completion port: {}", GetLastError());
+ }
+}
+
+ExplicitIoThreadPool::~ExplicitIoThreadPool()
+{
+ m_ShuttingDown.store(true, std::memory_order::release);
+
+ // Post poison-pill completions to wake all threads
+ const int ThreadCount = m_TotalThreads.load(std::memory_order::acquire);
+ for (int i = 0; i < ThreadCount; ++i)
+ {
+ PostQueuedCompletionStatus(m_Iocp, 0, 0, nullptr);
+ }
+
+ // Join all threads
+ {
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+ for (auto& Thread : m_Threads)
+ {
+ if (Thread.joinable())
+ {
+ Thread.join();
+ }
+ }
+ m_Threads.clear();
+ }
+
+ if (m_Iocp)
+ {
+ CloseHandle(m_Iocp);
+ m_Iocp = nullptr;
+ }
+}
+
+void
+ExplicitIoThreadPool::CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode)
+{
+ ZEN_ASSERT(m_Iocp);
+ ZEN_ASSERT(!m_Callback);
+
+ m_Callback = Callback;
+ m_Context = Context;
+
+ // Associate the I/O handle with our completion port
+ HANDLE Result = CreateIoCompletionPort(IoHandle, m_Iocp, /* CompletionKey */ 0, 0);
+
+ if (!Result)
+ {
+ ErrorCode = MakeErrorCodeFromLastError();
+ return;
+ }
+
+ // Now spawn the initial worker threads
+ for (int i = 0; i < m_MinThreads; ++i)
+ {
+ SpawnWorkerThread();
+ }
+}
+
+void
+ExplicitIoThreadPool::StartIo()
+{
+ // No-op for raw IOCP - unlike StartThreadpoolIo, a raw completion port requires no per-operation arming
+}
+
+void
+ExplicitIoThreadPool::CancelIo()
+{
+ // No-op for raw IOCP - StartIo performs no per-operation arming, so there is nothing to undo
+}
+
+void
+ExplicitIoThreadPool::PruneExitedThreads()
+{
+ // Must be called under m_ThreadListLock
+ if (m_ExitedThreadIds.empty())
+ {
+ return;
+ }
+
+ for (auto It = m_Threads.begin(); It != m_Threads.end();)
+ {
+ auto IdIt = std::find(m_ExitedThreadIds.begin(), m_ExitedThreadIds.end(), It->get_id());
+ if (IdIt != m_ExitedThreadIds.end())
+ {
+ It->join();
+ It = m_Threads.erase(It);
+ m_ExitedThreadIds.erase(IdIt);
+ }
+ else
+ {
+ ++It;
+ }
+ }
+}
+
+void
+ExplicitIoThreadPool::SpawnWorkerThread()
+{
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+
+ PruneExitedThreads();
+ ++m_TotalThreads;
+ m_Threads.emplace_back([this] { WorkerThreadMain(); });
+}
+
+void
+ExplicitIoThreadPool::WorkerThreadMain()
+{
+ static std::atomic<int> s_ThreadIndex{0};
+ const int ThreadIndex = ++s_ThreadIndex;
+ ExtendableStringBuilder<16> ThreadName;
+ ThreadName << "xpio_" << ThreadIndex;
+ SetCurrentThreadName(ThreadName);
+
+ static constexpr DWORD kIdleTimeoutMs = 15000;
+
+ while (!m_ShuttingDown.load(std::memory_order::acquire))
+ {
+ DWORD BytesTransferred = 0;
+ ULONG_PTR CompletionKey = 0;
+ OVERLAPPED* pOverlapped = nullptr;
+
+ BOOL Success = GetQueuedCompletionStatus(m_Iocp, &BytesTransferred, &CompletionKey, &pOverlapped, kIdleTimeoutMs);
+
+ if (m_ShuttingDown.load(std::memory_order::acquire))
+ {
+ break;
+ }
+
+ if (!Success && !pOverlapped)
+ {
+ DWORD Error = GetLastError();
+
+ if (Error == WAIT_TIMEOUT)
+ {
+ // Timeout - consider scaling down
+ const int CurrentTotal = m_TotalThreads.load(std::memory_order::acquire);
+ if (CurrentTotal > m_MinThreads)
+ {
+ // Try to claim this thread for exit by decrementing the count.
+ // Use CAS to avoid thundering herd of exits.
+ int Expected = CurrentTotal;
+ if (m_TotalThreads.compare_exchange_strong(Expected, CurrentTotal - 1, std::memory_order::acq_rel))
+ {
+ ZEN_LOG_DEBUG(ExplicitIoPoolLog(),
+ "scaling down I/O thread (idle timeout), {} threads remaining",
+ CurrentTotal - 1);
+ {
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+ m_ExitedThreadIds.push_back(std::this_thread::get_id());
+ }
+ return; // Thread exits
+ }
+ }
+ continue;
+ }
+
+ // Some other error with no overlapped - unexpected
+ ZEN_LOG_WARN(ExplicitIoPoolLog(), "GetQueuedCompletionStatus failed with error {}", Error);
+ continue;
+ }
+
+ if (!pOverlapped)
+ {
+ // Poison pill (null overlapped) - shutdown signal
+ break;
+ }
+
+ // Got a real completion - determine the I/O result
+ ULONG IoResult = NO_ERROR;
+ if (!Success)
+ {
+ IoResult = GetLastError();
+ }
+
+ // Track active threads for scale-up decisions
+ const int ActiveBefore = m_ActiveCount.fetch_add(1, std::memory_order::acq_rel);
+ const int TotalNow = m_TotalThreads.load(std::memory_order::acquire);
+
+ // Scale up: if all threads are now busy and we haven't hit the max, spawn another
+ if ((ActiveBefore + 1) >= TotalNow && TotalNow < m_MaxThreads)
+ {
+ // Use CAS to ensure only one thread triggers the scale-up
+ int Expected = TotalNow;
+ if (m_TotalThreads.compare_exchange_strong(Expected, TotalNow + 1, std::memory_order::acq_rel))
+ {
+ ZEN_LOG_DEBUG(ExplicitIoPoolLog(), "scaling up I/O thread pool, {} -> {} threads", TotalNow, TotalNow + 1);
+
+ // Spawn outside the hot path - but we need the thread list lock
+ // We already incremented m_TotalThreads, so do the actual spawn
+ {
+ RwLock::ExclusiveLockScope _(m_ThreadListLock);
+ PruneExitedThreads();
+ m_Threads.emplace_back([this] { WorkerThreadMain(); });
+ }
+ }
+ }
+
+ // Invoke the callback with the same signature as PTP_WIN32_IO_CALLBACK
+ // Parameters: Instance, Context, Overlapped, IoResult, NumberOfBytesTransferred, Io
+ m_Callback(nullptr, m_Context, pOverlapped, IoResult, BytesTransferred, nullptr);
+
+ m_ActiveCount.fetch_sub(1, std::memory_order::release);
+ }
+}
+
} // namespace zen
#endif
diff --git a/src/zenhttp/servers/iothreadpool.h b/src/zenhttp/servers/iothreadpool.h
index e75e95e58..f6bfce450 100644
--- a/src/zenhttp/servers/iothreadpool.h
+++ b/src/zenhttp/servers/iothreadpool.h
@@ -4,21 +4,66 @@
#include <zencore/zencore.h>
+#include <zencore/thread.h>
+
#if ZEN_PLATFORM_WINDOWS
# include <zencore/windows.h>
+# include <memory>
# include <system_error>
namespace zen {
+/**
+ * @brief Abstract base class for I/O thread pools used by the http.sys server.
+ *
+ * Two implementations are available:
+ * - WinTpIoThreadPool: Uses the Windows Thread Pool API (default, current behavior)
+ * - ExplicitIoThreadPool: Uses raw IOCP + std::thread with load-based scaling
+ */
class WinIoThreadPool
{
public:
- WinIoThreadPool(int InThreadCount, int InMaxThreadCount);
- ~WinIoThreadPool();
+ virtual ~WinIoThreadPool() = default;
+
+ /**
+ * @brief Bind an I/O handle to this pool with the given callback
+ */
+ virtual void CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode) = 0;
+
+ /**
+ * @brief Called before issuing an async I/O operation.
+ * The Windows TP implementation calls StartThreadpoolIo; the explicit implementation is a no-op.
+ */
+ virtual void StartIo() = 0;
- void CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode);
- inline PTP_IO Iocp() const { return m_ThreadPoolIo; }
+ /**
+ * @brief Called when an async I/O operation fails synchronously (to undo StartIo).
+ * The Windows TP implementation calls CancelThreadpoolIo; the explicit implementation is a no-op.
+ */
+ virtual void CancelIo() = 0;
+
+ /**
+ * @brief Factory method to create an I/O thread pool
+ * @param UseExplicitThreads If true, creates an ExplicitIoThreadPool; otherwise creates a WinTpIoThreadPool
+ * @param MinThreads Minimum number of threads
+ * @param MaxThreads Maximum number of threads
+ */
+ static std::unique_ptr<WinIoThreadPool> Create(bool UseExplicitThreads, int MinThreads, int MaxThreads);
+};
+
+/**
+ * @brief Windows Thread Pool implementation (wraps CreateThreadpool/CreateThreadpoolIo)
+ */
+class WinTpIoThreadPool : public WinIoThreadPool
+{
+public:
+ WinTpIoThreadPool(int InThreadCount, int InMaxThreadCount);
+ ~WinTpIoThreadPool();
+
+ virtual void CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode) override;
+ virtual void StartIo() override;
+ virtual void CancelIo() override;
private:
PTP_POOL m_ThreadPool = nullptr;
@@ -27,5 +72,44 @@ private:
TP_CALLBACK_ENVIRON m_CallbackEnvironment;
};
+/**
+ * @brief Explicit IOCP + std::thread implementation with load-based thread scaling
+ *
+ * Creates a raw I/O completion port and manages worker threads directly. Threads
+ * scale up when all are busy servicing completions, and scale down when idle for
+ * an extended period.
+ */
+class ExplicitIoThreadPool : public WinIoThreadPool
+{
+public:
+ ExplicitIoThreadPool(int InMinThreadCount, int InMaxThreadCount);
+ ~ExplicitIoThreadPool();
+
+ virtual void CreateIocp(HANDLE IoHandle, PTP_WIN32_IO_CALLBACK Callback, void* Context, std::error_code& ErrorCode) override;
+ virtual void StartIo() override;
+ virtual void CancelIo() override;
+
+private:
+ void WorkerThreadMain();
+ void SpawnWorkerThread();
+ void PruneExitedThreads();
+
+ HANDLE m_Iocp = nullptr;
+
+ PTP_WIN32_IO_CALLBACK m_Callback = nullptr;
+ void* m_Context = nullptr;
+
+ int m_MinThreads;
+ int m_MaxThreads;
+
+ std::atomic<int> m_TotalThreads{0};
+ std::atomic<int> m_ActiveCount{0};
+ std::atomic<bool> m_ShuttingDown{false};
+
+ RwLock m_ThreadListLock;
+ std::vector<std::thread> m_Threads;
+ std::vector<std::thread::id> m_ExitedThreadIds;
+};
+
} // namespace zen
#endif
diff --git a/src/zenhttp/servers/wshttpsys.cpp b/src/zenhttp/servers/wshttpsys.cpp
index af320172d..6abf805ec 100644
--- a/src/zenhttp/servers/wshttpsys.cpp
+++ b/src/zenhttp/servers/wshttpsys.cpp
@@ -4,6 +4,7 @@
#if ZEN_WITH_HTTPSYS
+# include "iothreadpool.h"
# include "wsframecodec.h"
# include <zencore/logging.h>
@@ -23,12 +24,12 @@ WsHttpSysLog()
WsHttpSysConnection::WsHttpSysConnection(HANDLE RequestQueueHandle,
HTTP_REQUEST_ID RequestId,
IWebSocketHandler& Handler,
- PTP_IO Iocp,
+ WinIoThreadPool& IoThreadPool,
HttpServer* Server)
: m_RequestQueueHandle(RequestQueueHandle)
, m_RequestId(RequestId)
, m_Handler(Handler)
-, m_Iocp(Iocp)
+, m_IoThreadPool(IoThreadPool)
, m_HttpServer(Server)
, m_ReadBuffer(8192)
{
@@ -98,7 +99,7 @@ WsHttpSysConnection::IssueAsyncRead()
ZeroMemory(&m_ReadIoContext.Overlapped, sizeof(OVERLAPPED));
- StartThreadpoolIo(m_Iocp);
+ m_IoThreadPool.StartIo();
ULONG Result = HttpReceiveRequestEntityBody(m_RequestQueueHandle,
m_RequestId,
@@ -110,7 +111,7 @@ WsHttpSysConnection::IssueAsyncRead()
if (Result != NO_ERROR && Result != ERROR_IO_PENDING)
{
- CancelThreadpoolIo(m_Iocp);
+ m_IoThreadPool.CancelIo();
m_OutstandingOps.fetch_sub(1, std::memory_order_relaxed);
if (m_IsOpen.exchange(false))
@@ -315,7 +316,7 @@ WsHttpSysConnection::FlushWriteQueue()
ZeroMemory(&m_WriteIoContext.Overlapped, sizeof(OVERLAPPED));
- StartThreadpoolIo(m_Iocp);
+ m_IoThreadPool.StartIo();
ULONG Result = HttpSendResponseEntityBody(m_RequestQueueHandle,
m_RequestId,
@@ -330,7 +331,7 @@ WsHttpSysConnection::FlushWriteQueue()
if (Result != NO_ERROR && Result != ERROR_IO_PENDING)
{
- CancelThreadpoolIo(m_Iocp);
+ m_IoThreadPool.CancelIo();
m_OutstandingOps.fetch_sub(1, std::memory_order_relaxed);
ZEN_LOG_DEBUG(WsHttpSysLog(), "WebSocket async write failed: {}", Result);
diff --git a/src/zenhttp/servers/wshttpsys.h b/src/zenhttp/servers/wshttpsys.h
index 6015e3873..7b10fe563 100644
--- a/src/zenhttp/servers/wshttpsys.h
+++ b/src/zenhttp/servers/wshttpsys.h
@@ -20,6 +20,7 @@
namespace zen {
class HttpServer;
+class WinIoThreadPool;
/**
* WebSocket connection over an http.sys opaque-mode connection
@@ -39,7 +40,11 @@ class HttpServer;
class WsHttpSysConnection : public WebSocketConnection
{
public:
- WsHttpSysConnection(HANDLE RequestQueueHandle, HTTP_REQUEST_ID RequestId, IWebSocketHandler& Handler, PTP_IO Iocp, HttpServer* Server);
+ WsHttpSysConnection(HANDLE RequestQueueHandle,
+ HTTP_REQUEST_ID RequestId,
+ IWebSocketHandler& Handler,
+ WinIoThreadPool& IoThreadPool,
+ HttpServer* Server);
~WsHttpSysConnection() override;
/**
@@ -76,7 +81,7 @@ private:
HANDLE m_RequestQueueHandle;
HTTP_REQUEST_ID m_RequestId;
IWebSocketHandler& m_Handler;
- PTP_IO m_Iocp;
+ WinIoThreadPool& m_IoThreadPool;
HttpServer* m_HttpServer;
// Tagged OVERLAPPED contexts for concurrent read and write
diff --git a/src/zenserver/config/config.cpp b/src/zenserver/config/config.cpp
index 60ae93853..fb88acab3 100644
--- a/src/zenserver/config/config.cpp
+++ b/src/zenserver/config/config.cpp
@@ -168,6 +168,9 @@ ZenServerConfiguratorBase::AddCommonConfigOptions(LuaConfig::Options& LuaOptions
LuaOptions.AddOption("network.httpsys.requestlogging"sv,
ServerOptions.HttpConfig.HttpSys.IsRequestLoggingEnabled,
"httpsys-enable-request-logging"sv);
+ LuaOptions.AddOption("network.httpsys.explicitiopool"sv,
+ ServerOptions.HttpConfig.HttpSys.UseExplicitIoThreadPool,
+ "httpsys-explicit-iopool"sv);
LuaOptions.AddOption("network.httpsys.httpsport"sv, ServerOptions.HttpConfig.HttpSys.HttpsPort, "httpsys-https-port"sv);
LuaOptions.AddOption("network.httpsys.certthumbprint"sv, ServerOptions.HttpConfig.HttpSys.CertThumbprint, "httpsys-cert-thumbprint"sv);
LuaOptions.AddOption("network.httpsys.certstorename"sv, ServerOptions.HttpConfig.HttpSys.CertStoreName, "httpsys-cert-store"sv);
@@ -383,6 +386,13 @@ ZenServerCmdLineOptions::AddCliOptions(cxxopts::Options& options, ZenServerConfi
options.add_option("httpsys",
"",
+ "httpsys-explicit-iopool",
+ "Use explicit IOCP thread pool instead of Windows Thread Pool",
+ cxxopts::value<bool>(ServerOptions.HttpConfig.HttpSys.UseExplicitIoThreadPool)->default_value("false"),
+ "<explicit iopool>");
+
+ options.add_option("httpsys",
+ "",
"httpsys-https-port",
"HTTPS listen port for http.sys (0 = disabled)",
cxxopts::value<int>(ServerOptions.HttpConfig.HttpSys.HttpsPort)->default_value("0"),
diff --git a/src/zenserver/main.cpp b/src/zenserver/main.cpp
index 26ae85ae1..23d497308 100644
--- a/src/zenserver/main.cpp
+++ b/src/zenserver/main.cpp
@@ -284,7 +284,7 @@ main(int argc, char* argv[])
//
// This isn't a great solution, but for now it seems to help reduce
// shutdown crashes observed in some situations.
- WaitForThreads(1000);
+ WaitForThreads(10);
});
#endif
diff --git a/src/zenutil/include/zenutil/zenserverprocess.h b/src/zenutil/include/zenutil/zenserverprocess.h
index 2f76f0d6c..933a89491 100644
--- a/src/zenutil/include/zenutil/zenserverprocess.h
+++ b/src/zenutil/include/zenutil/zenserverprocess.h
@@ -6,6 +6,7 @@
#include <zencore/logging.h>
#include <zencore/process.h>
#include <zencore/thread.h>
+#include <zencore/timer.h>
#include <zencore/uid.h>
#include <atomic>
@@ -169,6 +170,7 @@ struct ZenServerInstance
private:
ZenServerEnvironment& m_Env;
+ Stopwatch m_SpawnTime;
ProcessHandle m_Process;
NamedEvent m_ReadyEvent;
std::unique_ptr<NamedEvent> m_ShutdownEvent;
diff --git a/src/zenutil/zenserverprocess.cpp b/src/zenutil/zenserverprocess.cpp
index ac614f779..36a8b9c6f 100644
--- a/src/zenutil/zenserverprocess.cpp
+++ b/src/zenutil/zenserverprocess.cpp
@@ -943,6 +943,8 @@ ZenServerInstance::SignalShutdown(std::error_code& OutEc)
int
ZenServerInstance::Shutdown()
{
+ Stopwatch ShutdownTimer;
+
if (m_Process.IsValid())
{
if (m_ShutdownOnDestroy)
@@ -988,7 +990,10 @@ ZenServerInstance::Shutdown()
m_Process.Pid(),
NiceTimeSpanMs(Timer.GetElapsedTimeMs()));
}
- ZEN_DEBUG("zenserver process {} ({}) exited", m_Name, m_Process.Pid());
+ ZEN_DEBUG("zenserver process {} ({}) exited (after {})",
+ m_Name,
+ m_Process.Pid(),
+ NiceTimeSpanMs(ShutdownTimer.GetElapsedTimeMs()));
int ExitCode = m_Process.GetExitCode();
m_Process.Reset();
return ExitCode;
@@ -1070,6 +1075,8 @@ ToString(ZenServerInstance::ServerMode Mode)
void
ZenServerInstance::SpawnServerInternal(int ChildId, std::string_view ServerArgs, bool OpenConsole, int WaitTimeoutMs)
{
+ m_SpawnTime.Reset();
+
const bool IsTest = m_Env.IsTestEnvironment();
ExtendableStringBuilder<32> ChildEventName;
@@ -1398,7 +1405,7 @@ ZenServerInstance::OnServerReady()
}
CreateShutdownEvent(m_BasePort);
- ZEN_DEBUG("Server '{}' is ready on port {}", m_Name, m_BasePort);
+ ZEN_DEBUG("Server '{}' is ready on port {} (after {})", m_Name, m_BasePort, NiceTimeSpanMs(m_SpawnTime.GetElapsedTimeMs()));
}
std::string