aboutsummaryrefslogtreecommitdiff
path: root/src/zenserver/cache/cachedisklayer.cpp
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2023-10-04 14:37:49 +0200
committerGitHub <[email protected]>2023-10-04 14:37:49 +0200
commit387b6d99e6ef3958a6fd78b22c48bb8a85b53bda (patch)
treefd6a5e07e9785a10606f35f92b2f205af87fff1f /src/zenserver/cache/cachedisklayer.cpp
parentadded CHANGELOG.md note for websocket removal (diff)
downloadzen-387b6d99e6ef3958a6fd78b22c48bb8a85b53bda.tar.xz
zen-387b6d99e6ef3958a6fd78b22c48bb8a85b53bda.zip
refactor compact cas index (#443)
- Bugfix: Fix scrub messing up payload and access time in disk cache bucket when compacting index
- Improvement: Split up disk cache bucket index into hash lookup and payload array to improve performance
- Improvement: Reserve space up front for compact binary output when saving cache bucket manifest to improve performance
Diffstat (limited to 'src/zenserver/cache/cachedisklayer.cpp')
-rw-r--r--src/zenserver/cache/cachedisklayer.cpp13
1 files changed, 10 insertions, 3 deletions
diff --git a/src/zenserver/cache/cachedisklayer.cpp b/src/zenserver/cache/cachedisklayer.cpp
index 9883e2119..7ce713de9 100644
--- a/src/zenserver/cache/cachedisklayer.cpp
+++ b/src/zenserver/cache/cachedisklayer.cpp
@@ -884,7 +884,14 @@ ZenCacheDiskLayer::CacheBucket::MakeManifest(IndexMap&& Index, std::vector<Acces
ZEN_TRACE_CPU("Z$::Disk::Bucket::MakeManifest");
- CbObjectWriter Writer;
+ size_t ItemCount = m_Index.size();
+
+ // This tends to overestimate a little bit but it is still way more accurate than what we get with exponential growth
// And we don't need to reallocate the underlying buffer in almost every case
+ const size_t EstimatedSizePerItem = 54u;
+ const size_t ReserveSize = ItemCount == 0 ? 48u : RoundUp(32u + (ItemCount * EstimatedSizePerItem), 128);
+ CbObjectWriter Writer(ReserveSize);
+
Writer << "BucketId"sv << m_BucketId;
Writer << "Version"sv << CurrentDiskBucketVersion;
@@ -1213,8 +1220,8 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
for (auto It : m_Index)
{
size_t EntryIndex = Payloads.size();
- Payloads.push_back(m_Payloads[EntryIndex]);
- AccessTimes.push_back(m_AccessTimes[EntryIndex]);
+ Payloads.push_back(m_Payloads[It.second]);
+ AccessTimes.push_back(m_AccessTimes[It.second]);
Index.insert({It.first, EntryIndex});
}
m_Index.swap(Index);