about summary refs log tree commit diff
path: root/src/zenstore/blockstore.cpp
diff options
context:
space:
mode:
author	Dan Engelbrecht <[email protected]>	2026-01-09 16:52:08 +0100
committer	GitHub Enterprise <[email protected]>	2026-01-09 16:52:08 +0100
commit	4b25a0926ce5cc4336a58165ddfbb11e7fe97f6b (patch)
tree	dc278605bd7b1036a24701455ab6df80f7871e30 /src/zenstore/blockstore.cpp
parent	CprHttpClient cleanup (#703) (diff)
download	zen-4b25a0926ce5cc4336a58165ddfbb11e7fe97f6b.tar.xz
	zen-4b25a0926ce5cc4336a58165ddfbb11e7fe97f6b.zip
various optimizations (#704)
- Improvement: Validate chunk hashes when dechunking files in oplog import
- Improvement: Use stream decompression when dechunking files
- Improvement: When assembling blocks for oplog export, make sure we keep under/at block size limit
- Improvement: Make cancelling of oplog import more responsive
- Improvement: Use decompress to composite to avoid allocating a new memory buffer for uncompressed chunks during oplog import
- Improvement: Reduce memory buffer size and allocate it on demand when writing multiple chunks to block store
- Improvement: Reduce lock contention when fetching/checking existence of chunks in block store
Diffstat (limited to 'src/zenstore/blockstore.cpp')
-rw-r--r--	src/zenstore/blockstore.cpp	24
1 file changed, 18 insertions, 6 deletions
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp
index f97c98e08..0542d1171 100644
--- a/src/zenstore/blockstore.cpp
+++ b/src/zenstore/blockstore.cpp
@@ -762,7 +762,7 @@ BlockStore::WriteChunks(std::span<const IoBuffer> Datas, uint32_t Alignment, con
LargestSize = Max(LargestSize, Size);
}
- const uint64_t MinSize = Max(LargestSize, 8u * 1024u * 1024u);
+ const uint64_t MinSize = Max(LargestSize, 512u * 1024u);
const uint64_t BufferSize = Min(TotalSize, MinSize);
std::vector<uint8_t> Buffer(BufferSize);
@@ -815,7 +815,12 @@ BlockStore::WriteChunks(std::span<const IoBuffer> Datas, uint32_t Alignment, con
auto _ = MakeGuard([this, WriteBlockIndex]() { RemoveActiveWriteBlock(WriteBlockIndex); });
+ if (Count > 1)
{
+ if (Buffer.empty())
+ {
+ Buffer.resize(BufferSize);
+ }
MutableMemoryView WriteBuffer(Buffer.data(), RangeSize);
for (size_t Index = 0; Index < Count; Index++)
{
@@ -824,9 +829,14 @@ BlockStore::WriteChunks(std::span<const IoBuffer> Datas, uint32_t Alignment, con
WriteBuffer.MidInline(RoundUp(SourceBuffer.GetSize(), Alignment));
}
WriteBlock->Write(Buffer.data(), RangeSize, AlignedInsertOffset);
+ m_TotalSize.fetch_add(RangeSize, std::memory_order::relaxed);
+ }
+ else
+ {
+ MemoryView SourceBuffer = Datas[Offset];
+ WriteBlock->Write(SourceBuffer.GetData(), SourceBuffer.GetSize(), AlignedInsertOffset);
+ m_TotalSize.fetch_add(SourceBuffer.GetSize(), std::memory_order::relaxed);
}
-
- m_TotalSize.fetch_add(RangeSize, std::memory_order::relaxed);
uint32_t ChunkOffset = AlignedInsertOffset;
std::vector<BlockStoreLocation> Locations(Count);
@@ -845,11 +855,11 @@ BlockStore::WriteChunks(std::span<const IoBuffer> Datas, uint32_t Alignment, con
bool
BlockStore::HasChunk(const BlockStoreLocation& Location) const
{
- ZEN_TRACE_CPU("BlockStore::TryGetChunk");
+ ZEN_TRACE_CPU("BlockStore::HasChunk");
RwLock::SharedLockScope InsertLock(m_InsertLock);
if (auto BlockIt = m_ChunkBlocks.find(Location.BlockIndex); BlockIt != m_ChunkBlocks.end())
{
- if (const Ref<BlockStoreFile>& Block = BlockIt->second; Block)
+ if (Ref<BlockStoreFile> Block = BlockIt->second; Block)
{
InsertLock.ReleaseNow();
@@ -878,8 +888,10 @@ BlockStore::TryGetChunk(const BlockStoreLocation& Location) const
RwLock::SharedLockScope InsertLock(m_InsertLock);
if (auto BlockIt = m_ChunkBlocks.find(Location.BlockIndex); BlockIt != m_ChunkBlocks.end())
{
- if (const Ref<BlockStoreFile>& Block = BlockIt->second; Block)
+ if (Ref<BlockStoreFile> Block = BlockIt->second; Block)
{
+ InsertLock.ReleaseNow();
+
IoBuffer Chunk = Block->GetChunk(Location.Offset, Location.Size);
if (Chunk.GetSize() == Location.Size)
{