aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2024-09-25 09:48:15 +0200
committerGitHub Enterprise <[email protected]>2024-09-25 09:48:15 +0200
commit1f587062485dc7c69e9783cdb8bb187842eafc8a (patch)
tree116cec9c8d05aeaf482af9721da600bd9bd00244
parent5.5.8-pre1 (diff)
downloadzen-1f587062485dc7c69e9783cdb8bb187842eafc8a.tar.xz
zen-1f587062485dc7c69e9783cdb8bb187842eafc8a.zip
exception safety when writing block (#168)
* make sure we always clear the writing block from m_ActiveWriteBlocks even if we have an exception
-rw-r--r--CHANGELOG.md1
-rw-r--r--src/zenstore/blockstore.cpp18
2 files changed, 9 insertions, 10 deletions
diff --git a/CHANGELOG.md b/CHANGELOG.md
index da6cd320d..f4c8a157a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -5,6 +5,7 @@
- Improvement: Refactored GCv2 to reduce time we block write requests, trading for longer overall GC time
- Reduces time writes are blocked by ~8 times, going from 6s to 0.7s on a large data set
- Increases time for GC to execute (without blocking reads/writes) by ~1.7 times, going from 10.4s to 17.6s on a large data set
+- Bugfix: Make sure we clear the active write block in the block store if we get an exception when writing to disk
## 5.5.8
- Feature: Added `zen cache-gen` command to generate large amount of cache data for testing
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp
index 00a38c3b6..7c6677052 100644
--- a/src/zenstore/blockstore.cpp
+++ b/src/zenstore/blockstore.cpp
@@ -539,16 +539,15 @@ BlockStore::WriteChunk(const void* Data, uint64_t Size, uint32_t Alignment, cons
Ref<BlockStoreFile> WriteBlock = m_WriteBlock;
m_ActiveWriteBlocks.push_back(WriteBlockIndex);
InsertLock.ReleaseNow();
+ auto _ = MakeGuard([this, WriteBlockIndex]() {
+ RwLock::ExclusiveLockScope _(m_InsertLock);
+ m_ActiveWriteBlocks.erase(std::find(m_ActiveWriteBlocks.begin(), m_ActiveWriteBlocks.end(), WriteBlockIndex));
+ });
WriteBlock->Write(Data, ChunkSize, AlignedInsertOffset);
m_TotalSize.fetch_add(AlignedWriteSize, std::memory_order::relaxed);
Callback({.BlockIndex = WriteBlockIndex, .Offset = AlignedInsertOffset, .Size = ChunkSize});
-
- {
- RwLock::ExclusiveLockScope _(m_InsertLock);
- m_ActiveWriteBlocks.erase(std::find(m_ActiveWriteBlocks.begin(), m_ActiveWriteBlocks.end(), WriteBlockIndex));
- }
}
void
@@ -615,6 +614,10 @@ BlockStore::WriteChunks(std::span<IoBuffer> Datas, uint32_t Alignment, const Wri
Ref<BlockStoreFile> WriteBlock = m_WriteBlock;
m_ActiveWriteBlocks.push_back(WriteBlockIndex);
InsertLock.ReleaseNow();
+ auto _ = MakeGuard([this, WriteBlockIndex]() {
+ RwLock::ExclusiveLockScope _(m_InsertLock);
+ m_ActiveWriteBlocks.erase(std::find(m_ActiveWriteBlocks.begin(), m_ActiveWriteBlocks.end(), WriteBlockIndex));
+ });
{
MutableMemoryView WriteBuffer(Buffer.data(), RangeSize);
@@ -639,11 +642,6 @@ BlockStore::WriteChunks(std::span<IoBuffer> Datas, uint32_t Alignment, const Wri
}
Callback(Locations);
- {
- RwLock::ExclusiveLockScope _(m_InsertLock);
- m_ActiveWriteBlocks.erase(std::find(m_ActiveWriteBlocks.begin(), m_ActiveWriteBlocks.end(), WriteBlockIndex));
- }
-
Offset += Count;
}
}