about summary refs log tree commit diff
path: root/zenstore/compactcas.cpp
diff options
context:
space:
mode:
author: Dan Engelbrecht <[email protected]> 2022-05-03 23:04:45 +0200
committer: Dan Engelbrecht <[email protected]> 2022-05-03 23:04:45 +0200
commita19eee841d7ce0c9c868dced40a6380f55cdb9bd (patch)
tree7dc1a81d9ba159588e845c94c10eb7e391cfed9b /zenstore/compactcas.cpp
parentunused variable in test fix (diff)
downloadzen-a19eee841d7ce0c9c868dced40a6380f55cdb9bd.tar.xz
zen-a19eee841d7ce0c9c868dced40a6380f55cdb9bd.zip
handle that more than one block can be written to in parallel
Diffstat (limited to 'zenstore/compactcas.cpp')
-rw-r--r-- zenstore/compactcas.cpp 29
1 file changed, 12 insertions, 17 deletions
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp
index 7cc742beb..cc0e2241c 100644
--- a/zenstore/compactcas.cpp
+++ b/zenstore/compactcas.cpp
@@ -250,15 +250,16 @@ CasContainerStrategy::InsertChunk(const void* ChunkData, size_t ChunkSize, const
// This should be a rare occasion and the current flow reduces the time we block for
// reads, insert and GC.
- BlockStoreLocation Location = m_BlockStore.WriteChunk(ChunkData, ChunkSize, m_PayloadAlignment);
- BlockStoreDiskLocation DiskLocation(Location, m_PayloadAlignment);
- const CasDiskIndexEntry IndexEntry{.Key = ChunkHash, .Location = DiskLocation};
- m_CasLog.Append(IndexEntry);
- {
- RwLock::ExclusiveLockScope _(m_LocationMapLock);
- m_LocationMap.emplace(ChunkHash, DiskLocation);
- }
- m_TotalSize.fetch_add(static_cast<uint64_t>(ChunkSize), std::memory_order::relaxed);
+ m_BlockStore.WriteChunk(ChunkData, ChunkSize, m_PayloadAlignment, [&](const BlockStoreLocation& Location) {
+ BlockStoreDiskLocation DiskLocation(Location, m_PayloadAlignment);
+ const CasDiskIndexEntry IndexEntry{.Key = ChunkHash, .Location = DiskLocation};
+ m_CasLog.Append(IndexEntry);
+ {
+ RwLock::ExclusiveLockScope _(m_LocationMapLock);
+ m_LocationMap.emplace(ChunkHash, DiskLocation);
+ }
+ m_TotalSize.fetch_add(static_cast<uint64_t>(ChunkSize), std::memory_order::relaxed);
+ });
return CasStore::InsertResult{.New = true};
}
@@ -1685,7 +1686,7 @@ TEST_CASE("compactcas.legacyconversion")
}
}
-TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true))
+TEST_CASE("compactcas.threadedinsert")
{
// for (uint32_t i = 0; i < 100; ++i)
{
@@ -1887,13 +1888,7 @@ TEST_CASE("compactcas.threadedinsert") // * doctest::skip(true))
{
ThreadPool.ScheduleWork([&Cas, &WorkCompleted, ChunkHash]() {
CHECK(Cas.HaveChunk(ChunkHash));
- if (ChunkHash != IoHash::HashBuffer(Cas.FindChunk(ChunkHash)))
- {
- IoBuffer Buffer = Cas.FindChunk(ChunkHash);
- CHECK(Buffer);
- IoHash BufferHash = IoHash::HashBuffer(Buffer);
- CHECK(ChunkHash == BufferHash);
- }
+ CHECK(ChunkHash == IoHash::HashBuffer(Cas.FindChunk(ChunkHash)));
WorkCompleted.fetch_add(1);
});
}