about summary refs log tree commit diff
path: root/src/zenstore/cache/cachedisklayer.cpp
diff options
context:
space:
mode:
author Dan Engelbrecht <[email protected]> 2024-04-24 13:53:54 +0200
committer GitHub Enterprise <[email protected]> 2024-04-24 13:53:54 +0200
commit 1c0ddc112a6c18d411f1f3ae6d236ecc2bedcfaa (patch)
tree cfafeecb830a44a6d0870a217edabcc62d37669c /src/zenstore/cache/cachedisklayer.cpp
parent remove obsolete code (diff)
download zen-1c0ddc112a6c18d411f1f3ae6d236ecc2bedcfaa.tar.xz
download zen-1c0ddc112a6c18d411f1f3ae6d236ecc2bedcfaa.zip
iterate cas chunks (#59)
- Improvement: Reworked GetChunkInfos in oplog store to reduce disk thrashing and improve performance
Diffstat (limited to 'src/zenstore/cache/cachedisklayer.cpp')
-rw-r--r-- src/zenstore/cache/cachedisklayer.cpp 27
1 file changed, 14 insertions, 13 deletions
diff --git a/src/zenstore/cache/cachedisklayer.cpp b/src/zenstore/cache/cachedisklayer.cpp
index 4911ff4f8..51d547b3d 100644
--- a/src/zenstore/cache/cachedisklayer.cpp
+++ b/src/zenstore/cache/cachedisklayer.cpp
@@ -1553,23 +1553,24 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
ZEN_INFO("scrubbing '{}'", m_BucketDir);
- Stopwatch Timer;
- uint64_t ChunkCount = 0;
- uint64_t VerifiedChunkBytes = 0;
+ Stopwatch Timer;
+ std::atomic_uint64_t ChunkCount = 0;
+ std::atomic_uint64_t VerifiedChunkBytes = 0;
auto LogStats = MakeGuard([&] {
const uint32_t DurationMs = gsl::narrow<uint32_t>(Timer.GetElapsedTimeMs());
ZEN_INFO("cache bucket '{}' scrubbed {}B in {} from {} chunks ({})",
m_BucketName,
- NiceBytes(VerifiedChunkBytes),
+ NiceBytes(VerifiedChunkBytes.load()),
NiceTimeSpanMs(DurationMs),
- ChunkCount,
+ ChunkCount.load(),
NiceRate(VerifiedChunkBytes, DurationMs));
});
+ RwLock BadKeysLock;
std::vector<IoHash> BadKeys;
- auto ReportBadKey = [&](const IoHash& Key) { BadKeys.push_back(Key); };
+ auto ReportBadKey = [&](const IoHash& Key) { BadKeysLock.WithExclusiveLock([&]() { BadKeys.push_back(Key); }); };
try
{
@@ -1596,8 +1597,8 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
{
Ctx.ThrowIfDeadlineExpired();
- ++ChunkCount;
- VerifiedChunkBytes += Loc.Size();
+ ChunkCount.fetch_add(1);
+ VerifiedChunkBytes.fetch_add(Loc.Size());
if (Loc.GetContentType() == ZenContentType::kBinary)
{
@@ -1645,8 +1646,8 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
}
const auto ValidateSmallChunk = [&](size_t ChunkIndex, const void* Data, uint64_t Size) -> void {
- ++ChunkCount;
- VerifiedChunkBytes += Size;
+ ChunkCount.fetch_add(1);
+ VerifiedChunkBytes.fetch_add(Size);
const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex];
if (!Data)
{
@@ -1678,8 +1679,8 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
const auto ValidateLargeChunk = [&](size_t ChunkIndex, BlockStoreFile& File, uint64_t Offset, uint64_t Size) -> void {
Ctx.ThrowIfDeadlineExpired();
- ++ChunkCount;
- VerifiedChunkBytes += Size;
+ ChunkCount.fetch_add(1);
+ VerifiedChunkBytes.fetch_add(Size);
const IoHash& Hash = ChunkIndexToChunkHash[ChunkIndex];
IoBuffer Buffer(IoBuffer::BorrowedFile, File.GetBasicFile().Handle(), Offset, Size);
if (!Buffer)
@@ -1697,7 +1698,7 @@ ZenCacheDiskLayer::CacheBucket::ScrubStorage(ScrubContext& Ctx)
}
};
- m_BlockStore.IterateChunks(ChunkLocations, ValidateSmallChunk, ValidateLargeChunk);
+ m_BlockStore.IterateChunks(ChunkLocations, ValidateSmallChunk, ValidateLargeChunk, nullptr);
}
catch (ScrubDeadlineExpiredException&)
{