diff options
| author | Dan Engelbrecht <[email protected]> | 2022-05-02 10:18:31 +0200 |
|---|---|---|
| committer | Dan Engelbrecht <[email protected]> | 2022-05-02 10:18:31 +0200 |
| commit | c89190f7fabf8a08cda2255937dc99ca35972210 (patch) | |
| tree | f67248118b6dc47f5f3665ba09f7745bd69b0f5a /zenstore/blockstore.cpp | |
| parent | cleanup (diff) | |
| download | zen-c89190f7fabf8a08cda2255937dc99ca35972210.tar.xz zen-c89190f7fabf8a08cda2255937dc99ca35972210.zip | |
Move bulk of MigrateLegacyData to blockstore.cpp
Diffstat (limited to 'zenstore/blockstore.cpp')
| -rw-r--r-- | zenstore/blockstore.cpp | 219 |
1 file changed, 215 insertions, 4 deletions
diff --git a/zenstore/blockstore.cpp b/zenstore/blockstore.cpp
index 9961e734d..6f5578be8 100644
--- a/zenstore/blockstore.cpp
+++ b/zenstore/blockstore.cpp
@@ -437,7 +437,7 @@ BlockStore::ReclaimSpace(const ReclaimSnapshotState& Snapshot,
         {
             DeletedSize += ChunkLocations[DeleteIndex].Size;
         }
-        Callback(BlockIndex, {}, DeleteMap);
+        Callback({}, DeleteMap);
         DeletedCount += DeleteMap.size();
         {
             RwLock::ExclusiveLockScope _i(m_InsertLock);
@@ -477,7 +477,7 @@ BlockStore::ReclaimSpace(const ReclaimSnapshotState& Snapshot,
             NewBlockFile->Flush();
         }
         {
-            Callback(0xfffffffful, MovedChunks, {});
+            Callback(MovedChunks, {});
             MovedCount += KeepMap.size();
             MovedChunks.clear();
             RwLock::ExclusiveLockScope __(m_InsertLock);
@@ -558,7 +558,7 @@ BlockStore::ReclaimSpace(const ReclaimSnapshotState& Snapshot,
             DeletedSize += ChunkLocations[DeleteIndex].Size;
         }
 
-        Callback(BlockIndex, MovedChunks, DeleteMap);
+        Callback(MovedChunks, DeleteMap);
         MovedCount += KeepMap.size();
         DeletedCount += DeleteMap.size();
         MovedChunks.clear();
@@ -599,7 +599,7 @@ BlockStore::IterateChunks(const std::vector<BlockStoreLocation>& ChunkLocations,
     IoBuffer ReadBuffer{WindowSize};
     void* BufferBase = ReadBuffer.MutableData();
 
-    RwLock::SharedLockScope _(m_InsertLock); // TODO: Refactor so we don't have to keep m_InsertLock all the time?
+    RwLock::SharedLockScope _(m_InsertLock);
 
     for (const auto& Block : m_ChunkBlocks)
     {
@@ -659,6 +659,217 @@ BlockStore::IterateChunks(const std::vector<BlockStoreLocation>& ChunkLocations,
     }
 }
 
+bool
+BlockStore::Split(const std::vector<BlockStoreLocation>& ChunkLocations,
+                  const std::filesystem::path& SourceBlockFilePath,
+                  const std::filesystem::path& BlocksBasePath,
+                  uint64_t MaxBlockSize,
+                  uint64_t MaxBlockCount,
+                  size_t PayloadAlignment,
+                  bool CleanSource,
+                  const SplitCallback& Callback)
+{
+    std::error_code Error;
+    DiskSpace Space = DiskSpaceInfo(BlocksBasePath.parent_path(), Error);
+    if (Error)
+    {
+        ZEN_ERROR("get disk space in {} FAILED, reason: '{}'", BlocksBasePath, Error.message());
+        return false;
+    }
+
+    if (Space.Free < MaxBlockSize)
+    {
+        ZEN_ERROR("legacy store migration from '{}' FAILED, required disk space {}, free {}",
+                  BlocksBasePath,
+                  MaxBlockSize,
+                  NiceBytes(Space.Free));
+        return false;
+    }
+
+    size_t TotalSize = 0;
+    for (const BlockStoreLocation& Location : ChunkLocations)
+    {
+        TotalSize += Location.Size;
+    }
+    size_t ChunkCount = ChunkLocations.size();
+    uint64_t RequiredDiskSpace = TotalSize + ((PayloadAlignment - 1) * ChunkCount);
+    uint64_t MaxRequiredBlockCount = RoundUp(RequiredDiskSpace, MaxBlockSize) / MaxBlockSize;
+    if (MaxRequiredBlockCount > MaxBlockCount)
+    {
+        ZEN_ERROR("legacy store migration from '{}' FAILED, required block count {}, possible {}",
+                  BlocksBasePath,
+                  MaxRequiredBlockCount,
+                  MaxBlockCount);
+        return false;
+    }
+
+    constexpr const uint64_t DiskReserve = 1ul << 28;
+
+    if (CleanSource)
+    {
+        if (Space.Free < (MaxBlockSize + DiskReserve))
+        {
+            ZEN_INFO("legacy store migration from '{}' aborted, not enough disk space available {} ({})",
+                     BlocksBasePath,
+                     NiceBytes(MaxBlockSize + DiskReserve),
+                     NiceBytes(Space.Free));
+            return false;
+        }
+    }
+    else
+    {
+        if (Space.Free < (RequiredDiskSpace + DiskReserve))
+        {
+            ZEN_INFO("legacy store migration from '{}' aborted, not enough disk space available {} ({})",
+                     BlocksBasePath,
+                     NiceBytes(RequiredDiskSpace + DiskReserve),
+                     NiceBytes(Space.Free));
+            return false;
+        }
+    }
+
+    uint32_t WriteBlockIndex = 0;
+    while (std::filesystem::exists(BlockStore::GetBlockPath(BlocksBasePath, WriteBlockIndex)))
+    {
+        ++WriteBlockIndex;
+    }
+
+    BasicFile BlockFile;
+    BlockFile.Open(SourceBlockFilePath, CleanSource ? BasicFile::Mode::kWrite : BasicFile::Mode::kRead);
+
+    if (CleanSource && (MaxRequiredBlockCount < 2))
+    {
+        std::vector<std::pair<size_t, BlockStoreLocation>> Chunks;
+        Chunks.reserve(ChunkCount);
+        for (size_t Index = 0; Index < ChunkCount; ++Index)
+        {
+            const BlockStoreLocation& ChunkLocation = ChunkLocations[Index];
+            Chunks.push_back({Index, {.BlockIndex = WriteBlockIndex, .Offset = ChunkLocation.Offset, .Size = ChunkLocation.Size}});
+        }
+        std::filesystem::path BlockPath = BlockStore::GetBlockPath(BlocksBasePath, WriteBlockIndex);
+        CreateDirectories(BlockPath.parent_path());
+        BlockFile.Close();
+        std::filesystem::rename(SourceBlockFilePath, BlockPath);
+        Callback(Chunks);
+        return true;
+    }
+
+    std::vector<size_t> ChunkIndexes;
+    ChunkIndexes.reserve(ChunkCount);
+    for (size_t Index = 0; Index < ChunkCount; ++Index)
+    {
+        ChunkIndexes.push_back(Index);
+    }
+
+    std::sort(begin(ChunkIndexes), end(ChunkIndexes), [&ChunkLocations](size_t Lhs, size_t Rhs) {
+        const BlockStoreLocation& LhsLocation = ChunkLocations[Lhs];
+        const BlockStoreLocation& RhsLocation = ChunkLocations[Rhs];
+        return LhsLocation.Offset < RhsLocation.Offset;
+    });
+
+    uint64_t BlockSize = 0;
+    uint64_t BlockOffset = 0;
+    std::vector<BlockStoreLocation> NewLocations;
+    struct BlockData
+    {
+        std::vector<std::pair<size_t, BlockStoreLocation>> Chunks;
+        uint64_t BlockOffset;
+        uint64_t BlockSize;
+        uint32_t BlockIndex;
+    };
+
+    std::vector<BlockData> BlockRanges;
+    std::vector<std::pair<size_t, BlockStoreLocation>> Chunks;
+    BlockRanges.reserve(MaxRequiredBlockCount);
+    for (const size_t& ChunkIndex : ChunkIndexes)
+    {
+        const BlockStoreLocation& LegacyChunkLocation = ChunkLocations[ChunkIndex];
+
+        uint64_t ChunkOffset = LegacyChunkLocation.Offset;
+        uint64_t ChunkSize = LegacyChunkLocation.Size;
+        uint64_t ChunkEnd = ChunkOffset + ChunkSize;
+
+        if (BlockSize == 0)
+        {
+            BlockOffset = ChunkOffset;
+        }
+        if ((ChunkEnd - BlockOffset) > MaxBlockSize)
+        {
+            BlockData BlockRange{.BlockOffset = BlockOffset, .BlockSize = BlockSize, .BlockIndex = WriteBlockIndex};
+            BlockRange.Chunks.swap(Chunks);
+            BlockRanges.push_back(BlockRange);
+
+            WriteBlockIndex++;
+            while (std::filesystem::exists(BlockStore::GetBlockPath(BlocksBasePath, WriteBlockIndex)))
+            {
+                ++WriteBlockIndex;
+            }
+            BlockOffset = ChunkOffset;
+            BlockSize = 0;
+        }
+        BlockSize = RoundUp(BlockSize, PayloadAlignment);
+        BlockStoreLocation ChunkLocation = {.BlockIndex = WriteBlockIndex, .Offset = ChunkOffset - BlockOffset, .Size = ChunkSize};
+        Chunks.push_back({ChunkIndex, ChunkLocation});
+        BlockSize = ChunkEnd - BlockOffset;
+    }
+    if (BlockSize > 0)
+    {
+        BlockRanges.push_back(
+            {.Chunks = std::move(Chunks), .BlockOffset = BlockOffset, .BlockSize = BlockSize, .BlockIndex = WriteBlockIndex});
+    }
+
+    Stopwatch WriteBlockTimer;
+
+    std::reverse(BlockRanges.begin(), BlockRanges.end());
+    std::vector<std::uint8_t> Buffer(1 << 28);
+    for (size_t Idx = 0; Idx < BlockRanges.size(); ++Idx)
+    {
+        const BlockData& BlockRange = BlockRanges[Idx];
+        if (Idx > 0)
+        {
+            uint64_t Remaining = BlockRange.BlockOffset + BlockRange.BlockSize;
+            uint64_t Completed = BlockOffset + BlockSize - Remaining;
+            uint64_t ETA = (WriteBlockTimer.GetElapsedTimeMs() * Remaining) / Completed;
+
+            ZEN_INFO("migrating store '{}' {}/{} blocks, remaining {} ({}) ETA: {}",
+                     BlocksBasePath,
+                     Idx,
+                     BlockRanges.size(),
+                     NiceBytes(BlockRange.BlockOffset + BlockRange.BlockSize),
+                     NiceBytes(BlockOffset + BlockSize),
+                     NiceTimeSpanMs(ETA));
+        }
+
+        std::filesystem::path BlockPath = BlockStore::GetBlockPath(BlocksBasePath, BlockRange.BlockIndex);
+        BlockStoreFile ChunkBlock(BlockPath);
+        ChunkBlock.Create(BlockRange.BlockSize);
+        uint64_t Offset = 0;
+        while (Offset < BlockRange.BlockSize)
+        {
+            uint64_t Size = BlockRange.BlockSize - Offset;
+            if (Size > Buffer.size())
+            {
+                Size = Buffer.size();
+            }
+            BlockFile.Read(Buffer.data(), Size, BlockRange.BlockOffset + Offset);
+            ChunkBlock.Write(Buffer.data(), Size, Offset);
+            Offset += Size;
+        }
+        ChunkBlock.Truncate(Offset);
+        ChunkBlock.Flush();
+
+        Callback(BlockRange.Chunks);
+
+        if (CleanSource)
+        {
+            BlockFile.SetFileSize(BlockRange.BlockOffset);
+        }
+    }
+    BlockFile.Close();
+
+    return true;
+}
+
 const char*
 BlockStore::GetBlockFileExtension()
 {