diff options
| author | Stefan Boberg <[email protected]> | 2025-03-06 17:27:59 +0100 |
|---|---|---|
| committer | Stefan Boberg <[email protected]> | 2025-03-06 17:27:59 +0100 |
| commit | 66e5d1f4e288e0c32f854ebe3b63584b42b83554 (patch) | |
| tree | d67e9d358419b5baccd429d54988414e0d7cd7a6 /src/zenstore/blockstore.cpp | |
| parent | reduced memory churn using fixed_xxx containers (#236) (diff) | |
| download | zen-66e5d1f4e288e0c32f854ebe3b63584b42b83554.tar.xz zen-66e5d1f4e288e0c32f854ebe3b63584b42b83554.zip | |
switched std::vector -> eastl::vector
Diffstat (limited to 'src/zenstore/blockstore.cpp')
| -rw-r--r-- | src/zenstore/blockstore.cpp | 90 |
1 file changed, 45 insertions, 45 deletions
diff --git a/src/zenstore/blockstore.cpp b/src/zenstore/blockstore.cpp index e976c061d..7df9117db 100644 --- a/src/zenstore/blockstore.cpp +++ b/src/zenstore/blockstore.cpp @@ -293,8 +293,8 @@ BlockStore::Initialize(const std::filesystem::path& BlocksBasePath, uint64_t Max if (std::filesystem::is_directory(m_BlocksBasePath)) { - uint32_t NextBlockIndex = 0; - std::vector<std::filesystem::path> FoldersToScan; + uint32_t NextBlockIndex = 0; + eastl::vector<std::filesystem::path> FoldersToScan; FoldersToScan.push_back(m_BlocksBasePath); size_t FolderOffset = 0; while (FolderOffset < FoldersToScan.size()) @@ -399,7 +399,7 @@ BlockStore::GetBlocksToCompact(const BlockUsageMap& BlockUsage, uint32_t BlockUs const uint64_t SmallBlockLimit = m_MaxBlockSize / 2; - std::vector<uint32_t> SmallBlockIndexes; + eastl::vector<uint32_t> SmallBlockIndexes; for (const auto& It : m_ChunkBlocks) { @@ -598,9 +598,9 @@ BlockStore::WriteChunks(std::span<IoBuffer> Datas, uint32_t Alignment, const Wri LargestSize = Max(LargestSize, Size); } - const uint64_t MinSize = Max(LargestSize, 8u * 1024u * 1024u); - const uint64_t BufferSize = Min(TotalSize, MinSize); - std::vector<uint8_t> Buffer(BufferSize); + const uint64_t MinSize = Max(LargestSize, 8u * 1024u * 1024u); + const uint64_t BufferSize = Min(TotalSize, MinSize); + eastl::vector<uint8_t> Buffer(BufferSize); size_t Offset = 0; while (Offset < TotalCount) @@ -660,8 +660,8 @@ BlockStore::WriteChunks(std::span<IoBuffer> Datas, uint32_t Alignment, const Wri m_TotalSize.fetch_add(RangeSize, std::memory_order::relaxed); - uint32_t ChunkOffset = AlignedInsertOffset; - std::vector<BlockStoreLocation> Locations(Count); + uint32_t ChunkOffset = AlignedInsertOffset; + eastl::vector<BlockStoreLocation> Locations(Count); for (size_t Index = 0; Index < Count; Index++) { uint32_t ChunkSize = gsl::narrow<uint32_t>(Datas[Offset + Index].GetSize()); @@ -746,8 +746,8 @@ BlockStore::IterateBlock(std::span<const BlockStoreLocation> ChunkLocations, 
IterateSmallChunkWindowSize = Min((LargeSizeLimit + IterateSmallChunkMaxGapSize) * ChunkLocations.size(), IterateSmallChunkWindowSize); - uint32_t BlockIndex = ChunkLocations[InChunkIndexes[0]].BlockIndex; - std::vector<size_t> ChunkIndexes(InChunkIndexes.begin(), InChunkIndexes.end()); + uint32_t BlockIndex = ChunkLocations[InChunkIndexes[0]].BlockIndex; + eastl::vector<size_t> ChunkIndexes(InChunkIndexes.begin(), InChunkIndexes.end()); std::sort(ChunkIndexes.begin(), ChunkIndexes.end(), [&](size_t IndexA, size_t IndexB) -> bool { return ChunkLocations[IndexA].Offset < ChunkLocations[IndexB].Offset; }); @@ -894,7 +894,7 @@ BlockStore::IterateChunks(const std::span<const BlockStoreLocation>& ChunkLocati ZEN_LOG_SCOPE("iterating chunks from '{}'", m_BlocksBasePath); - std::vector<size_t> ChunkOrder(ChunkLocations.size()); + eastl::vector<size_t> ChunkOrder(ChunkLocations.size()); for (size_t ChunkIndex = 0; ChunkIndex < ChunkLocations.size(); ++ChunkIndex) { ChunkOrder[ChunkIndex] = ChunkIndex; @@ -998,11 +998,11 @@ BlockStore::CompactBlocks(const BlockStoreCompactState& CompactState, return Continue; }; - std::vector<uint32_t> RemovedBlocks; + eastl::vector<uint32_t> RemovedBlocks; - CompactState.IterateBlocks([&](uint32_t BlockIndex, - const std::vector<size_t>& KeepChunkIndexes, - const std::vector<BlockStoreLocation>& ChunkLocations) -> bool { + CompactState.IterateBlocks([&](uint32_t BlockIndex, + const eastl::vector<size_t>& KeepChunkIndexes, + const eastl::vector<BlockStoreLocation>& ChunkLocations) -> bool { ZEN_TRACE_CPU("BlockStore::CompactBlock"); Ref<BlockStoreFile> OldBlockFile; { @@ -1047,15 +1047,15 @@ BlockStore::CompactBlocks(const BlockStoreCompactState& CompactState, } else { - std::vector<size_t> SortedChunkIndexes(KeepChunkIndexes); + eastl::vector<size_t> SortedChunkIndexes(KeepChunkIndexes); std::sort(SortedChunkIndexes.begin(), SortedChunkIndexes.end(), [&ChunkLocations](size_t Lhs, size_t Rhs) { return ChunkLocations[Lhs].Offset < 
ChunkLocations[Rhs].Offset; }); BasicFileBuffer SourceFileBuffer(OldBlockFile->GetBasicFile(), Min(65536u, OldBlockSize)); - uint64_t WrittenBytesToBlock = 0; - uint64_t MovedFromBlock = 0; - std::vector<uint8_t> Chunk; + uint64_t WrittenBytesToBlock = 0; + uint64_t MovedFromBlock = 0; + eastl::vector<uint8_t> Chunk; for (const size_t& ChunkIndex : SortedChunkIndexes) { const BlockStoreLocation ChunkLocation = ChunkLocations[ChunkIndex]; @@ -1424,14 +1424,14 @@ namespace blockstore::impl { return AsString; }; - std::vector<std::filesystem::path> GetDirectoryContent(std::filesystem::path RootDir, bool Files, bool Directories) + eastl::vector<std::filesystem::path> GetDirectoryContent(std::filesystem::path RootDir, bool Files, bool Directories) { DirectoryContent DirectoryContent; GetDirectoryContent(RootDir, DirectoryContentFlags::Recursive | (Files ? DirectoryContentFlags::IncludeFiles : DirectoryContentFlags::None) | (Directories ? DirectoryContentFlags::IncludeDirs : DirectoryContentFlags::None), DirectoryContent); - std::vector<std::filesystem::path> Result; + eastl::vector<std::filesystem::path> Result; Result.insert(Result.end(), DirectoryContent.Directories.begin(), DirectoryContent.Directories.end()); Result.insert(Result.end(), DirectoryContent.Files.begin(), DirectoryContent.Files.end()); return Result; @@ -1449,8 +1449,8 @@ TEST_CASE("blockstore.multichunks") BlockStore Store; Store.Initialize(RootDirectory, 128, 1024); - std::vector<IoBuffer> MultiChunkData; - std::string FirstChunkData = "0123456789012345678901234567890123456789012345678901234567890123"; + eastl::vector<IoBuffer> MultiChunkData; + std::string FirstChunkData = "0123456789012345678901234567890123456789012345678901234567890123"; MultiChunkData.push_back(IoBuffer(IoBuffer::Wrap, FirstChunkData.data(), FirstChunkData.size())); std::string SecondChunkData = "12345678901234567890123456789012345678901234567890123456"; @@ -1588,16 +1588,16 @@ TEST_CASE("blockstore.iterate.chunks") 
WorkerThreadPool WorkerPool(4); - std::vector<BlockStoreLocation> Locations{FirstChunkLocation, - SecondChunkLocation, - VeryLargeChunkLocation, - BadLocationZeroSize, - BadLocationOutOfRange, - BadBlockIndex}; - Latch WorkLatch(1); + eastl::vector<BlockStoreLocation> Locations{FirstChunkLocation, + SecondChunkLocation, + VeryLargeChunkLocation, + BadLocationZeroSize, + BadLocationOutOfRange, + BadBlockIndex}; + Latch WorkLatch(1); Store.IterateChunks(Locations, [&](uint32_t, std::span<const size_t> ChunkIndexes) -> bool { WorkLatch.AddCount(1); - WorkerPool.ScheduleWork([&, ChunkIndexes = std::vector<size_t>(ChunkIndexes.begin(), ChunkIndexes.end())]() { + WorkerPool.ScheduleWork([&, ChunkIndexes = eastl::vector<size_t>(ChunkIndexes.begin(), ChunkIndexes.end())]() { auto _ = MakeGuard([&WorkLatch]() { WorkLatch.CountDown(); }); bool Continue = Store.IterateBlock( Locations, @@ -1687,10 +1687,10 @@ TEST_CASE("blockstore.thread.read.write") BlockStore Store; Store.Initialize(RootDirectory / "store", 1088, 1024); - constexpr size_t ChunkCount = 1000; - constexpr size_t Alignment = 8; - std::vector<IoBuffer> Chunks; - std::vector<IoHash> ChunkHashes; + constexpr size_t ChunkCount = 1000; + constexpr size_t Alignment = 8; + eastl::vector<IoBuffer> Chunks; + eastl::vector<IoHash> ChunkHashes; Chunks.reserve(ChunkCount); ChunkHashes.reserve(ChunkCount); for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) @@ -1700,7 +1700,7 @@ TEST_CASE("blockstore.thread.read.write") ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } - std::vector<BlockStoreLocation> ChunkLocations; + eastl::vector<BlockStoreLocation> ChunkLocations; ChunkLocations.resize(ChunkCount); WorkerThreadPool WorkerPool(8); @@ -1734,7 +1734,7 @@ TEST_CASE("blockstore.thread.read.write") Sleep(1); } - std::vector<BlockStoreLocation> SecondChunkLocations; + eastl::vector<BlockStoreLocation> SecondChunkLocations; SecondChunkLocations.resize(ChunkCount); WorkCompleted = 0; for 
(size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) @@ -1770,10 +1770,10 @@ TEST_CASE("blockstore.compact.blocks") BlockStore Store; Store.Initialize(RootDirectory / "store", 1088, 1024); - constexpr size_t ChunkCount = 200; - constexpr size_t Alignment = 8; - std::vector<IoBuffer> Chunks; - std::vector<IoHash> ChunkHashes; + constexpr size_t ChunkCount = 200; + constexpr size_t Alignment = 8; + eastl::vector<IoBuffer> Chunks; + eastl::vector<IoHash> ChunkHashes; Chunks.reserve(ChunkCount); ChunkHashes.reserve(ChunkCount); for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) @@ -1783,7 +1783,7 @@ TEST_CASE("blockstore.compact.blocks") ChunkHashes.push_back(IoHash::HashBuffer(Chunk.Data(), Chunk.Size())); } - std::vector<BlockStoreLocation> ChunkLocations; + eastl::vector<BlockStoreLocation> ChunkLocations; ChunkLocations.resize(ChunkCount); for (size_t ChunkIndex = 0; ChunkIndex < ChunkCount; ++ChunkIndex) @@ -1924,8 +1924,8 @@ TEST_CASE("blockstore.compact.blocks") CHECK(!Store.IsWriting(0)); State.IncludeBlock(0); - uint64_t SkipChunkCount = 2; - std::vector<BlockStoreLocation> DroppedLocations(ChunkLocations.begin(), ChunkLocations.begin() + 2); + uint64_t SkipChunkCount = 2; + eastl::vector<BlockStoreLocation> DroppedLocations(ChunkLocations.begin(), ChunkLocations.begin() + 2); for (auto It = ChunkLocations.begin() + 2; It != ChunkLocations.end(); It++) { const BlockStoreLocation& Location = *It; @@ -1996,7 +1996,7 @@ TEST_CASE("blockstore.compact.blocks") } SkipFlag = false; - std::vector<BlockStoreLocation> DroppedLocations; + eastl::vector<BlockStoreLocation> DroppedLocations; for (const BlockStoreLocation& Location : ChunkLocations) { if (Store.IsWriting(Location.BlockIndex)) |