// Copyright Epic Games, Inc. All Rights Reserved.

#include
#include
#include
#include
#include
#include
#include
#include
#include

ZEN_THIRD_PARTY_INCLUDES_START
#include
#include
ZEN_THIRD_PARTY_INCLUDES_END

#if ZEN_WITH_TESTS
#	include
#	include
#endif	// ZEN_WITH_TESTS

namespace zen {

using namespace std::literals;

ChunkBlockDescription
ParseChunkBlockDescription(const CbObjectView& BlockObject)
{
	ChunkBlockDescription Result;
	Result.BlockHash = BlockObject["rawHash"sv].AsHash();
	if (Result.BlockHash != IoHash::Zero)
	{
		Result.HeaderSize = BlockObject["headerSize"sv].AsUInt64();

		CbArrayView ChunksArray = BlockObject["rawHashes"sv].AsArrayView();
		Result.ChunkRawHashes.reserve(ChunksArray.Num());
		for (CbFieldView ChunkView : ChunksArray)
		{
			Result.ChunkRawHashes.push_back(ChunkView.AsHash());
		}

		CbArrayView ChunkRawLengthsArray = BlockObject["chunkRawLengths"sv].AsArrayView();
		Result.ChunkRawLengths.reserve(ChunkRawLengthsArray.Num());
		for (CbFieldView ChunkView : ChunkRawLengthsArray)
		{
			Result.ChunkRawLengths.push_back(ChunkView.AsUInt32());
		}

		CbArrayView ChunkCompressedLengthsArray = BlockObject["chunkCompressedLengths"sv].AsArrayView();
		Result.ChunkCompressedLengths.reserve(ChunkCompressedLengthsArray.Num());
		for (CbFieldView ChunkView : ChunkCompressedLengthsArray)
		{
			Result.ChunkCompressedLengths.push_back(ChunkView.AsUInt32());
		}
	}
	return Result;
}

std::vector<ChunkBlockDescription>
ParseChunkBlockDescriptionList(const CbObjectView& BlocksObject)
{
	if (!BlocksObject)
	{
		return {};
	}
	std::vector<ChunkBlockDescription> Result;
	CbArrayView Blocks = BlocksObject["blocks"sv].AsArrayView();
	Result.reserve(Blocks.Num());
	for (CbFieldView BlockView : Blocks)
	{
		CbObjectView BlockObject = BlockView.AsObjectView();
		Result.emplace_back(ParseChunkBlockDescription(BlockObject));
	}
	return Result;
}

CbObject
BuildChunkBlockDescription(const ChunkBlockDescription& Block, CbObjectView MetaData)
{
	ZEN_ASSERT(Block.BlockHash != IoHash::Zero);
	ZEN_ASSERT(Block.HeaderSize > 0);
	ZEN_ASSERT(Block.ChunkRawLengths.size() == Block.ChunkRawHashes.size());
	ZEN_ASSERT(Block.ChunkCompressedLengths.size() == Block.ChunkRawHashes.size());

	CbObjectWriter Writer;
	Writer.AddHash("rawHash"sv, Block.BlockHash);
	Writer.AddInteger("headerSize"sv, Block.HeaderSize);
	Writer.BeginArray("rawHashes"sv);
	{
		for (const IoHash& ChunkHash : Block.ChunkRawHashes)
		{
			Writer.AddHash(ChunkHash);
		}
	}
	Writer.EndArray();
	Writer.BeginArray("chunkRawLengths");
	{
		for (uint32_t ChunkSize : Block.ChunkRawLengths)
		{
			Writer.AddInteger(ChunkSize);
		}
	}
	Writer.EndArray();
	Writer.BeginArray("chunkCompressedLengths");
	{
		for (uint32_t ChunkSize : Block.ChunkCompressedLengths)
		{
			Writer.AddInteger(ChunkSize);
		}
	}
	Writer.EndArray();
	Writer.AddObject("metadata", MetaData);
	return Writer.Save();
}
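// For reference, the compact binary object written by BuildChunkBlockDescription (and read back by
// ParseChunkBlockDescription above) has the following illustrative shape:
//
//   {
//     "rawHash":                <raw hash of the whole block>,
//     "headerSize":             <size in bytes of the varint header inside the block payload>,
//     "rawHashes":              [ <raw hash of each chunk> ],
//     "chunkRawLengths":        [ <uncompressed size of each chunk> ],
//     "chunkCompressedLengths": [ <compressed size of each chunk> ],
//     "metadata":               <caller-supplied metadata object>
//   }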
ChunkBlockDescription
GetChunkBlockDescription(const SharedBuffer& BlockPayload, const IoHash& RawHash)
{
	ChunkBlockDescription BlockDescription = {.BlockHash = IoHash::HashBuffer(BlockPayload)};
	if (BlockDescription.BlockHash != RawHash)
	{
		throw std::runtime_error(
			fmt::format("Block {} content hash {} does not match block hash", RawHash, BlockDescription.BlockHash));
	}
	if (IterateChunkBlock(
			BlockPayload,
			[&BlockDescription, RawHash](CompressedBuffer&& Chunk, const IoHash& AttachmentHash) {
				if (CompositeBuffer Decompressed = Chunk.DecompressToComposite(); Decompressed)
				{
					IoHash ChunkHash = IoHash::HashBuffer(Decompressed.Flatten());
					if (ChunkHash != AttachmentHash)
					{
						throw std::runtime_error(fmt::format(
							"Chunk {} in block {} content hash {} does not match chunk", AttachmentHash, RawHash, ChunkHash));
					}
					BlockDescription.ChunkRawHashes.push_back(AttachmentHash);
					BlockDescription.ChunkRawLengths.push_back(gsl::narrow<uint32_t>(Decompressed.GetSize()));
					BlockDescription.ChunkCompressedLengths.push_back(gsl::narrow<uint32_t>(Chunk.GetCompressedSize()));
				}
				else
				{
					throw std::runtime_error(
						fmt::format("Chunk {} in block {} is not a compressed buffer", AttachmentHash, RawHash));
				}
			},
			BlockDescription.HeaderSize))
	{
		return BlockDescription;
	}
	else
	{
		throw std::runtime_error(fmt::format("Block {} is malformed", RawHash));
	}
}

CompressedBuffer
GenerateChunkBlock(std::vector<std::pair<IoHash, std::function<std::pair<uint64_t, CompressedBuffer>(const IoHash&)>>>&& FetchChunks,
				   ChunkBlockDescription& OutBlock)
{
	const size_t ChunkCount = FetchChunks.size();

	std::vector<SharedBuffer> ChunkSegments;
	ChunkSegments.resize(1);
	ChunkSegments.reserve(1 + ChunkCount);

	OutBlock.ChunkRawHashes.reserve(ChunkCount);
	OutBlock.ChunkRawLengths.reserve(ChunkCount);
	OutBlock.ChunkCompressedLengths.reserve(ChunkCount);

	{
		IoBuffer		  TempBuffer(ChunkCount * 9);
		MutableMemoryView View = TempBuffer.GetMutableView();
		uint8_t*		  BufferStartPtr = reinterpret_cast<uint8_t*>(View.GetData());
		uint8_t*		  BufferEndPtr = BufferStartPtr;
		BufferEndPtr += WriteVarUInt(gsl::narrow<uint32_t>(ChunkCount), BufferEndPtr);
		for (const auto& It : FetchChunks)
		{
			std::pair<uint64_t, CompressedBuffer> Chunk = It.second(It.first);

			uint64_t					  ChunkSize = 0;
			std::span<const SharedBuffer> Segments = Chunk.second.GetCompressed().GetSegments();
			for (const SharedBuffer& Segment : Segments)
			{
				ZEN_ASSERT(Segment.IsOwned());
				ChunkSize += Segment.GetSize();
				ChunkSegments.push_back(Segment);
			}
			BufferEndPtr += WriteVarUInt(ChunkSize, BufferEndPtr);

			OutBlock.ChunkRawHashes.push_back(It.first);
			OutBlock.ChunkRawLengths.push_back(gsl::narrow<uint32_t>(Chunk.first));
			OutBlock.ChunkCompressedLengths.push_back(gsl::narrow<uint32_t>(ChunkSize));
		}
		ZEN_ASSERT(BufferEndPtr <= View.GetDataEnd());
		ptrdiff_t TempBufferLength = std::distance(BufferStartPtr, BufferEndPtr);
		ChunkSegments[0] = SharedBuffer(IoBuffer(TempBuffer, 0, gsl::narrow<uint64_t>(TempBufferLength)));
		OutBlock.HeaderSize = TempBufferLength;
	}

	CompressedBuffer CompressedBlock = CompressedBuffer::Compress(CompositeBuffer(std::move(ChunkSegments)),
																  OodleCompressor::Mermaid,
																  OodleCompressionLevel::None);
	OutBlock.BlockHash = CompressedBlock.DecodeRawHash();
	return CompressedBlock;
}

std::vector<uint32_t>
ReadChunkBlockHeader(const MemoryView BlockView, uint64_t& OutHeaderSize)
{
	const uint8_t* ReadPtr = reinterpret_cast<const uint8_t*>(BlockView.GetData());
	uint32_t	   NumberSize;
	uint64_t	   ChunkCount = ReadVarUInt(ReadPtr, NumberSize);
	ReadPtr += NumberSize;

	std::vector<uint32_t> ChunkSizes;
	ChunkSizes.reserve(ChunkCount);
	while (ChunkCount--)
	{
		if (ReadPtr >= BlockView.GetDataEnd())
		{
			throw std::runtime_error("Invalid block header, block data ended unexpectedly");
		}
		uint64_t ChunkSize = ReadVarUInt(ReadPtr, NumberSize);
		if (ChunkSize > std::numeric_limits<uint32_t>::max())
		{
			throw std::runtime_error("Invalid block header, header data is corrupt");
		}
		if (ChunkSize < 1)
		{
			throw std::runtime_error("Invalid block header, header data is corrupt");
		}
		ChunkSizes.push_back(gsl::narrow<uint32_t>(ChunkSize));
		ReadPtr += NumberSize;
	}
	uint64_t Offset = std::distance((const uint8_t*)BlockView.GetData(), ReadPtr);
	OutHeaderSize = Offset;
	return ChunkSizes;
}

bool
IterateChunkBlock(const SharedBuffer&									 BlockPayload,
				  std::function<void(CompressedBuffer&&, const IoHash&)> Visitor,
				  uint64_t&												 OutHeaderSize)
{
	ZEN_ASSERT(BlockPayload);
	if (BlockPayload.GetSize() < 1)
	{
		return false;
	}
	MemoryView			  BlockView = BlockPayload.GetView();
	std::vector<uint32_t> ChunkSizes = ReadChunkBlockHeader(BlockView, OutHeaderSize);
	uint64_t			  Offset = OutHeaderSize;
	OutHeaderSize = Offset;
	for (uint64_t ChunkSize : ChunkSizes)
	{
		IoBuffer Chunk(BlockPayload.AsIoBuffer(), Offset, ChunkSize);
		IoHash	 AttachmentRawHash;
		uint64_t AttachmentRawSize;
		CompressedBuffer CompressedChunk = CompressedBuffer::FromCompressed(SharedBuffer(Chunk), AttachmentRawHash, AttachmentRawSize);
		ZEN_ASSERT_SLOW(IoHash::HashBuffer(CompressedChunk.DecompressToComposite()) == AttachmentRawHash);
		if (!CompressedChunk)
		{
			ZEN_ERROR("Invalid chunk in block");
			return false;
		}
		Visitor(std::move(CompressedChunk), AttachmentRawHash);
		Offset += ChunkSize;
		ZEN_ASSERT(Offset <= BlockView.GetSize());
	}
	return true;
};
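// Illustrative block payload layout, as produced by GenerateChunkBlock and consumed by
// ReadChunkBlockHeader / IterateChunkBlock above:
//
//   [varint] chunk count N
//   [varint] compressed size of chunk 0
//   ...
//   [varint] compressed size of chunk N-1
//   [chunk 0 compressed payload][chunk 1 compressed payload]...[chunk N-1 compressed payload]
//
// The whole sequence is wrapped in a CompressedBuffer with OodleCompressionLevel::None, which is
// why the chunk data in a stored block is addressed from
// CompressedBuffer::GetHeaderSizeForNoneEncoder() + HeaderSize (see ChunkStartOffsetInBlock below).
//
// Example usage (illustrative), mirroring the "chunkblock.block" test case at the end of this file:
//
//   uint64_t HeaderSize = 0;
//   bool bOk = IterateChunkBlock(
//       BlockBuffer.Decompress(),
//       [](CompressedBuffer&& Chunk, const IoHash& ChunkRawHash) { /* consume chunk */ },
//       HeaderSize);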
std::vector<size_t>
FindReuseBlocks(OperationLogOutput&						  Output,
				const uint8_t							  BlockReuseMinPercentLimit,
				const bool								  IsVerbose,
				ReuseBlocksStatistics&					  Stats,
				const std::vector<ChunkBlockDescription>& KnownBlocks,
				std::span<const IoHash>					  ChunkHashes,
				std::span<const uint32_t>				  ChunkIndexes,
				std::vector<uint32_t>&					  OutUnusedChunkIndexes)
{
	ZEN_TRACE_CPU("FindReuseBlocks");

	// Find all blocks with a usage level higher than MinPercentLimit:
	// Pick out the blocks with usage higher than or equal to MinPercentLimit
	// Sort them by reused size - most usage first
	// Make a list of all chunks and mark them as not found
	// For each block, recalculate the block usage percent based on the chunks still marked as not found
	// If the block still reaches MinPercentLimit, keep it and remove the matching chunks from the not-found list
	// Repeat for all remaining blocks that initially matched MinPercentLimit

	std::vector<size_t> FilteredReuseBlockIndexes;
	uint32_t			ChunkCount = gsl::narrow<uint32_t>(ChunkHashes.size());
	std::vector<bool>	ChunkFound(ChunkCount, false);
	if (ChunkCount > 0)
	{
		size_t AcceptedChunkCount = 0;
		if (!KnownBlocks.empty())
		{
			Stopwatch ReuseTimer;

			tsl::robin_map<IoHash, uint32_t> ChunkHashToChunkIndex;
			ChunkHashToChunkIndex.reserve(ChunkIndexes.size());
			for (uint32_t ChunkIndex : ChunkIndexes)
			{
				ChunkHashToChunkIndex.insert_or_assign(ChunkHashes[ChunkIndex], ChunkIndex);
			}

			std::vector<size_t> BlockSizes(KnownBlocks.size(), 0);
			std::vector<size_t> BlockUseSize(KnownBlocks.size(), 0);
			std::vector<size_t> ReuseBlockIndexes;
			for (size_t KnownBlockIndex = 0; KnownBlockIndex < KnownBlocks.size(); KnownBlockIndex++)
			{
				const ChunkBlockDescription& KnownBlock = KnownBlocks[KnownBlockIndex];
				if (KnownBlock.BlockHash != IoHash::Zero &&
					KnownBlock.ChunkRawHashes.size() == KnownBlock.ChunkCompressedLengths.size())
				{
					size_t BlockAttachmentCount = KnownBlock.ChunkRawHashes.size();
					if (BlockAttachmentCount == 0)
					{
						continue;
					}
					size_t ReuseSize			= 0;
					size_t BlockSize			= 0;
					size_t FoundAttachmentCount = 0;
					size_t BlockChunkCount		= KnownBlock.ChunkRawHashes.size();
					for (size_t BlockChunkIndex = 0; BlockChunkIndex < BlockChunkCount; BlockChunkIndex++)
					{
						const IoHash&  BlockChunkHash = KnownBlock.ChunkRawHashes[BlockChunkIndex];
						const uint32_t BlockChunkSize = KnownBlock.ChunkCompressedLengths[BlockChunkIndex];
						BlockSize += BlockChunkSize;
						if (ChunkHashToChunkIndex.contains(BlockChunkHash))
						{
							ReuseSize += BlockChunkSize;
							FoundAttachmentCount++;
						}
					}
					size_t ReusePercent = (ReuseSize * 100) / BlockSize;
					if (ReusePercent >= BlockReuseMinPercentLimit)
					{
						if (IsVerbose)
						{
							ZEN_OPERATION_LOG_INFO(Output,
												   "Reusing block {}. {} attachments found, usage level: {}%",
												   KnownBlock.BlockHash,
												   FoundAttachmentCount,
												   ReusePercent);
						}
						ReuseBlockIndexes.push_back(KnownBlockIndex);
						BlockSizes[KnownBlockIndex]	  = BlockSize;
						BlockUseSize[KnownBlockIndex] = ReuseSize;
					}
					else if (FoundAttachmentCount > 0)
					{
						// if (IsVerbose)
						//{
						//	ZEN_OPERATION_LOG_INFO(Output, "Skipping block {}.
{} attachments found, usage level: {}%", // KnownBlock.BlockHash, // FoundAttachmentCount, ReusePercent); //} Stats.RejectedBlockCount++; Stats.RejectedChunkCount += FoundAttachmentCount; Stats.RejectedByteCount += ReuseSize; } } } if (!ReuseBlockIndexes.empty()) { std::sort(ReuseBlockIndexes.begin(), ReuseBlockIndexes.end(), [&](size_t Lhs, size_t Rhs) { return BlockUseSize[Lhs] > BlockUseSize[Rhs]; }); for (size_t KnownBlockIndex : ReuseBlockIndexes) { std::vector FoundChunkIndexes; size_t BlockSize = 0; size_t AdjustedReuseSize = 0; size_t AdjustedRawReuseSize = 0; const ChunkBlockDescription& KnownBlock = KnownBlocks[KnownBlockIndex]; for (size_t BlockChunkIndex = 0; BlockChunkIndex < KnownBlock.ChunkRawHashes.size(); BlockChunkIndex++) { const IoHash& BlockChunkHash = KnownBlock.ChunkRawHashes[BlockChunkIndex]; const uint32_t BlockChunkSize = KnownBlock.ChunkCompressedLengths[BlockChunkIndex]; BlockSize += BlockChunkSize; if (auto It = ChunkHashToChunkIndex.find(BlockChunkHash); It != ChunkHashToChunkIndex.end()) { const uint32_t ChunkIndex = It->second; if (!ChunkFound[ChunkIndex]) { FoundChunkIndexes.push_back(ChunkIndex); AdjustedReuseSize += KnownBlock.ChunkCompressedLengths[BlockChunkIndex]; AdjustedRawReuseSize += KnownBlock.ChunkRawLengths[BlockChunkIndex]; } } } size_t ReusePercent = (AdjustedReuseSize * 100) / BlockSize; if (ReusePercent >= BlockReuseMinPercentLimit) { if (IsVerbose) { ZEN_OPERATION_LOG_INFO(Output, "Reusing block {}. {} attachments found, usage level: {}%", KnownBlock.BlockHash, FoundChunkIndexes.size(), ReusePercent); } FilteredReuseBlockIndexes.push_back(KnownBlockIndex); for (uint32_t ChunkIndex : FoundChunkIndexes) { ChunkFound[ChunkIndex] = true; } AcceptedChunkCount += FoundChunkIndexes.size(); Stats.AcceptedChunkCount += FoundChunkIndexes.size(); Stats.AcceptedByteCount += AdjustedReuseSize; Stats.AcceptedRawByteCount += AdjustedRawReuseSize; Stats.AcceptedReduntantChunkCount += KnownBlock.ChunkRawHashes.size() - FoundChunkIndexes.size(); Stats.AcceptedReduntantByteCount += BlockSize - AdjustedReuseSize; } else { // if (IsVerbose) //{ // ZEN_OPERATION_LOG_INFO(Output, "Skipping block {}. 
filtered usage level: {}%", KnownBlock.BlockHash, // ReusePercent); //} Stats.RejectedBlockCount++; Stats.RejectedChunkCount += FoundChunkIndexes.size(); Stats.RejectedByteCount += AdjustedReuseSize; } } } } OutUnusedChunkIndexes.reserve(ChunkIndexes.size() - AcceptedChunkCount); for (uint32_t ChunkIndex : ChunkIndexes) { if (!ChunkFound[ChunkIndex]) { OutUnusedChunkIndexes.push_back(ChunkIndex); } } } return FilteredReuseBlockIndexes; } ChunkBlockAnalyser::ChunkBlockAnalyser(OperationLogOutput& LogOutput, std::span BlockDescriptions, const Options& Options) : m_LogOutput(LogOutput) , m_BlockDescriptions(BlockDescriptions) , m_Options(Options) { } std::vector ChunkBlockAnalyser::GetNeeded(const tsl::robin_map& ChunkHashToChunkIndex, std::function&& NeedsBlockChunk) { ZEN_TRACE_CPU("ChunkBlockAnalyser::GetNeeded"); std::vector Result; std::vector ChunkIsNeeded(ChunkHashToChunkIndex.size()); for (uint32_t ChunkIndex = 0; ChunkIndex < ChunkHashToChunkIndex.size(); ChunkIndex++) { ChunkIsNeeded[ChunkIndex] = NeedsBlockChunk(ChunkIndex); } std::vector BlockSlack(m_BlockDescriptions.size(), 0u); for (uint32_t BlockIndex = 0; BlockIndex < m_BlockDescriptions.size(); BlockIndex++) { const ChunkBlockDescription& BlockDescription = m_BlockDescriptions[BlockIndex]; uint64_t BlockUsedSize = 0; uint64_t BlockSize = 0; for (uint32_t ChunkBlockIndex = 0; ChunkBlockIndex < BlockDescription.ChunkRawHashes.size(); ChunkBlockIndex++) { const IoHash& ChunkHash = BlockDescription.ChunkRawHashes[ChunkBlockIndex]; if (auto It = ChunkHashToChunkIndex.find(ChunkHash); It != ChunkHashToChunkIndex.end()) { const uint32_t RemoteChunkIndex = It->second; if (ChunkIsNeeded[RemoteChunkIndex]) { BlockUsedSize += BlockDescription.ChunkCompressedLengths[ChunkBlockIndex]; } } BlockSize += BlockDescription.ChunkCompressedLengths[ChunkBlockIndex]; } BlockSlack[BlockIndex] = BlockSize - BlockUsedSize; } std::vector BlockOrder(m_BlockDescriptions.size()); std::iota(BlockOrder.begin(), BlockOrder.end(), 0); std::sort(BlockOrder.begin(), BlockOrder.end(), [&BlockSlack](uint32_t Lhs, uint32_t Rhs) { return BlockSlack[Lhs] < BlockSlack[Rhs]; }); std::vector ChunkIsPickedUp(ChunkHashToChunkIndex.size(), false); for (uint32_t BlockIndex : BlockOrder) { const ChunkBlockDescription& BlockDescription = m_BlockDescriptions[BlockIndex]; std::vector BlockChunkIndexNeeded; for (uint32_t ChunkBlockIndex = 0; ChunkBlockIndex < BlockDescription.ChunkRawHashes.size(); ChunkBlockIndex++) { const IoHash& ChunkHash = BlockDescription.ChunkRawHashes[ChunkBlockIndex]; if (auto It = ChunkHashToChunkIndex.find(ChunkHash); It != ChunkHashToChunkIndex.end()) { const uint32_t RemoteChunkIndex = It->second; if (ChunkIsNeeded[RemoteChunkIndex]) { if (!ChunkIsPickedUp[RemoteChunkIndex]) { ChunkIsPickedUp[RemoteChunkIndex] = true; BlockChunkIndexNeeded.push_back(ChunkBlockIndex); } } } else { ZEN_DEBUG("Chunk {} not found in block {}", ChunkHash, BlockDescription.BlockHash); } } if (!BlockChunkIndexNeeded.empty()) { Result.push_back(NeededBlock{.BlockIndex = BlockIndex, .ChunkIndexes = std::move(BlockChunkIndexNeeded)}); } } return Result; } ChunkBlockAnalyser::BlockResult ChunkBlockAnalyser::CalculatePartialBlockDownloads(std::span NeededBlocks, std::span BlockPartialDownloadModes) { ZEN_TRACE_CPU("ChunkBlockAnalyser::CalculatePartialBlockDownloads"); Stopwatch PartialAnalisysTimer; ChunkBlockAnalyser::BlockResult Result; uint64_t IdealDownloadTotalSize = 0; uint64_t AllBlocksTotalBlocksSize = 0; for (const NeededBlock& NeededBlock : NeededBlocks) { const 
ChunkBlockDescription& BlockDescription = m_BlockDescriptions[NeededBlock.BlockIndex]; std::span BlockChunkIndexNeeded(NeededBlock.ChunkIndexes); if (!NeededBlock.ChunkIndexes.empty()) { bool WantsToDoPartialBlockDownload = NeededBlock.ChunkIndexes.size() < BlockDescription.ChunkRawHashes.size(); bool CanDoPartialBlockDownload = (BlockDescription.HeaderSize > 0) && (BlockDescription.ChunkCompressedLengths.size() == BlockDescription.ChunkRawHashes.size()); EPartialBlockDownloadMode PartialBlockDownloadMode = BlockPartialDownloadModes[NeededBlock.BlockIndex]; const uint32_t ChunkStartOffsetInBlock = gsl::narrow(CompressedBuffer::GetHeaderSizeForNoneEncoder() + BlockDescription.HeaderSize); const uint64_t TotalBlockSize = std::accumulate(BlockDescription.ChunkCompressedLengths.begin(), BlockDescription.ChunkCompressedLengths.end(), std::uint64_t(ChunkStartOffsetInBlock)); AllBlocksTotalBlocksSize += TotalBlockSize; if ((PartialBlockDownloadMode != EPartialBlockDownloadMode::Off) && WantsToDoPartialBlockDownload && CanDoPartialBlockDownload) { ZEN_TRACE_CPU("PartialBlockAnalysis"); uint64_t TotalWantedChunksSize = 0; std::optional> MaybeBlockRanges = CalculateBlockRanges(NeededBlock.BlockIndex, BlockDescription, NeededBlock.ChunkIndexes, PartialBlockDownloadMode, ChunkStartOffsetInBlock, TotalBlockSize, TotalWantedChunksSize); ZEN_ASSERT(TotalWantedChunksSize <= TotalBlockSize); IdealDownloadTotalSize += TotalWantedChunksSize; if (MaybeBlockRanges.has_value()) { std::vector BlockRanges = MaybeBlockRanges.value(); ZEN_ASSERT(!BlockRanges.empty()); uint64_t RequestedSize = std::accumulate(BlockRanges.begin(), BlockRanges.end(), uint64_t(0), [](uint64_t Current, const BlockRangeDescriptor& Range) { return Current + Range.RangeLength; }); if (PartialBlockDownloadMode != EPartialBlockDownloadMode::Exact && BlockRanges.size() > 1) { // TODO: Once we have support in our http client to request multiple ranges in one request this // logic would need to change as the per-request overhead would go away const double LatencySec = PartialBlockDownloadMode == EPartialBlockDownloadMode::MultiRangeHighSpeed ? m_Options.HostHighSpeedLatencySec : m_Options.HostLatencySec; if (LatencySec > 0) { const uint64_t BytesPerSec = PartialBlockDownloadMode == EPartialBlockDownloadMode::MultiRangeHighSpeed ? 
m_Options.HostHighSpeedBytesPerSec : m_Options.HostSpeedBytesPerSec; const double ExtraRequestTimeSec = (BlockRanges.size() - 1) * LatencySec; const uint64_t ExtraRequestTimeBytes = uint64_t(ExtraRequestTimeSec * BytesPerSec); const uint64_t FullRangeSize = BlockRanges.back().RangeStart + BlockRanges.back().RangeLength - BlockRanges.front().RangeStart; if (ExtraRequestTimeBytes + RequestedSize >= FullRangeSize) { BlockRanges = std::vector{MergeBlockRanges(BlockRanges)}; if (m_Options.IsVerbose) { ZEN_OPERATION_LOG_INFO(m_LogOutput, "Merging {} chunks ({}) from block {} ({}) to single request (extra bytes {})", NeededBlock.ChunkIndexes.size(), NiceBytes(RequestedSize), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), NiceBytes(BlockRanges.front().RangeLength - RequestedSize)); } RequestedSize = BlockRanges.front().RangeLength; } } } if ((PartialBlockDownloadMode != EPartialBlockDownloadMode::Exact) && ((TotalBlockSize - RequestedSize) < (512u * 1024u))) { if (m_Options.IsVerbose) { ZEN_OPERATION_LOG_INFO(m_LogOutput, "Requesting {} chunks ({}) from block {} ({}) using full block request due to small " "total slack (extra bytes {})", NeededBlock.ChunkIndexes.size(), NiceBytes(RequestedSize), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), NiceBytes(TotalBlockSize - TotalWantedChunksSize)); } Result.FullBlockIndexes.push_back(NeededBlock.BlockIndex); } else { Result.BlockRanges.insert(Result.BlockRanges.end(), BlockRanges.begin(), BlockRanges.end()); if (m_Options.IsVerbose) { ZEN_OPERATION_LOG_INFO(m_LogOutput, "Requesting {} chunks ({}) from block {} ({}) using {} requests (extra bytes {})", NeededBlock.ChunkIndexes.size(), NiceBytes(RequestedSize), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), BlockRanges.size(), NiceBytes(RequestedSize - TotalWantedChunksSize)); } } } else { Result.FullBlockIndexes.push_back(NeededBlock.BlockIndex); } } else { Result.FullBlockIndexes.push_back(NeededBlock.BlockIndex); IdealDownloadTotalSize += TotalBlockSize; } } } if (!Result.BlockRanges.empty() && !m_Options.IsQuiet) { tsl::robin_set PartialBlockIndexes; uint64_t PartialBlocksTotalSize = std::accumulate(Result.BlockRanges.begin(), Result.BlockRanges.end(), uint64_t(0u), [&](uint64_t Current, const BlockRangeDescriptor& Range) { PartialBlockIndexes.insert(Range.BlockIndex); return Current + Range.RangeLength; }); uint64_t FullBlocksTotalSize = std::accumulate(Result.FullBlockIndexes.begin(), Result.FullBlockIndexes.end(), uint64_t(0u), [&](uint64_t Current, uint32_t BlockIndex) { const ChunkBlockDescription& BlockDescription = m_BlockDescriptions[BlockIndex]; uint32_t CurrentOffset = gsl::narrow(CompressedBuffer::GetHeaderSizeForNoneEncoder() + BlockDescription.HeaderSize); return Current + std::accumulate(BlockDescription.ChunkCompressedLengths.begin(), BlockDescription.ChunkCompressedLengths.end(), std::uint64_t(CurrentOffset)); }); uint64_t PartialBlockRequestCount = Result.BlockRanges.size(); uint64_t PartialBlockCount = PartialBlockIndexes.size(); uint64_t TotalExtraPartialBlocksRequestCount = PartialBlockRequestCount - PartialBlockCount; uint64_t ActualPartialDownloadTotalSize = FullBlocksTotalSize + PartialBlocksTotalSize; uint64_t IdealSkippedSize = AllBlocksTotalBlocksSize - IdealDownloadTotalSize; uint64_t ActualSkippedSize = AllBlocksTotalBlocksSize - ActualPartialDownloadTotalSize; double PercentOfIdealPartialSkippedSize = (ActualSkippedSize * 100.0) / IdealSkippedSize; ZEN_OPERATION_LOG_INFO(m_LogOutput, "Analysis of partial block requests saves download of {} out of 
{}, {:.1f}% of possible {} using {} extra " "requests. Completed in {}", NiceBytes(ActualSkippedSize), NiceBytes(AllBlocksTotalBlocksSize), PercentOfIdealPartialSkippedSize, NiceBytes(IdealSkippedSize), TotalExtraPartialBlocksRequestCount, NiceTimeSpanMs(PartialAnalisysTimer.GetElapsedTimeMs())); } return Result; } ChunkBlockAnalyser::BlockRangeDescriptor ChunkBlockAnalyser::MergeBlockRanges(std::span Ranges) { ZEN_ASSERT(Ranges.size() > 1); const BlockRangeDescriptor& First = Ranges.front(); const BlockRangeDescriptor& Last = Ranges.back(); return BlockRangeDescriptor{.BlockIndex = First.BlockIndex, .RangeStart = First.RangeStart, .RangeLength = Last.RangeStart + Last.RangeLength - First.RangeStart, .ChunkBlockIndexStart = First.ChunkBlockIndexStart, .ChunkBlockIndexCount = Last.ChunkBlockIndexStart + Last.ChunkBlockIndexCount - First.ChunkBlockIndexStart}; } std::optional> ChunkBlockAnalyser::MakeOptionalBlockRangeVector(uint64_t TotalBlockSize, const BlockRangeDescriptor& Range) { if (Range.RangeLength == TotalBlockSize) { return {}; } else { return std::vector{Range}; } }; const ChunkBlockAnalyser::BlockRangeLimit* ChunkBlockAnalyser::GetBlockRangeLimitForRange(std::span Limits, uint64_t TotalBlockSize, std::span Ranges) { if (Ranges.size() > 1) { const std::uint64_t WantedSize = std::accumulate(Ranges.begin(), Ranges.end(), uint64_t(0), [](uint64_t Current, const BlockRangeDescriptor& Range) { return Current + Range.RangeLength; }); const double RangeRequestedPercent = (WantedSize * 100.0) / TotalBlockSize; for (const BlockRangeLimit& Limit : Limits) { if (RangeRequestedPercent >= Limit.SizePercent && Ranges.size() > Limit.MaxRangeCount) { return &Limit; } } } return nullptr; }; std::vector ChunkBlockAnalyser::CollapseBlockRanges(const uint64_t AlwaysAcceptableGap, std::span BlockRanges) { ZEN_ASSERT(BlockRanges.size() > 1); std::vector CollapsedBlockRanges; auto BlockRangesIt = BlockRanges.begin(); CollapsedBlockRanges.push_back(*BlockRangesIt++); for (; BlockRangesIt != BlockRanges.end(); BlockRangesIt++) { BlockRangeDescriptor& LastRange = CollapsedBlockRanges.back(); const uint64_t BothRangeSize = BlockRangesIt->RangeLength + LastRange.RangeLength; const uint64_t Gap = BlockRangesIt->RangeStart - (LastRange.RangeStart + LastRange.RangeLength); if (Gap <= Max(BothRangeSize / 16, AlwaysAcceptableGap)) { LastRange.ChunkBlockIndexCount = (BlockRangesIt->ChunkBlockIndexStart + BlockRangesIt->ChunkBlockIndexCount) - LastRange.ChunkBlockIndexStart; LastRange.RangeLength = (BlockRangesIt->RangeStart + BlockRangesIt->RangeLength) - LastRange.RangeStart; } else { CollapsedBlockRanges.push_back(*BlockRangesIt); } } return CollapsedBlockRanges; }; uint64_t ChunkBlockAnalyser::CalculateNextGap(const uint64_t AlwaysAcceptableGap, std::span BlockRanges) { ZEN_ASSERT(BlockRanges.size() > 1); uint64_t AcceptableGap = (uint64_t)-1; for (size_t RangeIndex = 0; RangeIndex < BlockRanges.size() - 1; RangeIndex++) { const BlockRangeDescriptor& Range = BlockRanges[RangeIndex]; const BlockRangeDescriptor& NextRange = BlockRanges[RangeIndex + 1]; const uint64_t Gap = NextRange.RangeStart - (Range.RangeStart + Range.RangeLength); AcceptableGap = Min(Gap, AcceptableGap); } AcceptableGap = RoundUp(AcceptableGap, AlwaysAcceptableGap); return AcceptableGap; }; std::optional> ChunkBlockAnalyser::CalculateBlockRanges(uint32_t BlockIndex, const ChunkBlockDescription& BlockDescription, std::span BlockChunkIndexNeeded, EPartialBlockDownloadMode PartialBlockDownloadMode, const uint64_t ChunkStartOffsetInBlock, const 
uint64_t TotalBlockSize, uint64_t& OutTotalWantedChunksSize) { ZEN_TRACE_CPU("CalculateBlockRanges"); if (PartialBlockDownloadMode == EPartialBlockDownloadMode::Off) { return {}; } std::vector BlockRanges; { uint64_t CurrentOffset = ChunkStartOffsetInBlock; uint32_t ChunkBlockIndex = 0; uint32_t NeedBlockChunkIndexOffset = 0; BlockRangeDescriptor NextRange{.BlockIndex = BlockIndex}; while (NeedBlockChunkIndexOffset < BlockChunkIndexNeeded.size() && ChunkBlockIndex < BlockDescription.ChunkRawHashes.size()) { const uint32_t ChunkCompressedLength = BlockDescription.ChunkCompressedLengths[ChunkBlockIndex]; if (ChunkBlockIndex < BlockChunkIndexNeeded[NeedBlockChunkIndexOffset]) { if (NextRange.RangeLength > 0) { BlockRanges.push_back(NextRange); NextRange = {.BlockIndex = BlockIndex}; } ChunkBlockIndex++; CurrentOffset += ChunkCompressedLength; } else if (ChunkBlockIndex == BlockChunkIndexNeeded[NeedBlockChunkIndexOffset]) { if (NextRange.RangeLength == 0) { NextRange.RangeStart = CurrentOffset; NextRange.ChunkBlockIndexStart = ChunkBlockIndex; } NextRange.RangeLength += ChunkCompressedLength; NextRange.ChunkBlockIndexCount++; ChunkBlockIndex++; CurrentOffset += ChunkCompressedLength; NeedBlockChunkIndexOffset++; } else { ZEN_ASSERT(false); } } if (NextRange.RangeLength > 0) { BlockRanges.push_back(NextRange); } } ZEN_ASSERT(!BlockRanges.empty()); OutTotalWantedChunksSize = std::accumulate(BlockRanges.begin(), BlockRanges.end(), uint64_t(0), [](uint64_t Current, const BlockRangeDescriptor& Range) { return Current + Range.RangeLength; }); double RangeWantedPercent = (OutTotalWantedChunksSize * 100.0) / TotalBlockSize; if (BlockRanges.size() == 1) { if (m_Options.IsVerbose) { ZEN_OPERATION_LOG_INFO(m_LogOutput, "Range request of {} ({:.2f}%) using single range from block {} ({}) as is", NiceBytes(OutTotalWantedChunksSize), RangeWantedPercent, BlockDescription.BlockHash, NiceBytes(TotalBlockSize)); } return BlockRanges; } if (PartialBlockDownloadMode == EPartialBlockDownloadMode::Exact) { if (m_Options.IsVerbose) { ZEN_OPERATION_LOG_INFO(m_LogOutput, "Range request of {} ({:.2f}%) using {} ranges from block {} ({})", NiceBytes(OutTotalWantedChunksSize), RangeWantedPercent, BlockRanges.size(), BlockDescription.BlockHash, NiceBytes(TotalBlockSize)); } return BlockRanges; } if (PartialBlockDownloadMode == EPartialBlockDownloadMode::SingleRange) { const BlockRangeDescriptor MergedRange = MergeBlockRanges(BlockRanges); if (m_Options.IsVerbose) { const double RangeRequestedPercent = (MergedRange.RangeLength * 100.0) / TotalBlockSize; const double WastedPercent = ((MergedRange.RangeLength - OutTotalWantedChunksSize) * 100.0) / MergedRange.RangeLength; ZEN_OPERATION_LOG_INFO( m_LogOutput, "Range request of {} ({:.2f}%) using {} ranges from block {} ({}) limited to single block range {} ({:.2f}%) wasting " "{:.2f}% ({})", NiceBytes(OutTotalWantedChunksSize), RangeWantedPercent, BlockRanges.size(), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), NiceBytes(MergedRange.RangeLength), RangeRequestedPercent, WastedPercent, NiceBytes(MergedRange.RangeLength - OutTotalWantedChunksSize)); } return MakeOptionalBlockRangeVector(TotalBlockSize, MergedRange); } if (RangeWantedPercent > FullBlockRangePercentLimit) { const BlockRangeDescriptor MergedRange = MergeBlockRanges(BlockRanges); if (m_Options.IsVerbose) { const double RangeRequestedPercent = (MergedRange.RangeLength * 100.0) / TotalBlockSize; const double WastedPercent = ((MergedRange.RangeLength - OutTotalWantedChunksSize) * 100.0) / 
MergedRange.RangeLength; ZEN_OPERATION_LOG_INFO( m_LogOutput, "Range request of {} ({:.2f}%) using {} ranges from block {} ({}) exceeds {}%. Merged to single block range {} " "({:.2f}%) wasting {:.2f}% ({})", NiceBytes(OutTotalWantedChunksSize), RangeWantedPercent, BlockRanges.size(), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), FullBlockRangePercentLimit, NiceBytes(MergedRange.RangeLength), RangeRequestedPercent, WastedPercent, NiceBytes(MergedRange.RangeLength - OutTotalWantedChunksSize)); } return MakeOptionalBlockRangeVector(TotalBlockSize, MergedRange); } const uint64_t AlwaysAcceptableGap = 4u * 1024u; std::vector CollapsedBlockRanges = CollapseBlockRanges(AlwaysAcceptableGap, BlockRanges); while (GetBlockRangeLimitForRange(ForceMergeLimits, TotalBlockSize, CollapsedBlockRanges)) { CollapsedBlockRanges = CollapseBlockRanges(CalculateNextGap(AlwaysAcceptableGap, CollapsedBlockRanges), CollapsedBlockRanges); } const std::uint64_t WantedCollapsedSize = std::accumulate(CollapsedBlockRanges.begin(), CollapsedBlockRanges.end(), uint64_t(0), [](uint64_t Current, const BlockRangeDescriptor& Range) { return Current + Range.RangeLength; }); const double CollapsedRangeRequestedPercent = (WantedCollapsedSize * 100.0) / TotalBlockSize; if (m_Options.IsVerbose) { const double WastedPercent = ((WantedCollapsedSize - OutTotalWantedChunksSize) * 100.0) / WantedCollapsedSize; ZEN_OPERATION_LOG_INFO( m_LogOutput, "Range request of {} ({:.2f}%) using {} ranges from block {} ({}) collapsed to {} {:.2f}% using {} ranges wasting {:.2f}% " "({})", NiceBytes(OutTotalWantedChunksSize), RangeWantedPercent, BlockRanges.size(), BlockDescription.BlockHash, NiceBytes(TotalBlockSize), NiceBytes(WantedCollapsedSize), CollapsedRangeRequestedPercent, CollapsedBlockRanges.size(), WastedPercent, NiceBytes(WantedCollapsedSize - OutTotalWantedChunksSize)); } return CollapsedBlockRanges; } #if ZEN_WITH_TESTS namespace testutils { static std::vector> CreateAttachments( const std::span& Sizes, OodleCompressionLevel CompressionLevel = OodleCompressionLevel::VeryFast, uint64_t BlockSize = 0) { std::vector> Result; Result.reserve(Sizes.size()); for (size_t Size : Sizes) { CompressedBuffer Compressed = CompressedBuffer::Compress(SharedBuffer(CreateSemiRandomBlob(Size)), OodleCompressor::Mermaid, CompressionLevel, BlockSize); Result.emplace_back(std::pair(Oid::NewOid(), Compressed)); } return Result; } } // namespace testutils TEST_CASE("chunkblock.block") { using namespace std::literals; using namespace testutils; std::vector AttachmentSizes({7633, 6825, 5738, 8031, 7225, 566, 3656, 6006, 24, 3466, 1093, 4269, 2257, 3685, 3489, 7194, 6151, 5482, 6217, 3511, 6738, 5061, 7537, 2759, 1916, 8210, 2235, 4024, 1582, 5251, 491, 5464, 4607, 8135, 3767, 4045, 4415, 5007, 8876, 6761, 3359, 8526, 4097, 4855, 8225}); std::vector> AttachmentsWithId = CreateAttachments(AttachmentSizes); std::vector> Chunks; Chunks.reserve(AttachmentSizes.size()); for (const auto& It : AttachmentsWithId) { Chunks.push_back( std::make_pair(It.second.DecodeRawHash(), [Buffer = It.second](const IoHash&) -> std::pair { return {Buffer.DecodeRawSize(), Buffer}; })); } ChunkBlockDescription Block; CompressedBuffer BlockBuffer = GenerateChunkBlock(std::move(Chunks), Block); uint64_t HeaderSize; CHECK(IterateChunkBlock( BlockBuffer.Decompress(), [](CompressedBuffer&&, const IoHash&) {}, HeaderSize)); } TEST_CASE("chunkblock.reuseblocks") { using namespace std::literals; using namespace testutils; std::vector> BlockAttachmentSizes( {std::vector{7633, 6825, 
5738, 8031, 7225, 566, 3656, 6006, 24, 3466, 1093, 4269, 2257, 3685, 3489, 7194, 6151, 5482, 6217, 3511, 6738, 5061, 7537, 2759, 1916, 8210, 2235, 4024, 1582, 5251, 491, 5464, 4607, 8135, 3767, 4045, 4415, 5007, 8876, 6761, 3359, 8526, 4097, 4855, 8225}, {17633, 16825, 15738, 18031, 17225, 11566, 13656, 16006, 11124, 13466, 11093, 14269, 12257, 13685, 13489, 17194, 16151, 15482, 16217, 13511, 16738, 15061, 17537, 12759, 11916, 18210, 12235, 14024, 11582, 15251, 11491, 15464, 14607, 18135, 13767, 14045, 14415, 15007, 18876, 16761, 13359, 18526, 14097, 14855, 18225}}); std::vector BlockDescriptions; for (auto& AttachmentSizes : BlockAttachmentSizes) { std::vector> AttachmentsWithId = CreateAttachments(AttachmentSizes); std::vector> Chunks; Chunks.reserve(AttachmentSizes.size()); for (const auto& It : AttachmentsWithId) { Chunks.push_back( std::make_pair(It.second.DecodeRawHash(), [Buffer = It.second](const IoHash&) -> std::pair { return {Buffer.DecodeRawSize(), Buffer}; })); } ChunkBlockDescription Block; CompressedBuffer BlockBuffer = GenerateChunkBlock(std::move(Chunks), Block); BlockDescriptions.emplace_back(std::move(Block)); } LoggerRef LogRef = Log(); std::unique_ptr LogOutput(CreateStandardLogOutput(LogRef)); { // We use just about all the chunks - should result in use of both blocks ReuseBlocksStatistics ReuseBlocksStats; std::vector ManyChunkHashes; ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[0].ChunkRawHashes.begin(), BlockDescriptions[0].ChunkRawHashes.end() - 1); ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[1].ChunkRawHashes.begin() + 1, BlockDescriptions[1].ChunkRawHashes.end()); std::vector ManyChunkIndexes; ManyChunkIndexes.resize(ManyChunkHashes.size()); std::iota(ManyChunkIndexes.begin(), ManyChunkIndexes.end(), 0); std::vector UnusedChunkIndexes; std::vector ReusedBlocks = FindReuseBlocks(*LogOutput, 80, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes); CHECK_EQ(2u, ReusedBlocks.size()); CHECK_EQ(0u, UnusedChunkIndexes.size()); } { // We now only about one of the blocks ReuseBlocksStatistics ReuseBlocksStats; std::vector ManyChunkHashes; ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[0].ChunkRawHashes.begin(), BlockDescriptions[0].ChunkRawHashes.end() - 1); ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[1].ChunkRawHashes.begin() + 1, BlockDescriptions[1].ChunkRawHashes.end()); std::vector ManyChunkIndexes; ManyChunkIndexes.resize(ManyChunkHashes.size()); std::iota(ManyChunkIndexes.begin(), ManyChunkIndexes.end(), 0); std::vector UnusedChunkIndexes; std::vector ReusedBlocks = FindReuseBlocks(*LogOutput, 80, false, ReuseBlocksStats, std::vector{BlockDescriptions[0]}, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes); CHECK_EQ(1u, ReusedBlocks.size()); CHECK_EQ(BlockDescriptions[1].ChunkRawHashes.size() - 1, UnusedChunkIndexes.size()); } { std::vector ManyChunkHashes; ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[0].ChunkRawHashes.begin(), BlockDescriptions[0].ChunkRawHashes.end() - BlockDescriptions[0].ChunkRawHashes.size() / 2); ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[1].ChunkRawHashes.begin() + BlockDescriptions[1].ChunkRawHashes.size() / 2, BlockDescriptions[1].ChunkRawHashes.end()); std::vector ManyChunkIndexes; ManyChunkIndexes.resize(ManyChunkHashes.size()); std::iota(ManyChunkIndexes.begin(), ManyChunkIndexes.end(), 0); { // We use half the chunks - should result in no use of blocks due to 80% limit 
std::vector UnusedChunkIndexes80Percent; ReuseBlocksStatistics ReuseBlocksStats; std::vector ReusedBlocks80Percent = FindReuseBlocks(*LogOutput, 80, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes80Percent); CHECK_EQ(0u, ReusedBlocks80Percent.size()); CHECK_EQ(ManyChunkHashes.size(), UnusedChunkIndexes80Percent.size()); } { // We use half the chunks - should result in use of both blocks due to 40% limit std::vector UnusedChunkIndexes40Percent; ReuseBlocksStatistics ReuseBlocksStats; std::vector ReusedBlocks40Percent = FindReuseBlocks(*LogOutput, 40, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes40Percent); CHECK_EQ(2u, ReusedBlocks40Percent.size()); CHECK_EQ(0u, UnusedChunkIndexes40Percent.size()); } } { std::vector ManyChunkHashes; ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[0].ChunkRawHashes.begin(), BlockDescriptions[0].ChunkRawHashes.end() - BlockDescriptions[0].ChunkRawHashes.size() / 2); ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[1].ChunkRawHashes.begin() + 1, BlockDescriptions[1].ChunkRawHashes.end()); std::vector ManyChunkIndexes; ManyChunkIndexes.resize(ManyChunkHashes.size()); std::iota(ManyChunkIndexes.begin(), ManyChunkIndexes.end(), 0); { // We use half the chunks for first block - should result in use of one blocks due to 80% limit ReuseBlocksStatistics ReuseBlocksStats; std::vector UnusedChunkIndexes80Percent; std::vector ReusedBlocks80Percent = FindReuseBlocks(*LogOutput, 80, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes80Percent); CHECK_EQ(1u, ReusedBlocks80Percent.size()); CHECK_EQ(BlockDescriptions[0].ChunkRawHashes.size() - BlockDescriptions[0].ChunkRawHashes.size() / 2, UnusedChunkIndexes80Percent.size()); } { // We use half the chunks - should result in use of both blocks due to 40% limit ReuseBlocksStatistics ReuseBlocksStats; std::vector UnusedChunkIndexes40Percent; std::vector ReusedBlocks40Percent = FindReuseBlocks(*LogOutput, 40, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes40Percent); CHECK_EQ(2u, ReusedBlocks40Percent.size()); CHECK_EQ(0u, UnusedChunkIndexes40Percent.size()); } } { // Test simulate ThinkChunkBlockDescriptions for (ChunkBlockDescription& BlockDescription : BlockDescriptions) { BlockDescription.HeaderSize = 0; BlockDescription.ChunkRawLengths = std::vector(BlockDescription.ChunkRawHashes.size(), 1); BlockDescription.ChunkCompressedLengths = std::vector(BlockDescription.ChunkRawHashes.size(), 1); } std::vector ManyChunkHashes; ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[0].ChunkRawHashes.begin(), BlockDescriptions[0].ChunkRawHashes.end() - BlockDescriptions[0].ChunkRawHashes.size() / 2); ManyChunkHashes.insert(ManyChunkHashes.end(), BlockDescriptions[1].ChunkRawHashes.begin() + 1, BlockDescriptions[1].ChunkRawHashes.end()); std::vector ManyChunkIndexes; ManyChunkIndexes.resize(ManyChunkHashes.size()); std::iota(ManyChunkIndexes.begin(), ManyChunkIndexes.end(), 0); { // We use half the chunks for first block - should result in use of one blocks due to 80% limit ReuseBlocksStatistics ReuseBlocksStats; std::vector UnusedChunkIndexes80Percent; std::vector ReusedBlocks80Percent = FindReuseBlocks(*LogOutput, 80, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes80Percent); CHECK_EQ(1u, ReusedBlocks80Percent.size()); 
CHECK_EQ(BlockDescriptions[0].ChunkRawHashes.size() - BlockDescriptions[0].ChunkRawHashes.size() / 2, UnusedChunkIndexes80Percent.size()); } { // We use half the chunks - should result in use of both blocks due to 40% limit ReuseBlocksStatistics ReuseBlocksStats; std::vector UnusedChunkIndexes40Percent; std::vector ReusedBlocks40Percent = FindReuseBlocks(*LogOutput, 40, false, ReuseBlocksStats, BlockDescriptions, ManyChunkHashes, ManyChunkIndexes, UnusedChunkIndexes40Percent); CHECK_EQ(2u, ReusedBlocks40Percent.size()); CHECK_EQ(0u, UnusedChunkIndexes40Percent.size()); } } } void chunkblock_forcelink() { } #endif // ZEN_WITH_TESTS } // namespace zen