aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2026-03-14 14:49:23 +0100
committerDan Engelbrecht <[email protected]>2026-03-14 14:49:23 +0100
commit6d150e1c9cccbeee6c0594a052a2a0e476a415c9 (patch)
tree4e0a661472f3fd18b62cdd89af87eb4ba76ed93f
parentfix path with AllowChunking = false (diff)
downloadzen-6d150e1c9cccbeee6c0594a052a2a0e476a415c9.tar.xz
zen-6d150e1c9cccbeee6c0594a052a2a0e476a415c9.zip
BlockComposer tests
-rw-r--r--src/zenremotestore/projectstore/remoteprojectstore.cpp434
1 file changed, 424 insertions, 10 deletions
diff --git a/src/zenremotestore/projectstore/remoteprojectstore.cpp b/src/zenremotestore/projectstore/remoteprojectstore.cpp
index b5bc28a24..9bc762bc3 100644
--- a/src/zenremotestore/projectstore/remoteprojectstore.cpp
+++ b/src/zenremotestore/projectstore/remoteprojectstore.cpp
@@ -172,19 +172,65 @@ namespace remotestore_impl {
return BlockIndex;
}
+ // BlockComposer packs attachment chunks (each identified by an IoHash and a byte size) into
+ // fixed-size blocks subject to two constraints:
+ // - The total encoded content of a block must not exceed UsableBlockSize bytes.
+ // - A block may contain at most MaxChunksPerBlock chunk entries.
+ //
+ // Chunks belonging to the same op key (Oid) are kept together in one block whenever possible,
+ // so that a single block fetch can satisfy an entire op without needing to read multiple blocks.
+ //
+ // When a block is complete the OnNewBlock callback is invoked with ownership of the chunk-hash
+ // vector for that block. The callback is also invoked for any partially-filled pending block
+ // that remains after all attachments have been processed.
class BlockComposer
{
public:
struct Configuration
{
- uint64_t MaxBlockSize = 0;
- uint64_t MaxChunksPerBlock = 0;
- uint64_t MaxChunkEmbedSize = 0;
- std::function<bool()> IsCancelledFunc;
+ uint64_t MaxBlockSize = 0; // Total encoded block size limit in bytes (includes header overhead).
+ uint64_t MaxChunksPerBlock = 0; // Maximum number of chunk entries allowed in a single block.
+ uint64_t MaxChunkEmbedSize = 0; // Maximum size of one embeddable chunk; used to calculate worst-case header size.
+ std::function<bool()>
+ IsCancelledFunc; // Optional: if set and returns true, Compose returns early without emitting remaining blocks.
};
explicit BlockComposer(const Configuration& Config) : m_Config(Config), m_UsableBlockSize(CalculateUsableBlockSize(m_Config)) {}
+ // Compose distributes AttachmentHashes into blocks via a two-phase algorithm.
+ //
+ // Phase 1 - Gather (inner while loop):
+ // Starting from the current index, collect all consecutive attachments that share the same
+ // op key (Oid) into CurrentOpRawHashes / CurrentOpChunkSizes. Collection stops (with
+ // CurrentOpFillFullBlock = false) when a different op key is encountered. Collection also
+ // stops early (with CurrentOpFillFullBlock = true) if adding the next same-key attachment
+    //     would exceed m_UsableBlockSize in size OR would reach MaxChunksPerBlock in count --
+    //     meaning the gathered chunks saturate one block (by size or by count) and must be emitted immediately.
+ //
+ // Phase 2 - Place (while loop over CurrentOpChunkSizes):
+ // Decides where the gathered chunks go. Exactly one of four mutually exclusive paths runs
+ // per iteration; after each path the loop re-evaluates with whatever chunks remain:
+ //
+ // Path A: CurrentOpFillFullBlock == true
+ // The gathered set exactly fills one block. Emit it immediately as a standalone block
+ // and clear CurrentOpChunkSizes. The pending block is left untouched.
+ //
+ // Path B: All gathered chunks fit in the pending block (both size and count constraints met)
+ // Merge the gathered chunks into PendingChunkHashes/PendingBlockSize and clear the
+ // current-op buffers. If the pending block is now exactly full, flush it immediately.
+ //
+ // Path C: Gathered chunks don't fit AND pending block is >75% full by bytes
+ // The pending block is already well-utilised; flush it now and loop so that the gathered
+ // chunks are re-evaluated against the freshly emptied pending block.
+ //
+ // Path D: Gathered chunks don't fit AND pending block is <=75% full by bytes
+    //        Usually chunk count is the binding constraint, though bytes can also bind here. Greedily
+    //        fill the pending block with as many gathered chunks as fit (stopping at the first chunk
+    //        that would violate either size or count), flush it, remove the added chunks from the
+    //        current-op buffers, and loop so the remaining gathered chunks are re-evaluated.
+ //
+ // Final flush: after all attachments have been processed, any non-empty pending block is
+ // emitted.
void Compose(std::span<const IoHash> AttachmentHashes,
std::span<const uint64_t> AttachmentSizes,
std::span<const Oid> AttachmentKeys,
@@ -258,6 +304,7 @@ namespace remotestore_impl {
ZEN_ASSERT(CurrentOpAttachmentsSize <= m_UsableBlockSize);
ZEN_ASSERT(CurrentOpAttachmentCount <= m_Config.MaxChunksPerBlock);
+ // Path A: gathered chunks exactly fill one block -- emit as a standalone block immediately.
if (CurrentOpFillFullBlock)
{
ZEN_ASSERT(CurrentOpAttachmentsSize <= m_UsableBlockSize);
@@ -272,7 +319,7 @@ namespace remotestore_impl {
else if ((PendingBlockSize + CurrentOpAttachmentsSize) <= m_UsableBlockSize &&
(PendingChunkHashes.size() + CurrentOpAttachmentCount) <= m_Config.MaxChunksPerBlock)
{
- // All attachments for Op fits in the current block...
+ // Path B: all gathered chunks fit in the pending block -- merge them in.
PendingChunkHashes.insert(PendingChunkHashes.end(), CurrentOpRawHashes.begin(), CurrentOpRawHashes.end());
PendingBlockSize += CurrentOpAttachmentsSize;
@@ -294,8 +341,8 @@ namespace remotestore_impl {
}
else if (PendingBlockSize > (m_UsableBlockSize * 3) / 4)
{
- // Our ops does not fit in the current block and the current block is using 75% of max size so lets move our op
- // to the next block...
+ // Path C: gathered chunks don't fit AND pending block is >75% full by bytes -- flush pending
+ // block now; loop to re-evaluate gathered chunks against the freshly emptied pending block.
ZEN_ASSERT(PendingChunkHashes.size() <= m_Config.MaxChunksPerBlock);
ZEN_ASSERT(PendingBlockSize <= m_UsableBlockSize);
OnNewBlock(std::move(PendingChunkHashes));
@@ -307,9 +354,10 @@ namespace remotestore_impl {
}
else
{
- // Fit as many as possible in current block...
- // I think this is dead code - either the CurrentOpRawHashes fills an entire block and the
- // CurrentOpFillFullBlock is set, or the
+                    // Path D: gathered chunks don't fit AND pending block is <=75% full by bytes --
+                    // usually chunk count is the binding constraint, though bytes can bind here too.
+                    // Greedily fill the pending block with as many chunks as fit, flush it, remove
+                    // them from the current-op buffers, and loop with the remaining gathered chunks.
ZEN_ASSERT(PendingBlockSize < m_UsableBlockSize);
ZEN_ASSERT(PendingChunkHashes.size() < m_Config.MaxChunksPerBlock);
@@ -372,6 +420,15 @@ namespace remotestore_impl {
}
private:
+ // CalculateUsableBlockSize computes the maximum bytes available for chunk content in one
+ // block. The block header encodes:
+ // - A CompressedBuffer header of fixed size.
+ // - One VarUInt field encoding MaxChunksPerBlock.
+ // - MaxChunksPerBlock VarUInt entries each encoding one chunk size (bounded by
+ // MaxChunkEmbedSize, which determines the worst-case VarUInt width).
+ // MaxHeaderSize is the worst-case total header size, so
+ // UsableBlockSize = MaxBlockSize - MaxHeaderSize is a conservative bound that guarantees
+ // chunk content always fits within the encoded block.
static uint64_t CalculateUsableBlockSize(const Configuration& Config)
{
ZEN_ASSERT(Config.MaxChunksPerBlock > 0);
@@ -4942,6 +4999,37 @@ namespace projectstore_testutils {
std::vector<std::string> Messages;
};
+ // Create a test IoHash with a unique value based on a small index.
+ inline IoHash MakeTestHash(uint8_t Index)
+ {
+ uint8_t Data[20] = {};
+ Data[0] = Index;
+ return IoHash::MakeFrom(Data);
+ }
+
+ // Create a test Oid with a unique value based on a 32-bit index.
+ inline Oid MakeTestOid(uint32_t Index)
+ {
+ uint32_t Data[3] = {Index, 0, 0};
+ return Oid::FromMemory(Data);
+ }
+
+ // Build a BlockComposer::Configuration where the usable block content area is exactly
+ // UsableSize bytes and the maximum number of chunks per block is MaxChunks.
+ // MaxChunkEmbedSize is fixed at 100 (< 128, so MeasureVarUInt returns 1 byte per entry).
+ // Requires MaxChunks <= 127 so that MeasureVarUInt(MaxChunks) == 1.
+ inline remotestore_impl::BlockComposer::Configuration MakeTestConfig(uint64_t UsableSize, uint64_t MaxChunks)
+ {
+ constexpr uint64_t MaxChunkEmbedSize = 100;
+ uint64_t MaxHeaderSize =
+ CompressedBuffer::GetHeaderSizeForNoneEncoder() + MeasureVarUInt(MaxChunks) + MeasureVarUInt(MaxChunkEmbedSize) * MaxChunks;
+ return remotestore_impl::BlockComposer::Configuration{
+ .MaxBlockSize = UsableSize + MaxHeaderSize,
+ .MaxChunksPerBlock = MaxChunks,
+ .MaxChunkEmbedSize = MaxChunkEmbedSize,
+ };
+ }
+
} // namespace projectstore_testutils
TEST_SUITE_BEGIN("remotestore.projectstore");
@@ -6786,6 +6874,332 @@ TEST_CASE("project.store.embed_loose_files_already_resolved")
CHECK(SecondExport.ErrorCode == 0);
}
+TEST_CASE("project.store.blockcomposer.path_a_standalone_block")
+{
+ // Path A: a single op with exactly MaxChunksPerBlock (4) chunks.
+ // The gather loop sets CurrentOpFillFullBlock=true when the count reaches MaxChunksPerBlock.
+ // Path A emits the gathered set as a standalone block immediately, leaving pending untouched.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op1, Op1};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 1);
+ CHECK(Blocks[0].size() == 4);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][3] == MakeTestHash(4));
+}
+
+TEST_CASE("project.store.blockcomposer.path_b_fits_pending")
+{
+ // Path B: a single op whose chunks fit in the empty pending block.
+ // No flush occurs during processing; the final flush emits the one pending block.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2)};
+ std::vector<uint64_t> Sizes = {200, 300};
+ std::vector<Oid> Keys = {Op1, Op1};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 1);
+ CHECK(Blocks[0].size() == 2);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][1] == MakeTestHash(2));
+}
+
+TEST_CASE("project.store.blockcomposer.path_b_exact_count_fill")
+{
+ // Path B with exact count fill: two ops each contributing 2 chunks, MaxChunks=4.
+ // After the second op merges, PendingChunkHashes.size() == MaxChunksPerBlock, so Path B
+ // flushes immediately -- no separate final flush is needed.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op2, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 1);
+ CHECK(Blocks[0].size() == 4);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][3] == MakeTestHash(4));
+}
+
+TEST_CASE("project.store.blockcomposer.path_c_75pct_flush")
+{
+ // Path C: the pending block is >75% full by bytes when a new op arrives that does not fit.
+ // The pending block is flushed first; the new op chunk is then placed via Path B, and
+ // emitted by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000; // 75% threshold = 750 bytes
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ // Op1: 800 bytes -> Path B, pending = {800 bytes, 1 chunk} (800 > 750)
+ // Op2: 300 bytes -> does not fit (800+300=1100 > 1000) and 800 > 750 -> Path C flush,
+ // then Path B, pending = {300 bytes} -> final flush
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2)};
+ std::vector<uint64_t> Sizes = {800, 300};
+ std::vector<Oid> Keys = {Op1, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ // Block 1: Path C flush of Op1 chunk. Block 2: final flush of Op2 chunk.
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 1);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[1].size() == 1);
+ CHECK(Blocks[1][0] == MakeTestHash(2));
+}
+
+TEST_CASE("project.store.blockcomposer.path_d_partial_fill")
+{
+ // Path D: the pending block is <=75% full by bytes but chunk count is the binding constraint.
+ // Pending has 3 chunks; the incoming op has 2 chunks that would exceed MaxChunks=4.
+ // Path D greedily adds 1 chunk to fill pending to capacity, flushes, then places the
+ // remaining 1 chunk in the new pending block (emitted by the final flush).
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000; // 75% threshold = 750 bytes
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ // Op1: 3 x 100 bytes -> Path B, pending = {3 chunks, 300 bytes} (300 <= 750)
+ // Op2: 2 x 100 bytes -> 3+2=5 > MaxChunks=4; 300+200=500 <= 1000; 300 <= 750 -> Path D
+ // D adds op2[0] to pending (4 chunks, count capacity reached), flushes -> block 1
+ // Remaining op2[1] -> Path B (pending empty) -> final flush -> block 2
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4), MakeTestHash(5)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op1, Op2, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ // Block 1: [op1_c0, op1_c1, op1_c2, op2_c0] (Path D fills and flushes).
+ // Block 2: [op2_c1] (final flush).
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 4);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][1] == MakeTestHash(2));
+ CHECK(Blocks[0][2] == MakeTestHash(3));
+ CHECK(Blocks[0][3] == MakeTestHash(4));
+ CHECK(Blocks[1].size() == 1);
+ CHECK(Blocks[1][0] == MakeTestHash(5));
+}
+
+TEST_CASE("project.store.blockcomposer.cancellation")
+{
+    // IsCancelledFunc returns true from its second invocation onward.
+ // Op1 (4 chunks, Path A) is fully emitted before cancellation; Op2 is never started.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+
+ int CallCount = 0;
+ remotestore_impl::BlockComposer::Configuration Config = MakeTestConfig(UsableSize, MaxChunks);
+ Config.IsCancelledFunc = [&]() { return ++CallCount > 1; };
+ remotestore_impl::BlockComposer Composer(Config);
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4), MakeTestHash(5), MakeTestHash(6)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op1, Op1, Op2, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ // Op1 fills a block (Path A) before the second cancellation check fires.
+ REQUIRE(Blocks.size() == 1);
+ CHECK(Blocks[0].size() == 4);
+}
+
+TEST_CASE("project.store.blockcomposer.final_flush")
+{
+ // Three single-chunk ops all merge into the pending block via Path B without triggering
+ // any mid-stream flush. The single pending block is emitted by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ Oid Op3 = MakeTestOid(3);
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3)};
+ std::vector<uint64_t> Sizes = {100, 200, 150};
+ std::vector<Oid> Keys = {Op1, Op2, Op3};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 1);
+ CHECK(Blocks[0].size() == 3);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][1] == MakeTestHash(2));
+ CHECK(Blocks[0][2] == MakeTestHash(3));
+}
+
+TEST_CASE("project.store.blockcomposer.path_b_b_c")
+{
+ // Multi-step: Path B -> Path B -> Path C
+ // Two ops build pending past the 75% byte threshold. The third op triggers Path C,
+ // flushing the first block. The third op chunk is then placed via Path B and emitted
+ // by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000; // 75% threshold = 750 bytes
+ constexpr uint64_t MaxChunks = 8;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ Oid Op3 = MakeTestOid(3);
+ // Op1: 400 bytes -> Path B, pending = {400 bytes, 1 chunk}
+ // Op2: 400 bytes -> Path B, pending = {800 bytes, 2 chunks} (800 > 750)
+ // Op3: 300 bytes -> does not fit (1100 > 1000) and 800 > 750 -> Path C flush -> block 1
+ // then Path B, pending = {300 bytes} -> final flush -> block 2
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3)};
+ std::vector<uint64_t> Sizes = {400, 400, 300};
+ std::vector<Oid> Keys = {Op1, Op2, Op3};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 2);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][1] == MakeTestHash(2));
+ CHECK(Blocks[1].size() == 1);
+ CHECK(Blocks[1][0] == MakeTestHash(3));
+}
+
+TEST_CASE("project.store.blockcomposer.path_d_b")
+{
+    // Multi-step: Path D -> Path B (NOTE(review): input is identical to path_d_partial_fill -- consider merging)
+ // Path D fills and flushes the pending block with a partial set of the current op chunks.
+ // The remainder of that op then fits into the freshly emptied pending block via Path B,
+ // and is emitted by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ // Op1: 3 x 100 bytes -> Path B, pending = {3 chunks, 300 bytes}
+ // Op2: 2 x 100 bytes -> count 3+2=5 > 4; bytes 300+200=500 <= 1000; 300 <= 750 -> Path D
+ // D adds op2[0] to pending (4 chunks, count capacity reached), flushes -> block 1
+ // op2[1] remaining -> Path B (pending empty) -> final flush -> block 2
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4), MakeTestHash(5)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op1, Op2, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 4);
+ CHECK(Blocks[0][3] == MakeTestHash(4));
+ CHECK(Blocks[1].size() == 1);
+ CHECK(Blocks[1][0] == MakeTestHash(5));
+}
+
+TEST_CASE("project.store.blockcomposer.path_c_b")
+{
+    // Multi-step: Path C -> Path B (NOTE(review): input is identical to path_c_75pct_flush -- consider merging)
+ // Path C flushes the pending block. The op chunk that triggered Path C is then placed
+ // via Path B into the freshly emptied pending block, and emitted by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000; // 75% threshold = 750 bytes
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ // Op1: 800 bytes -> Path B, pending = {800 bytes, 1 chunk}
+ // Op2: 300 bytes -> does not fit (1100 > 1000) and 800 > 750 -> Path C flush -> block 1
+ // then Path B, pending = {300 bytes} -> final flush -> block 2
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2)};
+ std::vector<uint64_t> Sizes = {800, 300};
+ std::vector<Oid> Keys = {Op1, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 1);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[1].size() == 1);
+ CHECK(Blocks[1][0] == MakeTestHash(2));
+}
+
+TEST_CASE("project.store.blockcomposer.path_a_b_final_flush")
+{
+ // Multi-step: Path A -> Path B -> final flush
+ // The first op exactly fills a block (count-saturated, Path A). The second op fits into
+ // the now-empty pending block via Path B and is emitted by the final flush.
+ using namespace projectstore_testutils;
+
+ constexpr uint64_t UsableSize = 1000;
+ constexpr uint64_t MaxChunks = 4;
+ remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
+
+ Oid Op1 = MakeTestOid(1);
+ Oid Op2 = MakeTestOid(2);
+ // Op1: 4 x 100 bytes -> MaxChunksPerBlock reached -> CurrentOpFillFullBlock=true -> Path A
+ // Op2: 2 x 100 bytes -> Path B (pending empty) -> final flush
+ std::vector<IoHash> Hashes = {MakeTestHash(1), MakeTestHash(2), MakeTestHash(3), MakeTestHash(4), MakeTestHash(5), MakeTestHash(6)};
+ std::vector<uint64_t> Sizes = {100, 100, 100, 100, 100, 100};
+ std::vector<Oid> Keys = {Op1, Op1, Op1, Op1, Op2, Op2};
+
+ std::vector<std::vector<IoHash>> Blocks;
+ Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
+
+ // Block 1: Path A standalone (all 4 Op1 chunks). Block 2: final flush of Op2 (2 chunks).
+ REQUIRE(Blocks.size() == 2);
+ CHECK(Blocks[0].size() == 4);
+ CHECK(Blocks[0][0] == MakeTestHash(1));
+ CHECK(Blocks[0][3] == MakeTestHash(4));
+ CHECK(Blocks[1].size() == 2);
+ CHECK(Blocks[1][0] == MakeTestHash(5));
+ CHECK(Blocks[1][1] == MakeTestHash(6));
+}
+
TEST_SUITE_END();
#endif // ZEN_WITH_TESTS