aboutsummaryrefslogtreecommitdiff
path: root/src
diff options
context:
space:
mode:
authorDan Engelbrecht <[email protected]>2026-03-16 14:01:22 +0100
committerDan Engelbrecht <[email protected]>2026-03-16 14:01:22 +0100
commit893dda8808922c8f766ef2b4bb0b7d4fe5c542d9 (patch)
tree729f96218600d27ba6c6c698812c87f7da91b2ee /src
parentupdated tests (diff)
downloadzen-893dda8808922c8f766ef2b4bb0b7d4fe5c542d9.tar.xz
zen-893dda8808922c8f766ef2b4bb0b7d4fe5c542d9.zip
comment cleanup
Diffstat (limited to 'src')
-rw-r--r--src/zenremotestore/projectstore/remoteprojectstore.cpp390
1 files changed, 91 insertions, 299 deletions
diff --git a/src/zenremotestore/projectstore/remoteprojectstore.cpp b/src/zenremotestore/projectstore/remoteprojectstore.cpp
index f4b93538e..6bce132c3 100644
--- a/src/zenremotestore/projectstore/remoteprojectstore.cpp
+++ b/src/zenremotestore/projectstore/remoteprojectstore.cpp
@@ -4577,8 +4577,7 @@ namespace projectstore_testutils {
mutable RwLock m_Lock;
};
- // Worker pool pair for tests that exercise both network and compute workers.
- // Member initialisation order matches declaration order: counts first, then pools.
+ // Worker pool pair with separate NetworkPool and WorkerPool.
struct TestWorkerPools
{
private:
@@ -4598,10 +4597,8 @@ namespace projectstore_testutils {
}
};
- // Worker count for tests that only need a single WorkerThreadPool.
inline uint32_t GetWorkerCount() { return Max(GetHardwareConcurrency() / 4u, 2u); }
- // Create a test IoHash with a unique value based on a small index.
inline IoHash MakeTestHash(uint8_t Index)
{
uint8_t Data[20] = {};
@@ -4609,17 +4606,14 @@ namespace projectstore_testutils {
return IoHash::MakeFrom(Data);
}
- // Create a test Oid with a unique value based on a 32-bit index.
inline Oid MakeTestOid(uint32_t Index)
{
uint32_t Data[3] = {Index, 0, 0};
return Oid::FromMemory(Data);
}
- // Build a BlockComposer::Configuration where the usable block content area is exactly
- // UsableSize bytes and the maximum number of chunks per block is MaxChunks.
- // MaxChunkEmbedSize is fixed at 100 (< 128, so MeasureVarUInt returns 1 byte per entry).
- // Requires MaxChunks <= 127 so that MeasureVarUInt(MaxChunks) == 1.
+ // MaxChunks must be <= 127 (so MeasureVarUInt(MaxChunks) == 1) and MaxChunkEmbedSize is
+ // fixed at 100 to keep header sizes deterministic in BlockComposer tests.
inline remotestore_impl::BlockComposer::Configuration MakeTestConfig(uint64_t UsableSize, uint64_t MaxChunks)
{
constexpr uint64_t MaxChunkEmbedSize = 100;
@@ -4757,9 +4751,7 @@ TEST_CASE_TEMPLATE("project.store.export",
DoLoad(true, true);
}
-// Common oplog setup used by the two tests below.
-// Returns a FileRemoteStore backed by ExportDir that has been populated with a SaveOplog call.
-// Keeps the test data identical to project.store.export so the two test suites exercise the same blocks/attachments.
+// Populates ExportDir with a SaveOplog call using the same data as project.store.export.
static std::shared_ptr<RemoteProjectStore>
SetupExportStore(CidStore& CidStore,
ProjectStore::Project& Project,
@@ -4817,23 +4809,15 @@ SetupExportStore(CidStore& CidStore,
return RemoteStore;
}
-// Creates an export store with a single oplog entry that packs six 512 KB chunks into one
-// ~3 MB block (MaxBlockSize = 8 MB). The resulting block slack (~1.5 MB) far exceeds the
-// 512 KB threshold that ChunkBlockAnalyser requires before it will consider partial-block
-// downloads instead of full-block downloads.
-//
-// This function is self-contained: it creates its own GcManager, CidStore, ProjectStore and
-// Project internally so that each call is independent of any outer test context. After
-// SaveOplog returns, all persistent data lives on disk inside ExportDir and the caller can
-// freely query OutRemoteStore without holding any references to the internal context.
+// Creates an export store with six 512 KB chunks packed into one ~3 MB block (MaxBlockSize=8 MB).
+// The ~1.5 MB slack exceeds the ChunkBlockAnalyser threshold, enabling partial-block downloads.
+// Uses its own GcManager/CidStore/ProjectStore so each call is independent.
static std::shared_ptr<RemoteProjectStore>
SetupPartialBlockExportStore(WorkerThreadPool& NetworkPool, WorkerThreadPool& WorkerPool, const std::filesystem::path& ExportDir)
{
using namespace projectstore_testutils;
using namespace std::literals;
- // Self-contained CAS and project store. Subdirectories of ExportDir keep everything
- // together without relying on the outer TEST_CASE's ExportCidStore / ExportProject.
GcManager LocalGc;
CidStore LocalCidStore(LocalGc);
CidStoreConfiguration LocalCidConfig = {.RootDirectory = ExportDir / "cas", .TinyValueThreshold = 1024, .HugeValueThreshold = 4096};
@@ -4854,17 +4838,13 @@ SetupPartialBlockExportStore(WorkerThreadPool& NetworkPool, WorkerThreadPool& Wo
throw std::runtime_error("Failed to create oplog");
}
- // Six 512 KB chunks with OodleCompressionLevel::None so the compressed size stays large
- // and the block genuinely exceeds the 512 KB slack threshold.
Oplog->AppendNewOplogEntry(CreateBulkDataOplogPackage(
Oid::NewOid(),
CreateAttachments(std::initializer_list<size_t>{512u * 1024u, 512u * 1024u, 512u * 1024u, 512u * 1024u, 512u * 1024u, 512u * 1024u},
OodleCompressionLevel::None)));
- // MaxChunkEmbedSize must be larger than the compressed size of each 512 KB chunk
- // (OodleCompressionLevel::None → compressed ≈ raw ≈ 512 KB). With the legacy
- // 32 KB limit all six chunks would become loose large attachments and no block would
- // be created, so we use the production default of 1.5 MB instead.
+ // MaxChunkEmbedSize must exceed 512 KB (compressed size with None encoding) or all chunks
+ // become loose attachments and no block is created.
FileRemoteStoreOptions Options = {RemoteStoreOptions{.MaxBlockSize = 8u * 1024u * 1024u,
.MaxChunksPerBlock = 1000,
.MaxChunkEmbedSize = RemoteStoreOptions::DefaultMaxChunkEmbedSize,
@@ -4892,8 +4872,6 @@ SetupPartialBlockExportStore(WorkerThreadPool& NetworkPool, WorkerThreadPool& Wo
return RemoteStore;
}
-// Returns the first block hash that has at least MinChunkCount chunks, or a zero IoHash
-// if no qualifying block exists in Store.
static IoHash
FindBlockWithMultipleChunks(RemoteProjectStore& Store, size_t MinChunkCount)
{
@@ -4922,10 +4900,8 @@ FindBlockWithMultipleChunks(RemoteProjectStore& Store, size_t MinChunkCount)
return {};
}
-// Loads BlockHash from Source and inserts every even-indexed chunk (0, 2, 4, …) into
-// TargetCidStore. Odd-indexed chunks are left absent so that when an import is run
-// against the same block, HasAttachment returns false for three non-adjacent positions
-// — the minimum needed to exercise the multi-range partial-block download paths.
+// Seeds TargetCidStore with even-indexed chunks (0, 2, 4 ...) from BlockHash, leaving
+// odd chunks absent to create non-adjacent missing ranges for partial-block download tests.
static void
SeedCidStoreWithAlternateChunks(CidStore& TargetCidStore, RemoteProjectStore& Source, const IoHash& BlockHash)
{
@@ -4976,8 +4952,7 @@ TEST_CASE("project.store.import.context_settings")
std::filesystem::path ProjectRootDir = TempDir.Path() / "game";
std::filesystem::path ProjectFilePath = TempDir.Path() / "game" / "game.uproject";
- // Export-side CAS and project store: used only by SetupExportStore to build the remote store
- // payload. Kept separate from the import side so the two CAS instances are disjoint.
+ // Export-side CAS and project store; kept disjoint from the import side.
GcManager ExportGc;
CidStore ExportCidStore(ExportGc);
CidStoreConfiguration ExportCidConfig = {.RootDirectory = TempDir.Path() / "export_cas",
@@ -5001,9 +4976,7 @@ TEST_CASE("project.store.import.context_settings")
std::shared_ptr<RemoteProjectStore> RemoteStore =
SetupExportStore(ExportCidStore, *ExportProject, NetworkPool, WorkerPool, ExportDir.Path());
- // Import-side CAS and project store: starts empty, mirroring a fresh machine that has never
- // downloaded the data. HasAttachment() therefore returns false for every chunk, so the import
- // genuinely contacts the remote store without needing ForceDownload on the populate pass.
+ // Import-side CAS starts empty so the first import downloads from the remote store without ForceDownload.
GcManager ImportGc;
CidStore ImportCidStore(ImportGc);
CidStoreConfiguration ImportCidConfig = {.RootDirectory = TempDir.Path() / "import_cas",
@@ -5040,9 +5013,7 @@ TEST_CASE("project.store.import.context_settings")
CapturingJobContext OpJobContext;
- // Helper: run a LoadOplog against the import-side CAS/project with the given context knobs.
- // Each call creates a fresh oplog so repeated calls within one SUBCASE don't short-circuit on
- // already-present data.
+ // Each call creates a fresh oplog to prevent short-circuiting on already-present data.
auto DoImport = [&](BuildStorageCache* OptCache,
EPartialBlockRequestMode Mode,
double StoreLatency,
@@ -5076,21 +5047,17 @@ TEST_CASE("project.store.import.context_settings")
DoImport(OptCache, EPartialBlockRequestMode::All, 0.001, 128u, 0.001, 128u, Populate, Force);
};
- SUBCASE("mode_off_no_cache")
- {
- // Baseline: no partial block requests, no cache.
- DoImport(nullptr, EPartialBlockRequestMode::Off, -1.0, (uint64_t)-1, -1.0, (uint64_t)-1, false, false);
- }
+ SUBCASE("mode_off_no_cache") { DoImport(nullptr, EPartialBlockRequestMode::Off, -1.0, (uint64_t)-1, -1.0, (uint64_t)-1, false, false); }
SUBCASE("mode_all_multirange_cloud_no_cache")
{
- // StoreMaxRangeCountPerRequest > 1 → MultiRange cloud path.
+ // StoreMaxRangeCountPerRequest > 1 -> MultiRange cloud path.
DoImport(nullptr, EPartialBlockRequestMode::All, 0.001, 128u, -1.0, 0u, false, false);
}
SUBCASE("mode_all_singlerange_cloud_no_cache")
{
- // StoreMaxRangeCountPerRequest == 1 → SingleRange cloud path.
+ // StoreMaxRangeCountPerRequest == 1 -> SingleRange cloud path.
DoImport(nullptr, EPartialBlockRequestMode::All, 0.001, 1u, -1.0, 0u, false, false);
}
@@ -5102,36 +5069,26 @@ TEST_CASE("project.store.import.context_settings")
SUBCASE("cache_populate_and_hit")
{
- // First import: ImportCidStore is empty so all blocks are downloaded from the remote store
- // and written to the cache.
+ // First import: CidStore empty -> blocks downloaded and written to cache.
ImportAll(Cache.get(), /*PopulateCache=*/true, /*Force=*/false);
CHECK(CacheStats.PutBlobCount > 0);
- // Re-import with ForceDownload=true: all chunks are now in ImportCidStore but Force overrides
- // HasAttachment() so the download logic re-runs and serves blocks from the cache instead of
- // the remote store.
+ // Re-import with Force=true: HasAttachment overridden, blocks served from cache.
ResetCacheStats();
ImportAll(Cache.get(), /*PopulateCache=*/false, /*Force=*/true);
CHECK(CacheStats.PutBlobCount == 0);
- // TotalRequestCount covers both full-blob cache hits and partial-range cache hits.
CHECK(CacheStats.TotalRequestCount > 0);
}
SUBCASE("cache_no_populate_flag")
{
- // Cache is provided but PopulateCache=false: blocks are downloaded to ImportCidStore but
- // nothing should be written to the cache.
ImportAll(Cache.get(), /*PopulateCache=*/false, /*Force=*/false);
CHECK(CacheStats.PutBlobCount == 0);
}
SUBCASE("mode_zencacheonly_cache_multirange")
{
- // Pre-populate the cache via a plain import, then re-import with ZenCacheOnly +
- // CacheMaxRangeCountPerRequest=128. With 100% of chunks needed, all blocks go to
- // FullBlockIndexes and GetBuildBlob (full blob) is called from the cache.
- // CacheMaxRangeCountPerRequest > 1 would route partial downloads through GetBuildBlobRanges
- // if the analyser ever emits BlockRanges entries.
+ // Pre-populate; re-import via ZenCacheOnly. All chunks needed -> FullBlockIndexes path (GetBuildBlob).
ImportAll(Cache.get(), /*PopulateCache=*/true, /*Force=*/false);
ResetCacheStats();
@@ -5141,11 +5098,7 @@ TEST_CASE("project.store.import.context_settings")
SUBCASE("mode_zencacheonly_cache_singlerange")
{
- // Pre-populate the cache, then re-import with ZenCacheOnly + CacheMaxRangeCountPerRequest=1.
- // With 100% of chunks needed the analyser sends all blocks to FullBlockIndexes (full-block
- // download path), which calls GetBuildBlob with no range offset — a full-blob cache hit.
- // The single-range vs multi-range distinction only matters for the partial-block (BlockRanges)
- // path, which is not reached when all chunks are needed.
+ // Pre-populate; re-import via ZenCacheOnly with CacheMaxRangeCountPerRequest=1. All chunks needed -> GetBuildBlob (full-blob).
ImportAll(Cache.get(), /*PopulateCache=*/true, /*Force=*/false);
ResetCacheStats();
@@ -5165,18 +5118,17 @@ TEST_CASE("project.store.import.context_settings")
SUBCASE("partial_block_cloud_multirange")
{
- // Export store with 6 × 512 KB chunks packed into one ~3 MB block.
ScopedTemporaryDirectory PartialExportDir;
std::shared_ptr<RemoteProjectStore> PartialRemoteStore =
SetupPartialBlockExportStore(NetworkPool, WorkerPool, PartialExportDir.Path());
// Seeding even-indexed chunks (0, 2, 4) leaves odd ones (1, 3, 5) absent in
- // ImportCidStore. Three non-adjacent needed positions → three BlockRangeDescriptors.
+ // ImportCidStore. Three non-adjacent needed positions -> three BlockRangeDescriptors.
IoHash BlockHash = FindBlockWithMultipleChunks(*PartialRemoteStore, 4u);
CHECK(BlockHash != IoHash::Zero);
SeedCidStoreWithAlternateChunks(ImportCidStore, *PartialRemoteStore, BlockHash);
- // StoreMaxRangeCountPerRequest=128 → all three ranges sent in one LoadAttachmentRanges call.
+ // StoreMaxRangeCountPerRequest=128 -> all three ranges sent in one LoadAttachmentRanges call.
Ref<ProjectStore::Oplog> PartialOplog = ImportProject->NewOplog(fmt::format("partial_cloud_multi_{}", OpJobIndex++), {});
LoadOplog(LoadOplogContext{.ChunkStore = ImportCidStore,
.RemoteStore = *PartialRemoteStore,
@@ -5238,8 +5190,7 @@ TEST_CASE("project.store.import.context_settings")
IoHash BlockHash = FindBlockWithMultipleChunks(*PartialRemoteStore, 4u);
CHECK(BlockHash != IoHash::Zero);
- // Phase 1: ImportCidStore starts empty → full block download from remote → PutBuildBlob
- // populates the cache.
+ // Phase 1: full block download from remote populates the cache.
{
Ref<ProjectStore::Oplog> Phase1Oplog = ImportProject->NewOplog(fmt::format("partial_cache_multi_p1_{}", OpJobIndex++), {});
LoadOplog(LoadOplogContext{.ChunkStore = ImportCidStore,
@@ -5264,10 +5215,7 @@ TEST_CASE("project.store.import.context_settings")
}
ResetCacheStats();
- // Phase 2: fresh CidStore with only even-indexed chunks seeded.
- // HasAttachment returns false for odd chunks (1, 3, 5) → three BlockRangeDescriptors.
- // Block is in cache from Phase 1 → cache partial path.
- // CacheMaxRangeCountPerRequest=128 → SubRangeCount=3 > 1 → GetBuildBlobRanges.
+ // Phase 2: fresh CidStore with even chunks seeded; CacheMaxRangeCountPerRequest=128 -> GetBuildBlobRanges.
GcManager Phase2Gc;
CidStore Phase2CidStore(Phase2Gc);
CidStoreConfiguration Phase2CidConfig = {.RootDirectory = TempDir.Path() / "partial_cas",
@@ -5332,9 +5280,7 @@ TEST_CASE("project.store.import.context_settings")
}
ResetCacheStats();
- // Phase 2: fresh CidStore with only even-indexed chunks seeded.
- // CacheMaxRangeCountPerRequest=1 → SubRangeCount=Min(3,1)=1 → GetBuildBlob with range
- // offset (single-range legacy cache path), called once per needed chunk range.
+ // Phase 2: CacheMaxRangeCountPerRequest=1 -> GetBuildBlob with range offset, called per needed range.
GcManager Phase2Gc;
CidStore Phase2CidStore(Phase2Gc);
CidStoreConfiguration Phase2CidConfig = {.RootDirectory = TempDir.Path() / "partial_cas_single",
@@ -5366,8 +5312,6 @@ TEST_CASE("project.store.import.context_settings")
}
}
-// Helper: construct the common CidStore/ProjectStore/Project setup used by the new tests.
-// Returns a Project backed by TempDir.
static Ref<ProjectStore::Project>
MakeTestProject(CidStore& CidStore,
GcManager& Gc,
@@ -5395,7 +5339,6 @@ MakeTestProject(CidStore& CidStore,
ProjectFilePath.string()));
}
-// Helper: call SaveOplog with a FileRemoteStore backed by ExportDir.
static void
RunSaveOplog(CidStore& CidStore,
ProjectStore::Project& Project,
@@ -5447,9 +5390,7 @@ RunSaveOplog(CidStore& CidStore,
TEST_CASE("project.store.export.no_attachments_needed")
{
- // When an oplog has no binary attachments and ForceUpload=true, BuildContainer produces
- // no blocks and no large attachments. UploadAttachments then finds nothing to upload
- // and reports "No attachments needed" (line 1408).
+ // With no binary attachments, UploadAttachments reports "No attachments needed".
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5492,9 +5433,7 @@ TEST_CASE("project.store.export.no_attachments_needed")
TEST_CASE("project.store.embed_loose_files_true")
{
- // EmbedLooseFiles=true causes RewriteOp to rewrite file-op entries: each "files" array
- // entry with no existing "data" hash is read from disk, hashed, and the rewritten entry
- // gets a BinaryAttachment "data" field. Round-trip via LoadOplog must succeed.
+ // EmbedLooseFiles=true: file-op entries are rewritten with a BinaryAttachment field. Round-trip must succeed.
using namespace projectstore_testutils;
using namespace std::literals;
@@ -5547,8 +5486,7 @@ TEST_CASE("project.store.embed_loose_files_true")
TEST_CASE("project.store.embed_loose_files_false" * doctest::skip()) // superseded by buildcontainer.embed_loose_files_false_no_rewrite
{
- // EmbedLooseFiles=false skips RewriteOp and writes file-op entries directly to the
- // section ops writer (line 1929: SectionOpsWriter << Op). Round-trip must succeed.
+ // EmbedLooseFiles=false: file-op entries pass through unrewritten. Round-trip must succeed.
using namespace projectstore_testutils;
using namespace std::literals;
@@ -5603,9 +5541,6 @@ TEST_CASE("project.store.embed_loose_files_false" * doctest::skip()) // superse
TEST_CASE("project.store.export.missing_attachment_ignored" *
doctest::skip()) // superseded by buildcontainer.ignore_missing_file_attachment_warn
{
- // Files are created on disk, added to the oplog, then deleted before SaveOplog runs.
- // RewriteOp detects the missing file and emits a "Missing attachment" message.
- // With IgnoreMissingAttachments=true the export continues and returns Ok.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5624,7 +5559,6 @@ TEST_CASE("project.store.export.missing_attachment_ignored" *
REQUIRE(Oplog);
Oplog->AppendNewOplogEntry(CreateFilesOplogPackage(Oid::NewOid(), RootDir, FileAtts));
- // Delete the files before SaveOplog runs so RewriteOp finds them missing.
for (const auto& [Id, Path] : FileAtts)
{
std::filesystem::remove(Path);
@@ -5657,8 +5591,6 @@ TEST_CASE("project.store.export.missing_attachment_ignored" *
TEST_CASE("project.store.export.missing_chunk_in_cidstore" *
doctest::skip()) // superseded by buildcontainer.ignore_missing_binary_attachment_warn/throw
{
- // A bulk-data op references a hash that is not present in CidStore.
- // With IgnoreMissingAttachments=false the export must fail with NotFound.
using namespace projectstore_testutils;
using namespace std::literals;
@@ -5670,12 +5602,9 @@ TEST_CASE("project.store.export.missing_chunk_in_cidstore" *
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // Fabricate a hash that is not stored anywhere in CidStore.
IoBuffer FakeData = CreateRandomBlob(256);
IoHash FakeHash = IoHash::HashBuffer(FakeData);
- // Build a bulkdata op that references FakeHash as a BinaryAttachment field,
- // but do NOT add any attachment payload to the package so CidStore remains empty.
CbObjectWriter Object;
Object << "key"sv << OidAsString(Oid::NewOid());
Object.BeginArray("bulkdata"sv);
@@ -5718,9 +5647,7 @@ TEST_CASE("project.store.export.missing_chunk_in_cidstore" *
TEST_CASE("project.store.export.large_file_attachment_direct")
{
- // A file larger than 2 × MaxChunkEmbedSize is classified as a direct large attachment
- // without a compression attempt (line 2364–2368: OnLargeAttachment via raw path).
- // Round-trip via LoadOplog must recover the data.
+ // File > 2 x MaxChunkEmbedSize: classified as a direct large attachment (no compression attempt). Round-trip must succeed.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5733,7 +5660,7 @@ TEST_CASE("project.store.export.large_file_attachment_direct")
std::filesystem::path RootDir = TempDir.Path() / "root";
- // 96 KB > 2 × 32 KB = 64 KB — triggers the "direct large attachment" path.
+ // 96 KB > 2 x 32 KB -> direct large attachment.
auto FileAtts = CreateFileAttachments(RootDir, std::initializer_list<size_t>{96u * 1024u});
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_large_direct", {});
@@ -5776,10 +5703,8 @@ TEST_CASE("project.store.export.large_file_attachment_direct")
TEST_CASE("project.store.export.large_file_attachment_via_temp")
{
- // A file whose raw size is between MaxChunkEmbedSize and 2 × MaxChunkEmbedSize is
- // compressed to a temp buffer. If the compressed form is still larger than
- // MaxChunkEmbedSize (incompressible random data), OnLargeAttachment is called with
- // the temp buffer (line 2446–2448). Round-trip must succeed.
+ // File with MaxChunkEmbedSize < size <= 2xMaxChunkEmbedSize: compressed to a temp buffer;
+ // if still large (incompressible), goes to OnLargeAttachment. Round-trip must succeed.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5792,9 +5717,7 @@ TEST_CASE("project.store.export.large_file_attachment_via_temp")
std::filesystem::path RootDir = TempDir.Path() / "root";
- // 48 KB: 32 KB < 48 KB ≤ 64 KB — triggers the temp-compression path.
- // CreateFileAttachments uses CreateRandomBlob which produces incompressible data,
- // so the compressed result stays > 32 KB and OnLargeAttachment fires at line 2448.
+ // 48 KB: 32 KB < 48 KB <= 64 KB -> temp-compression path; incompressible data stays > 32 KB.
auto FileAtts = CreateFileAttachments(RootDir, std::initializer_list<size_t>{48u * 1024u});
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_large_via_temp", {});
@@ -5837,9 +5760,7 @@ TEST_CASE("project.store.export.large_file_attachment_via_temp")
TEST_CASE("project.store.export.large_chunk_from_cidstore")
{
- // A bulk-data attachment stored in CidStore whose decompressed size exceeds
- // MaxChunkEmbedSize goes through OnLargeAttachment (line 2499–2501).
- // Round-trip must recover the attachment.
+ // Bulkdata attachment in CidStore with compressed size > MaxChunkEmbedSize -> OnLargeAttachment. Round-trip must succeed.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5850,8 +5771,7 @@ TEST_CASE("project.store.export.large_chunk_from_cidstore")
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // 64 KB stored with OodleCompressionLevel::None so the compressed payload in CidStore
- // is also ≈ 64 KB > MaxChunkEmbedSize = 32 KB.
+ // 64 KB with None encoding -> compressed ~ 64 KB > MaxChunkEmbedSize = 32 KB.
auto Attachments = CreateAttachments(std::initializer_list<size_t>{64u * 1024u}, OodleCompressionLevel::None);
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_large_cid", {});
@@ -5894,10 +5814,7 @@ TEST_CASE("project.store.export.large_chunk_from_cidstore")
TEST_CASE("project.store.export.block_reuse")
{
- // Export an oplog that creates at least one block, then export the same oplog again
- // to the same remote store. On the second call GetKnownBlocks returns the blocks
- // created by the first export, FindReuseBlocks matches their chunk hashes, and those
- // hashes are erased from UploadAttachments (lines 2302–2307).
+ // Second export to the same store: FindReuseBlocks matches existing blocks; no new blocks are written.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5908,10 +5825,7 @@ TEST_CASE("project.store.export.block_reuse")
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // Two 20 KB attachments: compressed size ≈ 20 KB + header < MaxChunkEmbedSize = 32 KB so
- // they stay in UploadAttachments (not moved to LargeChunkHashes) and are assembled into a
- // block by ComposeBlocks. OodleCompressionLevel::None is used so the compressed size stays
- // close to the raw size and does not shrink below MaxChunkEmbedSize unexpectedly.
+ // 20 KB with None encoding: compressed ~ 20 KB < MaxChunkEmbedSize = 32 KB -> goes into a block.
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_reuse", {});
REQUIRE(Oplog);
Oplog->AppendNewOplogEntry(CreateBulkDataOplogPackage(
@@ -5925,7 +5839,6 @@ TEST_CASE("project.store.export.block_reuse")
constexpr size_t MaxChunkEmbedSize = 32u * 1024u;
constexpr size_t MaxBlockSize = 64u * 1024u;
- // First export.
std::shared_ptr<RemoteProjectStore> RemoteStore;
RunSaveOplog(CidStore,
*Project,
@@ -5953,8 +5866,6 @@ TEST_CASE("project.store.export.block_reuse")
BlockHashesAfterFirst.push_back(B.BlockHash);
}
- // Second export on the same remote store. FindReuseBlocks must match all blocks
- // from the first export, so no new block files are written.
SaveOplog(CidStore,
*RemoteStore,
*Project,
@@ -5977,7 +5888,6 @@ TEST_CASE("project.store.export.block_reuse")
BlockHashesAfterSecond.push_back(B.BlockHash);
}
- // No new blocks were created; the known set must be identical.
std::sort(BlockHashesAfterFirst.begin(), BlockHashesAfterFirst.end());
std::sort(BlockHashesAfterSecond.begin(), BlockHashesAfterSecond.end());
CHECK(BlockHashesAfterFirst == BlockHashesAfterSecond);
@@ -5985,10 +5895,7 @@ TEST_CASE("project.store.export.block_reuse")
TEST_CASE("project.store.export.max_chunks_per_block")
{
- // When a single op has more chunk-sized attachments than MaxChunksPerBlock, the block
- // assembly loop fills the per-block limit and breaks (line 2778–2781), forcing the
- // overflow into a new block. With MaxChunksPerBlock=2 and three attachments from
- // one op, at least 2 blocks must be produced.
+ // MaxChunksPerBlock=2 with 3 attachments from one op -> at least 2 blocks produced.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -5999,10 +5906,7 @@ TEST_CASE("project.store.export.max_chunks_per_block")
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // Three 2 KB attachments: compressed size ≈ 2 KB + header < MaxChunkEmbedSize = 4 KB so
- // they stay in UploadAttachments and enter block assembly via ComposeBlocks.
- // Attachments > MaxChunkEmbedSize would go to LargeChunkHashes (loose chunks) and bypass
- // block assembly entirely.
+ // 2 KB with None encoding: compressed ~ 2 KB < MaxChunkEmbedSize = 4 KB -> enters block assembly.
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_max_chunks", {});
REQUIRE(Oplog);
Oplog->AppendNewOplogEntry(CreateBulkDataOplogPackage(
@@ -6038,7 +5942,6 @@ TEST_CASE("project.store.export.max_chunks_per_block")
RemoteProjectStore::GetKnownBlocksResult KnownBlocks = RemoteStore->GetKnownBlocks();
CHECK(KnownBlocks.Blocks.size() >= 2);
- // Round-trip.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_max_chunks_import", {});
LoadOplog(LoadOplogContext{.ChunkStore = CidStore,
.RemoteStore = *RemoteStore,
@@ -6051,13 +5954,9 @@ TEST_CASE("project.store.export.max_chunks_per_block")
TEST_CASE("project.store.export.max_data_per_block")
{
- // Validates that ComposeBlocks respects UsableBlockSize = MaxBlockSize - MaxHeaderSize.
- // With MaxBlockSize=7168 and MaxChunksPerBlock=32:
- // MaxHeaderSize = GetHeaderSizeForNoneEncoder()(64) + MeasureVarUInt(32)(1)
- // + MeasureVarUInt(7168)(2) * 32 = 129
- // UsableBlockSize = 7168 - 129 = 7039
- // Oids[1] small attachments total 7041 compressed bytes, which is > UsableBlockSize (7039)
- // but <= MaxBlockSize (7168), specifically exercising the MaxHeaderSize boundary.
+ // Verifies ComposeBlocks respects UsableBlockSize = MaxBlockSize - MaxHeaderSize.
+ // With MaxBlockSize=7168, MaxChunksPerBlock=32: MaxHeaderSize=129, UsableBlockSize=7039.
+ // Oids[1] contributes 7041 compressed bytes (> 7039) to force a block boundary at that exact limit.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6071,7 +5970,6 @@ TEST_CASE("project.store.export.max_data_per_block")
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_max_data_per_block", {});
REQUIRE(Oplog);
- // Set up a oid and block size scenario to trigger the various conditions in ComposeBlocks
std::vector<Oid> Oids;
Oids.push_back(Oid::NewOid());
Oids.push_back(Oid::NewOid());
@@ -6127,7 +6025,6 @@ TEST_CASE("project.store.export.max_data_per_block")
RemoteProjectStore::GetKnownBlocksResult KnownBlocks = RemoteStore->GetKnownBlocks();
CHECK(KnownBlocks.Blocks.size() >= 2);
- // Round-trip.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_max_data_per_block_import", {});
LoadOplog(LoadOplogContext{.ChunkStore = CidStore,
.RemoteStore = *RemoteStore,
@@ -6140,10 +6037,8 @@ TEST_CASE("project.store.export.max_data_per_block")
TEST_CASE("project.store.export.file_deleted_between_phases")
{
- // Scenario: a file attachment exists when the building phase (RewriteOp) runs but
- // is deleted before the AllowChunking workers call MakeFromFile. This reproduces
- // the real-world case where the oplog and external files have different lifetimes.
- // With IgnoreMissingAttachments=true the export continues and line 2088 is hit.
+ // File exists during RewriteOp but is deleted before AllowChunking workers run.
+ // With IgnoreMissingAttachments=true the export continues.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6162,16 +6057,13 @@ TEST_CASE("project.store.export.file_deleted_between_phases")
REQUIRE(Oplog);
Oplog->AppendNewOplogEntry(CreateFilesOplogPackage(Oid::NewOid(), RootDir, FileAtts));
- // Collect filesystem paths so the custom context can delete them.
std::vector<std::filesystem::path> FilePaths;
for (const auto& [Id, Path] : FileAtts)
{
FilePaths.push_back(Path);
}
- // Custom context: when the "Rewrote" message arrives (end of building phase,
- // before AllowChunking workers are dispatched) delete the files so that
- // MakeFromFile returns empty and line 2088 is hit deterministically.
+ // Deletes files when "Rewrote" arrives, before AllowChunking workers run.
struct DeleteOnRewriteContext : public CapturingJobContext
{
std::vector<std::filesystem::path>* Paths = nullptr;
@@ -6211,10 +6103,7 @@ TEST_CASE("project.store.export.file_deleted_between_phases")
&Ctx,
/*ForceDisableBlocks=*/false);
- // The AllowChunking phase cleanup (line 2195-2205) emits "Missing attachment" for each hash
- // whose MakeFromFile call failed at line 2088.
CHECK(Ctx.HasMessage("Missing attachment"));
- // The files should have been deleted by the context callback.
for (const auto& P : FilePaths)
{
CHECK(!std::filesystem::exists(P));
@@ -6223,12 +6112,8 @@ TEST_CASE("project.store.export.file_deleted_between_phases")
TEST_CASE("project.store.embed_loose_files_zero_data_hash")
{
- // File entries carry a "data" field of CbFieldType::Hash(IoHash::Zero) — a
- // declared-but-unresolved marker. CbFieldView::AsHash() returns Zero for a Hash
- // field whose stored value is zero, so RewriteOp enters the rewrite path and reads
- // the file from disk. RewriteCbObject then iterates the entry's fields; when it
- // reaches the "data" field the callback returns true at line 1858 (excluding it from
- // the field copy before the resolved BinaryAttachment is added at line 1862).
+ // File-op entries with "data": IoHash::Zero (unresolved marker) trigger RewriteOp to
+ // read from disk and replace with a resolved BinaryAttachment.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6268,7 +6153,6 @@ TEST_CASE("project.store.embed_loose_files_zero_data_hash")
/*ForceDisableBlocks=*/false,
&RemoteStore);
- // Round-trip: the resolved attachment data must be recoverable.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_zero_data_hash_import", {});
LoadOplog(LoadOplogContext{.ChunkStore = CidStore,
.RemoteStore = *RemoteStore,
@@ -6281,12 +6165,8 @@ TEST_CASE("project.store.embed_loose_files_zero_data_hash")
TEST_CASE("project.store.embed_loose_files_already_resolved")
{
- // After an export → import round-trip, the local oplog contains "files" entries with
- // "data": BinaryAttachment(H) already set by the first export's RewriteOp. On the
- // second export with EmbedLooseFiles=true, RewriteOp finds DataHash != Zero for those
- // entries → CopyField remains true → line 1873 (Cbo.AddField(Field)) is hit.
- // The attachment data is available in CidStore from the import, so the re-export
- // succeeds end-to-end.
+ // After an export->import round-trip, oplog entries carry resolved "data": BinaryAttachment(H).
+ // A re-export must preserve those fields without re-reading from disk.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6309,8 +6189,6 @@ TEST_CASE("project.store.embed_loose_files_already_resolved")
WorkerThreadPool& NetworkPool = Pools.NetworkPool;
WorkerThreadPool& WorkerPool = Pools.WorkerPool;
- // First export: RewriteOp hashes the files and rewrites each entry to include
- // "data": BinaryAttachment(H).
std::shared_ptr<RemoteProjectStore> RemoteStore1;
RunSaveOplog(CidStore,
*Project,
@@ -6329,8 +6207,6 @@ TEST_CASE("project.store.embed_loose_files_already_resolved")
/*ForceDisableBlocks=*/false,
&RemoteStore1);
- // Import: WriteOplogSection copies the rewritten ops (with pre-resolved "data"
- // BinaryAttachment fields) into the local oplog; attachment data lands in CidStore.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_already_resolved_import", {});
LoadOplog(LoadOplogContext{.ChunkStore = CidStore,
.RemoteStore = *RemoteStore1,
@@ -6340,9 +6216,6 @@ TEST_CASE("project.store.embed_loose_files_already_resolved")
.IgnoreMissingAttachments = false,
.PartialBlockRequestMode = EPartialBlockRequestMode::Mixed});
- // Re-export: the imported oplog ops carry "data": BinaryAttachment(H) (DataHash != Zero).
- // RewriteOp copies those entries via line 1873 without re-reading files from disk.
- // The attachment data is fetched from CidStore in the AllowChunking phase.
RunSaveOplog(CidStore,
*Project,
*ImportOplog,
@@ -6419,7 +6292,6 @@ TEST_CASE("project.store.import.missing_attachment")
SUBCASE("throws_when_not_ignored")
{
- // IgnoreMissingAttachments=false: LoadOplog must throw RemoteStoreError.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_missing_att_throw", {});
REQUIRE(ImportOplog);
CapturingJobContext Ctx;
@@ -6437,7 +6309,6 @@ TEST_CASE("project.store.import.missing_attachment")
SUBCASE("succeeds_when_ignored")
{
- // IgnoreMissingAttachments=true: LoadOplog must succeed and report the failure.
Ref<ProjectStore::Oplog> ImportOplog = Project->NewOplog("oplog_missing_att_ignore", {});
REQUIRE(ImportOplog);
CapturingJobContext Ctx;
@@ -6456,9 +6327,7 @@ TEST_CASE("project.store.import.missing_attachment")
TEST_CASE("project.store.import.error.load_container_failure")
{
- // Point a FileRemoteProjectStore at a nonexistent directory so that
- // LoadContainer() returns a non-zero ErrorCode, then verify that
- // LoadOplog throws a RemoteStoreError.
+ // LoadContainer() on a nonexistent path returns non-zero ErrorCode -> LoadOplog throws RemoteStoreError.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6468,7 +6337,6 @@ TEST_CASE("project.store.import.error.load_container_failure")
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // Point to a path that does not contain a valid oplog container.
std::filesystem::path NonExistentPath = TempDir.Path() / "does_not_exist";
FileRemoteStoreOptions Options = {RemoteStoreOptions{.MaxBlockSize = 64u * 1024u,
.MaxChunksPerBlock = 1000,
@@ -6503,9 +6371,7 @@ TEST_CASE("project.store.import.error.load_container_failure")
TEST_CASE("project.store.blockcomposer.path_a_standalone_block")
{
- // Path A: a single op with exactly MaxChunksPerBlock (4) chunks.
- // The gather loop sets CurrentOpFillFullBlock=true when the count reaches MaxChunksPerBlock.
- // Path A emits the gathered set as a standalone block immediately, leaving pending untouched.
+ // Path A: one op with exactly MaxChunksPerBlock chunks -> emitted as a standalone block without merging into pending.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6552,9 +6418,7 @@ TEST_CASE("project.store.blockcomposer.path_b_fits_pending")
TEST_CASE("project.store.blockcomposer.path_b_exact_count_fill")
{
- // Path B with exact count fill: two ops each contributing 2 chunks, MaxChunks=4.
- // After the second op merges, PendingChunkHashes.size() == MaxChunksPerBlock, so Path B
- // flushes immediately -- no separate final flush is needed.
+ // Path B: pending reaches MaxChunksPerBlock exactly -> immediate flush, no separate final flush.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6578,12 +6442,8 @@ TEST_CASE("project.store.blockcomposer.path_b_exact_count_fill")
TEST_CASE("project.store.blockcomposer.path_c_75pct_flush")
{
- // Path C: the pending block is >75% full by bytes when a new op arrives that does not fit.
- // The pending block is flushed first; the new op chunk is then placed via Path B, and
- // emitted by the final flush.
- //
- // UsableSize=100 so that 75% threshold=75. MaxChunkEmbedSize is fixed at 100 by MakeTestConfig,
- // so Op1=80 bytes exceeds the 75% threshold within a single chunk.
+ // Path C: pending is >75% full when the next op doesn't fit -> pending flushed first, new op placed via Path B.
+ // UsableSize=100, threshold=75 bytes; Op1=80 bytes exceeds the 75-byte threshold.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 100; // 75% threshold = 75 bytes
@@ -6602,7 +6462,6 @@ TEST_CASE("project.store.blockcomposer.path_c_75pct_flush")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: Path C flush of Op1 chunk. Block 2: final flush of Op2 chunk.
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 1);
CHECK(Blocks[0][0] == MakeTestHash(1));
@@ -6612,10 +6471,7 @@ TEST_CASE("project.store.blockcomposer.path_c_75pct_flush")
TEST_CASE("project.store.blockcomposer.path_d_partial_fill")
{
- // Path D: the pending block is <=75% full by bytes but chunk count is the binding constraint.
- // Pending has 3 chunks; the incoming op has 2 chunks that would exceed MaxChunks=4.
- // Path D greedily adds 1 chunk to fill pending to capacity, flushes, then places the
- // remaining 1 chunk in the new pending block (emitted by the final flush).
+ // Path D: pending <=75% full but chunk count is the binding constraint. Greedy fill adds chunks until count capacity, then flushes.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000; // 75% threshold = 750 bytes
@@ -6635,8 +6491,6 @@ TEST_CASE("project.store.blockcomposer.path_d_partial_fill")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: [op1_c0, op1_c1, op1_c2, op2_c0] (Path D fills and flushes).
- // Block 2: [op2_c1] (final flush).
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 4);
CHECK(Blocks[0][0] == MakeTestHash(1));
@@ -6670,15 +6524,13 @@ TEST_CASE("project.store.blockcomposer.cancellation")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Op1 fills a block (Path A) before the second cancellation check fires.
REQUIRE(Blocks.size() == 1);
CHECK(Blocks[0].size() == 4);
}
TEST_CASE("project.store.blockcomposer.final_flush")
{
- // Three single-chunk ops all merge into the pending block via Path B without triggering
- // any mid-stream flush. The single pending block is emitted by the final flush.
+ // Three ops with all chunks fitting in pending (no mid-stream flush) -> single block from final flush.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6704,14 +6556,9 @@ TEST_CASE("project.store.blockcomposer.final_flush")
TEST_CASE("project.store.blockcomposer.path_b_b_c")
{
- // Multi-step: Path B -> Path B -> Path C
- // Two ops build pending past the 75% byte threshold. The third op triggers Path C,
- // flushing the first block. The third op chunk is then placed via Path B and emitted
- // by the final flush.
+ // Path B -> Path B -> Path C: two ops accumulate past 75% threshold; third op triggers Path C flush.
+ // UsableSize=200, threshold=150; two ops of 90 bytes each accumulate 180 bytes, exceeding the threshold.
using namespace projectstore_testutils;
-
- // UsableSize=200 so that 75% threshold=150. MaxChunkEmbedSize is fixed at 100 by MakeTestConfig,
- // so two ops of 90 bytes each accumulate 180 bytes in pending, exceeding the threshold.
constexpr uint64_t UsableSize = 200; // 75% threshold = 150 bytes
constexpr uint64_t MaxChunks = 8;
remotestore_impl::BlockComposer Composer(MakeTestConfig(UsableSize, MaxChunks));
@@ -6740,9 +6587,7 @@ TEST_CASE("project.store.blockcomposer.path_b_b_c")
TEST_CASE("project.store.blockcomposer.path_a_b_final_flush")
{
- // Multi-step: Path A -> Path B -> final flush
- // The first op exactly fills a block (count-saturated, Path A). The second op fits into
- // the now-empty pending block via Path B and is emitted by the final flush.
+ // Path A -> Path B -> final flush: first op count-saturates -> standalone block, second op placed via Path B.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6760,7 +6605,6 @@ TEST_CASE("project.store.blockcomposer.path_a_b_final_flush")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: Path A standalone (all 4 Op1 chunks). Block 2: final flush of Op2 (2 chunks).
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 4);
CHECK(Blocks[0][0] == MakeTestHash(1));
@@ -6772,7 +6616,7 @@ TEST_CASE("project.store.blockcomposer.path_a_b_final_flush")
TEST_CASE("project.store.blockcomposer.empty_input")
{
- // Compose called with zero attachments emits no blocks and does not invoke the final flush.
+ // Zero attachments -> no blocks emitted.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6787,8 +6631,7 @@ TEST_CASE("project.store.blockcomposer.empty_input")
TEST_CASE("project.store.blockcomposer.single_attachment")
{
- // A single chunk from a single op: the gather inner loop never runs.
- // Path B places it into the empty pending block; the final flush emits it.
+ // Single chunk -> Path B into empty pending, final flush emits it.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6810,11 +6653,7 @@ TEST_CASE("project.store.blockcomposer.single_attachment")
TEST_CASE("project.store.blockcomposer.path_a_size_saturation")
{
- // Path A triggered by size overflow in the gather phase (not count).
- // Op1 has 2 chunks of 60 bytes; adding the second would exceed UsableSize=100.
- // The gather loop sets CurrentOpFillFullBlock=true after collecting only the first chunk,
- // which Path A emits as a standalone block. The second chunk is then processed in the next
- // outer-loop iteration and placed via Path B, emitted by the final flush.
+ // Path A by size overflow: 60+60 > UsableSize=100; first chunk emitted standalone, second via Path B.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 100; // MaxChunkEmbedSize=100; two 60-byte chunks overflow
@@ -6830,7 +6669,6 @@ TEST_CASE("project.store.blockcomposer.path_a_size_saturation")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: Path A standalone (chunk0 only). Block 2: final flush of chunk1.
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 1);
CHECK(Blocks[0][0] == MakeTestHash(1));
@@ -6840,10 +6678,7 @@ TEST_CASE("project.store.blockcomposer.path_a_size_saturation")
TEST_CASE("project.store.blockcomposer.path_b_exact_size_fill")
{
- // Path B immediate flush triggered by exact byte fill (PendingBlockSize == UsableBlockSize),
- // as opposed to the count-fill case covered by path_b_exact_count_fill.
- // Op1 adds 60 bytes; Op2 adds the remaining 40 bytes to exactly reach UsableSize=100.
- // Path B flushes immediately after Op2 merges; no separate final flush is needed.
+ // Path B immediate flush when pending reaches UsableBlockSize exactly (vs count-fill in path_b_exact_count_fill).
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 100;
@@ -6869,16 +6704,7 @@ TEST_CASE("project.store.blockcomposer.path_b_exact_size_fill")
TEST_CASE("project.store.blockcomposer.path_d_size_limited_greedy")
{
- // Path D where the greedy fill loop stops because the next chunk would exceed
- // UsableBlockSize by bytes (not because count reaches MaxChunksPerBlock).
- //
- // UsableSize=200, MaxChunks=8 (high count cap to ensure size is the binding constraint).
- // Op1 puts 90 bytes in pending. Op2 has 3x60-byte chunks (total 180); they don't fit
- // (90+180=270>200) and pending is <=75% full (90<=150) -> Path D.
- // Greedy loop adds Op2[0] (90+60=150<=200), then Op2[1] would overflow (150+60=210>200)
- // -> breaks on size. Pending [Op1,Op2[0]] is flushed as block 1.
- // Op2[1] and Op2[2] (120 bytes total) fit in the empty pending via Path B -> final flush
- // -> block 2.
+ // Path D where greedy fill is limited by size (not count). MaxChunks=8 ensures size is binding.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 200; // 75% threshold = 150 bytes
@@ -6894,7 +6720,6 @@ TEST_CASE("project.store.blockcomposer.path_d_size_limited_greedy")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: [Op1, Op2[0]] (Path D size-limited flush). Block 2: [Op2[1], Op2[2]] (final flush).
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 2);
CHECK(Blocks[0][0] == MakeTestHash(1));
@@ -6906,10 +6731,7 @@ TEST_CASE("project.store.blockcomposer.path_d_size_limited_greedy")
TEST_CASE("project.store.blockcomposer.path_a_pending_untouched")
{
- // Path A leaves the pending block untouched. Op1 accumulates 2 chunks in pending via
- // Path B. Op2 then count-saturates the gather phase (4 chunks == MaxChunksPerBlock),
- // triggering Path A which emits Op2 as a standalone block without disturbing pending.
- // The final flush then emits Op1's 2 chunks still held in pending.
+ // Path A leaves pending untouched: Op1 in pending, Op2 count-saturates -> standalone block. Final flush emits Op1.
using namespace projectstore_testutils;
constexpr uint64_t UsableSize = 1000;
@@ -6929,7 +6751,6 @@ TEST_CASE("project.store.blockcomposer.path_a_pending_untouched")
std::vector<std::vector<IoHash>> Blocks;
Composer.Compose(Hashes, Sizes, Keys, [&](std::vector<IoHash>&& B) { Blocks.push_back(std::move(B)); });
- // Block 1: Path A standalone (Op2's 4 chunks). Block 2: final flush of Op1's 2 pending chunks.
REQUIRE(Blocks.size() == 2);
CHECK(Blocks[0].size() == 4);
CHECK(Blocks[0][0] == MakeTestHash(3));
@@ -6940,15 +6761,12 @@ TEST_CASE("project.store.blockcomposer.path_a_pending_untouched")
}
// ---------------------------------------------------------------------------
-// BuildContainer-direct tests (Steps 2a–2l)
+// BuildContainer-direct tests
// ---------------------------------------------------------------------------
TEST_CASE("buildcontainer.public_overload_smoke")
{
- // Smoke test for the public BuildContainer overload.
- // One bulkdata op with a 1 KB attachment (< MaxChunkEmbedSize → goes into a block).
- // Verifies the public overload compiles and runs, that the result is non-empty, and
- // that AsyncOnBlock fires at least once.
+ // Verifies the public BuildContainer overload runs successfully and calls AsyncOnBlock.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -6988,9 +6806,7 @@ TEST_CASE("buildcontainer.public_overload_smoke")
TEST_CASE("buildcontainer.build_blocks_false_on_block_chunks")
{
- // With BuildBlocks=false, small attachments that would normally be grouped into a block
- // are instead delivered to OnBlockChunks (line ~2792).
- // AsyncOnBlock must NOT fire.
+ // BuildBlocks=false: small attachments go to OnBlockChunks instead of AsyncOnBlock.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7034,8 +6850,8 @@ TEST_CASE("buildcontainer.build_blocks_false_on_block_chunks")
TEST_CASE("buildcontainer.ignore_missing_binary_attachment_warn")
{
// A bulk-data op references a hash that is absent from CidStore.
- // SUBCASE warn: IgnoreMissingAttachments=true → ReportMessage("Missing attachment …").
- // SUBCASE throw: IgnoreMissingAttachments=false → std::runtime_error.
+ // SUBCASE warn: IgnoreMissingAttachments=true -> ReportMessage("Missing attachment ...").
+ // SUBCASE throw: IgnoreMissingAttachments=false -> std::runtime_error.
using namespace projectstore_testutils;
using namespace std::literals;
@@ -7120,8 +6936,8 @@ TEST_CASE("buildcontainer.ignore_missing_binary_attachment_warn")
TEST_CASE("buildcontainer.ignore_missing_file_attachment_warn")
{
// File attachments are created on disk then deleted before BuildContainer runs.
- // SUBCASE warn: IgnoreMissingAttachments=true → ReportMessage("Missing attachment …").
- // SUBCASE throw: IgnoreMissingAttachments=false → exception.
+ // SUBCASE warn: IgnoreMissingAttachments=true -> ReportMessage("Missing attachment ...").
+ // SUBCASE throw: IgnoreMissingAttachments=false -> exception.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7234,15 +7050,10 @@ TEST_CASE("buildcontainer.embed_loose_files_false_no_rewrite")
TEST_CASE("buildcontainer.allow_chunking_false")
{
- // When AllowChunking=false, a bulkdata attachment that exceeds ChunkFileSizeLimit
- // is NOT split into chunks and goes directly to OnLargeAttachment.
- // SUBCASE false: verify OnLargeAttachment fires.
- // SUBCASE true: verify OnLargeAttachment does NOT fire for the same data (gets chunked
- // into smaller pieces → AsyncOnBlock or OnBlockChunks fires instead).
- //
- // Parameters chosen so that the 4 KB attachment:
- // - exceeds MaxChunkEmbedSize (2 KB) → classified as large
- // - exceeds ChunkFileSizeLimit (1 KB) → eligible for chunking when AllowChunking=true
+ // AllowChunking=false: attachments exceeding ChunkFileSizeLimit skip chunking -> OnLargeAttachment.
+ // AllowChunking=true: same data is chunked, but the chunk still exceeds MaxChunkEmbedSize -> OnLargeAttachment;
+ // exercises the AllowChunking branch in FindChunkSizes.
+ // 4 KB attachment: > MaxChunkEmbedSize (2 KB) and > ChunkFileSizeLimit (1 KB).
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7252,8 +7063,7 @@ TEST_CASE("buildcontainer.allow_chunking_false")
std::unique_ptr<ProjectStore> ProjectStoreDummy;
Ref<ProjectStore::Project> Project = MakeTestProject(CidStore, Gc, TempDir.Path(), ProjectStoreDummy);
- // Use OodleCompressionLevel::None so the compressed size stays ≈ raw size (4 KB),
- // ensuring it exceeds both MaxChunkEmbedSize (2 KB) and ChunkFileSizeLimit (1 KB).
+ // None encoding: compressed ~ 4 KB > MaxChunkEmbedSize (2 KB) and ChunkFileSizeLimit (1 KB).
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("bc_allow_chunk", {});
REQUIRE(Oplog);
Oplog->AppendNewOplogEntry(
@@ -7289,10 +7099,7 @@ TEST_CASE("buildcontainer.allow_chunking_false")
SUBCASE("allow_chunking_true")
{
- // With AllowChunking=true the chunking branch in FindChunkSizes IS taken.
- // The resulting chunk (still ~4 KB) exceeds MaxChunkEmbedSize (2 KB) so it
- // still ends up in OnLargeAttachment — the value of this subcase is exercising
- // the `AllowChunking && Size > ChunkFileSizeLimit` branch in FindChunkSizes.
+ // Chunking branch in FindChunkSizes is taken, but the ~4 KB chunk still exceeds MaxChunkEmbedSize -> OnLargeAttachment.
std::atomic<int> LargeAttachmentCallCount{0};
BuildContainer(
CidStore,
@@ -7356,9 +7163,7 @@ TEST_CASE("buildcontainer.async_on_block_exception_propagates")
TEST_CASE("buildcontainer.on_large_attachment_exception_propagates")
{
- // If OnLargeAttachment throws, the exception must propagate out of BuildContainer.
- // A 64 KB attachment with MaxChunkEmbedSize=32 KB exceeds the embed limit so it
- // goes to OnLargeAttachment.
+ // OnLargeAttachment exception must propagate. 64 KB with MaxChunkEmbedSize=32 KB -> OnLargeAttachment.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7370,7 +7175,7 @@ TEST_CASE("buildcontainer.on_large_attachment_exception_propagates")
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("bc_large_exc", {});
REQUIRE(Oplog);
- // 64 KB with OodleCompressionLevel::None → compressed ≈ 64 KB > MaxChunkEmbedSize (32 KB).
+ // 64 KB with OodleCompressionLevel::None -> compressed ~ 64 KB > MaxChunkEmbedSize (32 KB).
Oplog->AppendNewOplogEntry(
CreateBulkDataOplogPackage(Oid::NewOid(),
CreateAttachments(std::initializer_list<size_t>{64u * 1024u}, OodleCompressionLevel::None)));
@@ -7398,8 +7203,7 @@ TEST_CASE("buildcontainer.on_large_attachment_exception_propagates")
TEST_CASE("buildcontainer.context_cancellation_aborts")
{
- // With IsCancelled() returning true from the start, BuildContainer must not crash
- // or throw unhandled — it returns an empty result or exits cleanly.
+ // IsCancelled() returns true from the start; BuildContainer must not crash or throw.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7420,7 +7224,6 @@ TEST_CASE("buildcontainer.context_cancellation_aborts")
CapturingJobContext Ctx;
Ctx.m_Cancel = true;
- // Must not throw or crash.
CHECK_NOTHROW(BuildContainer(
CidStore,
*Project,
@@ -7443,8 +7246,7 @@ TEST_CASE("buildcontainer.context_cancellation_aborts")
TEST_CASE("buildcontainer.context_progress_reporting")
{
- // ReportProgress is called at least once by BuildContainer (the "Scanning oplog" call).
- // Verifies the CapturingJobContext.ProgressMessages field is populated.
+ // BuildContainer calls ReportProgress at least once ("Scanning oplog").
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7484,14 +7286,12 @@ TEST_CASE("buildcontainer.context_progress_reporting")
}
// ---------------------------------------------------------------------------
-// SaveOplog-focused tests (Step 3)
+// SaveOplog-focused tests
// ---------------------------------------------------------------------------
TEST_CASE("saveoplog.cancellation")
{
- // When IsCancelled() returns true from the start, SaveOplog must return without
- // throwing. The first IsCancelled check inside BuildContainer (or UploadAttachments)
- // aborts before significant work is done.
+ // IsCancelled() returns true from the start; SaveOplog must not throw.
using namespace projectstore_testutils;
ScopedTemporaryDirectory TempDir;
@@ -7531,14 +7331,13 @@ TEST_CASE("saveoplog.cancellation")
}
// ---------------------------------------------------------------------------
-// LoadOplog-focused tests (Step 4)
+// LoadOplog-focused tests
// ---------------------------------------------------------------------------
TEST_CASE("loadoplog.missing_block_attachment_ignored")
{
- // Export with ForceDisableBlocks=false so that a block (.blob) is created in the
- // remote store. Then delete the block file. With IgnoreMissingAttachments=true,
- // LoadOplog must not throw and must report the failure.
+ // Export creates a block file; deleting it then loading with IgnoreMissingAttachments=true
+ // must succeed and report the failure via "Failed to download block attachment".
using namespace projectstore_testutils;
using namespace std::literals;
@@ -7552,7 +7351,6 @@ TEST_CASE("loadoplog.missing_block_attachment_ignored")
Ref<ProjectStore::Oplog> Oplog = Project->NewOplog("oplog_missing_block", {});
REQUIRE(Oplog);
- // Four small attachments — all < MaxChunkEmbedSize (32 KB) → go into a block.
Oplog->AppendNewOplogEntry(
CreateBulkDataOplogPackage(Oid::NewOid(), CreateAttachments(std::initializer_list<size_t>{1024, 1024, 2048, 512})));
@@ -7578,7 +7376,6 @@ TEST_CASE("loadoplog.missing_block_attachment_ignored")
/*ForceDisableBlocks=*/false,
&RemoteStore);
- // Find block hashes and delete their .blob files from ExportDir.
RemoteProjectStore::GetKnownBlocksResult KnownBlocks = RemoteStore->GetKnownBlocks();
REQUIRE(KnownBlocks.ErrorCode == 0);
REQUIRE(!KnownBlocks.Blocks.empty());
@@ -7607,10 +7404,7 @@ TEST_CASE("loadoplog.missing_block_attachment_ignored")
TEST_CASE("loadoplog.clean_oplog_with_populated_cache")
{
- // First import populates the BuildStorageCache.
- // Second import with CleanOplog=true and the same non-null cache exercises the
- // OptionalCache->Flush() call at line ~4304 (currently unreached because all existing
- // CleanOplog=true callers pass OptionalCache=nullptr).
+ // Second import with CleanOplog=true and a non-null cache exercises the OptionalCache->Flush() path.
using namespace projectstore_testutils;
using namespace std::literals;
@@ -7622,7 +7416,7 @@ TEST_CASE("loadoplog.clean_oplog_with_populated_cache")
std::filesystem::path ProjectRootDir = TempDir.Path() / "game";
std::filesystem::path ProjectFilePath = TempDir.Path() / "game" / "game.uproject";
- // Export side: used only to build the remote store payload.
+ // Export side.
GcManager ExportGc;
CidStore ExportCidStore(ExportGc);
CidStoreConfiguration ExportCidConfig = {.RootDirectory = TempDir.Path() / "export_cas",
@@ -7646,7 +7440,7 @@ TEST_CASE("loadoplog.clean_oplog_with_populated_cache")
std::shared_ptr<RemoteProjectStore> RemoteStore =
SetupExportStore(ExportCidStore, *ExportProject, NetworkPool, WorkerPool, ExportDir.Path());
- // Import side: starts empty so the first import genuinely downloads everything.
+ // Import side, starts empty.
GcManager ImportGc;
CidStore ImportCidStore(ImportGc);
CidStoreConfiguration ImportCidConfig = {.RootDirectory = TempDir.Path() / "import_cas",
@@ -7667,7 +7461,6 @@ TEST_CASE("loadoplog.clean_oplog_with_populated_cache")
BuildStorageCache::Statistics CacheStats;
std::unique_ptr<BuildStorageCache> Cache = CreateInMemoryBuildStorageCache(256u, CacheStats);
- // First import: populate the cache.
{
Ref<ProjectStore::Oplog> Phase1Oplog = ImportProject->NewOplog("oplog_clean_cache_p1", {});
LoadOplog(LoadOplogContext{.ChunkStore = ImportCidStore,
@@ -7684,7 +7477,6 @@ TEST_CASE("loadoplog.clean_oplog_with_populated_cache")
.PopulateCache = true});
}
- // Second import: CleanOplog=true with a non-null cache exercises the Flush() path.
{
Ref<ProjectStore::Oplog> Phase2Oplog = ImportProject->NewOplog("oplog_clean_cache_p2", {});
CHECK_NOTHROW(LoadOplog(LoadOplogContext{.ChunkStore = ImportCidStore,