aboutsummaryrefslogtreecommitdiff
path: root/src/zenhorde/hordebundle.cpp
diff options
context:
space:
mode:
authorLiam Mitchell <[email protected]>2026-03-09 19:06:36 -0700
committerLiam Mitchell <[email protected]>2026-03-09 19:06:36 -0700
commitd1abc50ee9d4fb72efc646e17decafea741caa34 (patch)
treee4288e00f2f7ca0391b83d986efcb69d3ba66a83 /src/zenhorde/hordebundle.cpp
parentAllow requests with invalid content-types unless specified in command line or... (diff)
parentupdated chunk–block analyser (#818) (diff)
downloadzen-d1abc50ee9d4fb72efc646e17decafea741caa34.tar.xz
zen-d1abc50ee9d4fb72efc646e17decafea741caa34.zip
Merge branch 'main' into lm/restrict-content-type
Diffstat (limited to 'src/zenhorde/hordebundle.cpp')
-rw-r--r--src/zenhorde/hordebundle.cpp619
1 files changed, 619 insertions, 0 deletions
diff --git a/src/zenhorde/hordebundle.cpp b/src/zenhorde/hordebundle.cpp
new file mode 100644
index 000000000..d3974bc28
--- /dev/null
+++ b/src/zenhorde/hordebundle.cpp
@@ -0,0 +1,619 @@
+// Copyright Epic Games, Inc. All Rights Reserved.
+
+#include "hordebundle.h"
+
+#include <zencore/basicfile.h>
+#include <zencore/filesystem.h>
+#include <zencore/fmtutils.h>
+#include <zencore/intmath.h>
+#include <zencore/iohash.h>
+#include <zencore/logging.h>
+#include <zencore/process.h>
+#include <zencore/trace.h>
+
#include <algorithm>
#include <atomic>
#include <chrono>
#include <cstdint>
#include <cstring>
#include <filesystem>
#include <string>
#include <string_view>
#include <system_error>
#include <vector>
+
+namespace zen::horde {
+
+static LoggerRef
+Log()
+{
+ static auto s_Logger = zen::logging::Get("horde.bundle");
+ return s_Logger;
+}
+
// Packet framing: every packet (outer encoded wrapper and inner payload alike)
// begins with the 3-byte signature 'U','B','N' followed by a version byte.
static constexpr uint8_t PacketSignature[3] = {'U', 'B', 'N'};
static constexpr uint8_t PacketVersion = 5;
// Import base index meaning "an export within this same packet". Encoded on the
// wire with ImportBias added, i.e. VarInt(BaseIdx + ImportBias) — see Finish().
static constexpr int32_t CurrentPacketBaseIdx = -2;
static constexpr int ImportBias = 3;
static constexpr uint32_t ChunkSize = 64 * 1024; // 64KB fixed chunks
static constexpr uint32_t LargeFileThreshold = 128 * 1024; // 128KB; larger files are split into ChunkSize leaves

// BlobType: 20 bytes each = FGuid (16 bytes, 4x uint32 LE) + Version (int32 LE)
// Values from UE SDK: GUIDs stored as 4 uint32 LE values.

// ChunkLeaf v1: {0xB27AFB68, 0x4A4B9E20, 0x8A78D8A4, 0x39D49840}
static constexpr uint8_t BlobType_ChunkLeafV1[20] = {0x68, 0xFB, 0x7A, 0xB2, 0x20, 0x9E, 0x4B, 0x4A, 0xA4, 0xD8,
                                                     0x78, 0x8A, 0x40, 0x98, 0xD4, 0x39, 0x01, 0x00, 0x00, 0x00}; // version 1

// ChunkInterior v2: {0xF4DEDDBC, 0x4C7A70CB, 0x11F04783, 0xB9CDCCAF}
static constexpr uint8_t BlobType_ChunkInteriorV2[20] = {0xBC, 0xDD, 0xDE, 0xF4, 0xCB, 0x70, 0x7A, 0x4C, 0x83, 0x47,
                                                         0xF0, 0x11, 0xAF, 0xCC, 0xCD, 0xB9, 0x02, 0x00, 0x00, 0x00}; // version 2

// Directory v1: {0x0714EC11, 0x4D07291A, 0x8AE77F86, 0x799980D6}
static constexpr uint8_t BlobType_DirectoryV1[20] = {0x11, 0xEC, 0x14, 0x07, 0x1A, 0x29, 0x07, 0x4D, 0x86, 0x7F,
                                                     0xE7, 0x8A, 0xD6, 0x80, 0x99, 0x79, 0x01, 0x00, 0x00, 0x00}; // version 1

static constexpr size_t BlobTypeSize = 20;
+
+// ─── VarInt helpers (UE format) ─────────────────────────────────────────────
+
// Returns the number of bytes WriteVarInt will emit for Value (1..9).
// The format stores 7 data bits per byte for encodings of 1..8 bytes; a full
// 9-byte encoding (first byte 0xFF) carries 64 data bits in the trailing bytes.
//
// Fix: the previous implementation cast Value to `unsigned int` before taking
// FloorLog2, silently truncating 64-bit values >= 2^32 (and feeding 0 to
// FloorLog2 for e.g. 2^32 exactly). File sizes here are uint64_t, so lengths
// over 4 GiB would have been mis-measured. The uncapped formula also returned
// 10 for values >= 2^63, exceeding the 9-byte wire maximum.
static size_t
MeasureVarInt(size_t Value)
{
    size_t ByteCount = 1;
    while (Value >>= 7)
    {
        ++ByteCount;
    }
    // 9 bytes is the format maximum and suffices for any 64-bit value.
    return std::min<size_t>(ByteCount, 9);
}
+
+static void
+WriteVarInt(std::vector<uint8_t>& Buffer, size_t Value)
+{
+ const size_t ByteCount = MeasureVarInt(Value);
+ const size_t Offset = Buffer.size();
+ Buffer.resize(Offset + ByteCount);
+
+ uint8_t* Output = Buffer.data() + Offset;
+ for (size_t i = 1; i < ByteCount; ++i)
+ {
+ Output[ByteCount - i] = static_cast<uint8_t>(Value);
+ Value >>= 8;
+ }
+ Output[0] = static_cast<uint8_t>((0xFF << (9 - static_cast<int>(ByteCount))) | static_cast<uint8_t>(Value));
+}
+
+// ─── Binary helpers ─────────────────────────────────────────────────────────
+
// Appends Value to Buffer as 4 raw bytes in native byte order
// (little-endian on all supported targets).
static void
WriteLE32(std::vector<uint8_t>& Buffer, int32_t Value)
{
    const size_t Offset = Buffer.size();
    Buffer.resize(Offset + sizeof(Value));
    memcpy(Buffer.data() + Offset, &Value, sizeof(Value));
}
+
// Appends a single byte to Buffer.
static void
WriteByte(std::vector<uint8_t>& Buffer, uint8_t Value)
{
    Buffer.emplace_back(Value);
}
+
// Appends Size raw bytes from Data to Buffer. A zero Size is a no-op.
static void
WriteBytes(std::vector<uint8_t>& Buffer, const void* Data, size_t Size)
{
    const size_t Offset = Buffer.size();
    Buffer.resize(Offset + Size);
    if (Size != 0)
    {
        memcpy(Buffer.data() + Offset, Data, Size);
    }
}
+
+static void
+WriteString(std::vector<uint8_t>& Buffer, std::string_view Str)
+{
+ WriteVarInt(Buffer, Str.size());
+ WriteBytes(Buffer, Str.data(), Str.size());
+}
+
// Pads Buffer with zero bytes up to the next 4-byte boundary (no-op if aligned).
static void
AlignTo4(std::vector<uint8_t>& Buffer)
{
    const size_t Misalign = Buffer.size() % 4;
    if (Misalign != 0)
    {
        Buffer.insert(Buffer.end(), 4 - Misalign, 0);
    }
}
+
// Overwrites 4 bytes of Buffer at Offset with Value (native byte order).
// Caller guarantees Offset + 4 <= Buffer.size().
static void
PatchLE32(std::vector<uint8_t>& Buffer, size_t Offset, int32_t Value)
{
    uint8_t Raw[sizeof(Value)];
    memcpy(Raw, &Value, sizeof(Value));
    std::copy(Raw, Raw + sizeof(Value), Buffer.begin() + static_cast<ptrdiff_t>(Offset));
}
+
+// ─── Packet builder ─────────────────────────────────────────────────────────
+
+// Builds a single uncompressed Horde V2 packet. Layout:
+// [Signature(3) + Version(1) + PacketLength(4)] 8 bytes (header)
+// [TypeTableOffset(4) + ImportTableOffset(4) + ExportTableOffset(4)] 12 bytes
+// [Export data...]
+// [Type table: count(4) + count * 20 bytes]
+// [Import table: count(4) + (count+1) offset entries(4 each) + import data]
+// [Export table: count(4) + (count+1) offset entries(4 each)]
+//
+// ALL offsets are absolute from byte 0 of the full packet (including the 8-byte header).
+// PacketLength in the header = total packet size including the 8-byte header.
+
// Incrementally builds one uncompressed Horde V2 packet (layout documented
// above). Usage: for each export call BeginExport / WritePayload* /
// CompleteExport; register cross-references with AddImport; then call
// Finish() exactly once to append the tables and patch the header.
// All offsets recorded here are absolute from byte 0 of the packet.
struct PacketBuilder
{
    std::vector<uint8_t> Data;
    std::vector<int32_t> ExportOffsets; // Absolute byte offset of each export from byte 0

    // Type table: unique 20-byte BlobType entries (pointers into the static
    // BlobType_* arrays; entries are deduplicated by AddType)
    std::vector<const uint8_t*> Types;

    // Import table entries: (baseIdx, fragment)
    struct ImportEntry
    {
        int32_t BaseIdx;
        std::string Fragment;
    };
    std::vector<ImportEntry> Imports;

    // Current export's start offset (absolute from byte 0)
    size_t CurrentExportStart = 0;

    PacketBuilder()
    {
        // Reserve packet header (8 bytes) + table offsets (12 bytes) = 20 bytes
        Data.resize(20, 0);

        // Write signature
        Data[0] = PacketSignature[0];
        Data[1] = PacketSignature[1];
        Data[2] = PacketSignature[2];
        Data[3] = PacketVersion;
        // PacketLength, TypeTableOffset, ImportTableOffset, ExportTableOffset
        // will be patched in Finish()
    }

    // Returns the type-table index for BlobType, adding it if not present.
    // Linear scan is fine: a packet holds only a handful of distinct types.
    int AddType(const uint8_t* BlobType)
    {
        for (size_t i = 0; i < Types.size(); ++i)
        {
            if (memcmp(Types[i], BlobType, BlobTypeSize) == 0)
            {
                return static_cast<int>(i);
            }
        }
        Types.push_back(BlobType);
        return static_cast<int>(Types.size() - 1);
    }

    // Registers an import (BaseIdx + locator fragment) and returns its index.
    // Imports are NOT deduplicated — each call appends a new entry.
    int AddImport(int32_t BaseIdx, std::string Fragment)
    {
        Imports.push_back({BaseIdx, std::move(Fragment)});
        return static_cast<int>(Imports.size() - 1);
    }

    // Starts a new export: aligns to 4 bytes, records its start offset, and
    // reserves the 4-byte payload-length slot patched by CompleteExport.
    void BeginExport()
    {
        AlignTo4(Data);
        CurrentExportStart = Data.size();
        // Reserve space for payload length
        WriteLE32(Data, 0);
    }

    // Write raw payload data into the current export
    void WritePayload(const void* Payload, size_t Size) { WriteBytes(Data, Payload, Size); }

    // Completes the current export: patches the payload length (which counts
    // only the bytes written via WritePayload, not the trailing metadata),
    // then appends [type index varint][import count varint][import indices...].
    // Returns the export's index in the export table.
    int CompleteExport(const uint8_t* BlobType, const std::vector<int>& ImportIndices)
    {
        const int ExportIndex = static_cast<int>(ExportOffsets.size());

        // Patch payload length (does not include the 4-byte length field itself)
        const size_t PayloadStart = CurrentExportStart + 4;
        const int32_t PayloadLen = static_cast<int32_t>(Data.size() - PayloadStart);
        PatchLE32(Data, CurrentExportStart, PayloadLen);

        // Write type index (varint)
        const int TypeIdx = AddType(BlobType);
        WriteVarInt(Data, static_cast<size_t>(TypeIdx));

        // Write import count + indices
        WriteVarInt(Data, ImportIndices.size());
        for (int Idx : ImportIndices)
        {
            WriteVarInt(Data, static_cast<size_t>(Idx));
        }

        // Record export offset (absolute from byte 0)
        ExportOffsets.push_back(static_cast<int32_t>(CurrentExportStart));

        return ExportIndex;
    }

    // Finalize the packet: write type/import/export tables, patch header.
    // Moves Data out — the builder must not be used after this call.
    std::vector<uint8_t> Finish()
    {
        AlignTo4(Data);

        // ── Type table: count(int32) + count * BlobTypeSize bytes ──
        const int32_t TypeTableOffset = static_cast<int32_t>(Data.size());
        WriteLE32(Data, static_cast<int32_t>(Types.size()));
        for (const uint8_t* TypeEntry : Types)
        {
            WriteBytes(Data, TypeEntry, BlobTypeSize);
        }

        // ── Import table: count(int32) + (count+1) offsets(int32 each) + import data ──
        const int32_t ImportTableOffset = static_cast<int32_t>(Data.size());
        const int32_t ImportCount = static_cast<int32_t>(Imports.size());
        WriteLE32(Data, ImportCount);

        // Reserve space for (count+1) offset entries — will be patched below.
        // The extra sentinel entry lets a reader compute each import's length
        // as Offset[i+1] - Offset[i].
        const size_t ImportOffsetsStart = Data.size();
        for (int32_t i = 0; i <= ImportCount; ++i)
        {
            WriteLE32(Data, 0); // placeholder
        }

        // Write import data and record offsets
        for (int32_t i = 0; i < ImportCount; ++i)
        {
            // Record absolute offset of this import's data
            PatchLE32(Data, ImportOffsetsStart + static_cast<size_t>(i) * 4, static_cast<int32_t>(Data.size()));

            ImportEntry& Imp = Imports[static_cast<size_t>(i)];
            // BaseIdx encoded as unsigned VarInt with bias: VarInt(BaseIdx + ImportBias)
            const size_t EncodedBaseIdx = static_cast<size_t>(static_cast<int64_t>(Imp.BaseIdx) + ImportBias);
            WriteVarInt(Data, EncodedBaseIdx);
            // Fragment: raw UTF-8 bytes, NO length prefix (length determined by offset table)
            WriteBytes(Data, Imp.Fragment.data(), Imp.Fragment.size());
        }

        // Sentinel offset (points past the last import's data)
        PatchLE32(Data, ImportOffsetsStart + static_cast<size_t>(ImportCount) * 4, static_cast<int32_t>(Data.size()));

        // ── Export table: count(int32) + (count+1) offsets(int32 each) ──
        const int32_t ExportTableOffset = static_cast<int32_t>(Data.size());
        const int32_t ExportCount = static_cast<int32_t>(ExportOffsets.size());
        WriteLE32(Data, ExportCount);

        for (int32_t Off : ExportOffsets)
        {
            WriteLE32(Data, Off);
        }
        // Sentinel: points to the start of the type table (end of export data region)
        WriteLE32(Data, TypeTableOffset);

        // ── Patch header ──
        // PacketLength = total packet size including the 8-byte header
        const int32_t PacketLength = static_cast<int32_t>(Data.size());
        PatchLE32(Data, 4, PacketLength);
        PatchLE32(Data, 8, TypeTableOffset);
        PatchLE32(Data, 12, ImportTableOffset);
        PatchLE32(Data, 16, ExportTableOffset);

        return std::move(Data);
    }
};
+
+// ─── Encoded packet wrapper ─────────────────────────────────────────────────
+
+// Wraps an uncompressed packet with the encoded header:
+// [Signature(3) + Version(1) + HeaderLength(4)] 8 bytes
+// [DecompressedLength(4)] 4 bytes
+// [CompressionFormat(1): 0=None] 1 byte
+// [PacketData...]
+//
+// HeaderLength = total encoded packet size INCLUDING the 8-byte outer header.
+
+static std::vector<uint8_t>
+EncodePacket(std::vector<uint8_t> UncompressedPacket)
+{
+ const int32_t DecompressedLen = static_cast<int32_t>(UncompressedPacket.size());
+ // HeaderLength includes the 8-byte outer signature header itself
+ const int32_t HeaderLength = 8 + 4 + 1 + DecompressedLen;
+
+ std::vector<uint8_t> Encoded;
+ Encoded.reserve(static_cast<size_t>(HeaderLength));
+
+ // Outer signature: 'U','B','N', version=5, HeaderLength (LE int32)
+ WriteByte(Encoded, PacketSignature[0]); // 'U'
+ WriteByte(Encoded, PacketSignature[1]); // 'B'
+ WriteByte(Encoded, PacketSignature[2]); // 'N'
+ WriteByte(Encoded, PacketVersion); // 5
+ WriteLE32(Encoded, HeaderLength);
+
+ // Decompressed length + compression format
+ WriteLE32(Encoded, DecompressedLen);
+ WriteByte(Encoded, 0); // CompressionFormat::None
+
+ // Packet data
+ WriteBytes(Encoded, UncompressedPacket.data(), UncompressedPacket.size());
+
+ return Encoded;
+}
+
+// ─── Bundle blob name generation ────────────────────────────────────────────
+
+static std::string
+GenerateBlobName()
+{
+ static std::atomic<uint32_t> s_Counter{0};
+
+ const int Pid = GetCurrentProcessId();
+
+ auto Now = std::chrono::steady_clock::now().time_since_epoch();
+ auto Ms = std::chrono::duration_cast<std::chrono::milliseconds>(Now).count();
+
+ ExtendableStringBuilder<64> Name;
+ Name << Pid << "_" << Ms << "_" << s_Counter.fetch_add(1);
+ return std::string(Name.ToView());
+}
+
+// ─── File info for bundling ─────────────────────────────────────────────────
+
+struct FileInfo
+{
+ std::filesystem::path Path;
+ std::string Name; // Filename only (for directory entry)
+ uint64_t FileSize;
+ IoHash ContentHash; // IoHash of file content
+ BLAKE3 StreamHash; // Full BLAKE3 for stream hash
+ int DirectoryExportImportIndex; // Import index referencing this file's root export
+ IoHash RootExportHash; // IoHash of the root export for this file
+};
+
+// ─── CreateBundle implementation ────────────────────────────────────────────
+
// Builds a single-packet V2 Horde bundle from Files and writes it to OutputDir:
//  - one ChunkLeaf export per small file (<= LargeFileThreshold),
//  - for large files, one ChunkLeaf export per 64KB chunk plus one
//    ChunkInterior export tying the chunks together,
//  - one Directory export listing every file,
// then wraps the packet via EncodePacket and writes "<name>.blob" plus a
// "<first file>.Bundle.ref" file containing the locator string.
// Returns false (after logging) on any I/O failure or if no input file exists;
// on success fills OutResult with the locator and output directory.
bool
BundleCreator::CreateBundle(const std::vector<BundleFile>& Files, const std::filesystem::path& OutputDir, BundleResult& OutResult)
{
    ZEN_TRACE_CPU("BundleCreator::CreateBundle");

    std::error_code Ec;

    // Collect files that exist. Missing optional files are skipped silently;
    // a missing required file aborts the whole bundle.
    std::vector<FileInfo> ValidFiles;
    for (const BundleFile& F : Files)
    {
        if (!std::filesystem::exists(F.Path, Ec))
        {
            if (F.Optional)
            {
                continue;
            }
            ZEN_ERROR("required bundle file does not exist: {}", F.Path.string());
            return false;
        }
        FileInfo Info;
        Info.Path = F.Path;
        Info.Name = F.Path.filename().string();
        Info.FileSize = std::filesystem::file_size(F.Path, Ec);
        if (Ec)
        {
            ZEN_ERROR("failed to get file size: {}", F.Path.string());
            return false;
        }
        ValidFiles.push_back(std::move(Info));
    }

    if (ValidFiles.empty())
    {
        ZEN_ERROR("no valid files to bundle");
        return false;
    }

    std::filesystem::create_directories(OutputDir, Ec);
    if (Ec)
    {
        ZEN_ERROR("failed to create output directory: {}", OutputDir.string());
        return false;
    }

    const std::string BlobName = GenerateBlobName();
    PacketBuilder Packet;

    // Process each file: create chunk exports and record, per file, the import
    // index the directory export will later use to reference its root export.
    for (FileInfo& Info : ValidFiles)
    {
        BasicFile File;
        File.Open(Info.Path, BasicFile::Mode::kRead, Ec);
        if (Ec)
        {
            ZEN_ERROR("failed to open file: {}", Info.Path.string());
            return false;
        }

        // Compute stream hash (full BLAKE3) and content hash (IoHash) while reading
        BLAKE3Stream StreamHasher;
        IoHashStream ContentHasher;

        if (Info.FileSize <= LargeFileThreshold)
        {
            // Small file: single chunk leaf export holding the whole content
            IoBuffer Content = File.ReadAll();
            const auto* Data = static_cast<const uint8_t*>(Content.GetData());
            const size_t Size = Content.GetSize();

            StreamHasher.Append(Data, Size);
            ContentHasher.Append(Data, Size);

            Packet.BeginExport();
            Packet.WritePayload(Data, Size);

            const IoHash ChunkHash = IoHash::HashBuffer(Data, Size);
            const int ExportIndex = Packet.CompleteExport(BlobType_ChunkLeafV1, {});
            // For a single-chunk file the root export IS the leaf chunk
            Info.RootExportHash = ChunkHash;
            Info.ContentHash = ContentHasher.GetHash();
            Info.StreamHash = StreamHasher.GetHash();

            // Add import for this file's root export (references export within same packet)
            ExtendableStringBuilder<32> Fragment;
            Fragment << "exp=" << ExportIndex;
            Info.DirectoryExportImportIndex = Packet.AddImport(CurrentPacketBaseIdx, std::string(Fragment.ToView()));
        }
        else
        {
            // Large file: split into fixed 64KB chunks, then create interior node
            std::vector<int> ChunkExportIndices;
            std::vector<IoHash> ChunkHashes;

            uint64_t Remaining = Info.FileSize;
            uint64_t Offset = 0;

            while (Remaining > 0)
            {
                // Last chunk may be shorter than ChunkSize
                const uint64_t ReadSize = std::min(static_cast<uint64_t>(ChunkSize), Remaining);
                IoBuffer Chunk = File.ReadRange(Offset, ReadSize);
                const auto* Data = static_cast<const uint8_t*>(Chunk.GetData());
                const size_t Size = Chunk.GetSize();

                StreamHasher.Append(Data, Size);
                ContentHasher.Append(Data, Size);

                Packet.BeginExport();
                Packet.WritePayload(Data, Size);

                const IoHash ChunkHash = IoHash::HashBuffer(Data, Size);
                const int ExpIdx = Packet.CompleteExport(BlobType_ChunkLeafV1, {});

                ChunkExportIndices.push_back(ExpIdx);
                ChunkHashes.push_back(ChunkHash);

                Offset += ReadSize;
                Remaining -= ReadSize;
            }

            Info.ContentHash = ContentHasher.GetHash();
            Info.StreamHash = StreamHasher.GetHash();

            // Create interior node referencing all chunk leaves.
            // Interior payload: for each child: [IoHash(20)][node_type=1(1)] + imports.
            // One import per chunk, in chunk order, so a reader can pair the
            // i-th payload entry with the i-th import.
            std::vector<int> InteriorImports;
            for (size_t i = 0; i < ChunkExportIndices.size(); ++i)
            {
                ExtendableStringBuilder<32> Fragment;
                Fragment << "exp=" << ChunkExportIndices[i];
                const int ImportIdx = Packet.AddImport(CurrentPacketBaseIdx, std::string(Fragment.ToView()));
                InteriorImports.push_back(ImportIdx);
            }

            Packet.BeginExport();

            // Write interior payload: [hash(20)][type(1)] per child
            for (size_t i = 0; i < ChunkHashes.size(); ++i)
            {
                Packet.WritePayload(ChunkHashes[i].Hash, sizeof(IoHash));
                const uint8_t NodeType = 1; // ChunkNode type
                Packet.WritePayload(&NodeType, 1);
            }

            // Hash the interior payload to get the interior node hash.
            // Must happen BEFORE CompleteExport appends the type/import metadata,
            // so only the payload bytes written above are hashed.
            const IoHash InteriorHash = IoHash::HashBuffer(Packet.Data.data() + (Packet.CurrentExportStart + 4),
                                                           Packet.Data.size() - (Packet.CurrentExportStart + 4));

            const int InteriorExportIndex = Packet.CompleteExport(BlobType_ChunkInteriorV2, InteriorImports);

            Info.RootExportHash = InteriorHash;

            // Add import for directory to reference this interior node
            ExtendableStringBuilder<32> Fragment;
            Fragment << "exp=" << InteriorExportIndex;
            Info.DirectoryExportImportIndex = Packet.AddImport(CurrentPacketBaseIdx, std::string(Fragment.ToView()));
        }
    }

    // Create directory node export
    // Payload: [flags(varint=0)] [file_count(varint)] [file_entries...] [dir_count(varint=0)]
    // FileEntry: [import(varint)] [IoHash(20)] [name(string)] [flags(varint)] [length(varint)] [IoHash_stream(20)]

    Packet.BeginExport();

    // Build directory payload into a temporary buffer, then write it
    std::vector<uint8_t> DirPayload;
    WriteVarInt(DirPayload, 0); // flags
    WriteVarInt(DirPayload, ValidFiles.size()); // file_count

    std::vector<int> DirImports;
    for (size_t i = 0; i < ValidFiles.size(); ++i)
    {
        FileInfo& Info = ValidFiles[i];
        // Import order must match file entry order: ReadBlobRef consumes the
        // export's import list sequentially (see comment below)
        DirImports.push_back(Info.DirectoryExportImportIndex);

        // IoHash of target (20 bytes) — import is consumed sequentially from the
        // export's import list by ReadBlobRef, not encoded in the payload
        WriteBytes(DirPayload, Info.RootExportHash.Hash, sizeof(IoHash));
        // name (string)
        WriteString(DirPayload, Info.Name);
        // flags (varint): 1 = Executable
        WriteVarInt(DirPayload, 1);
        // length (varint)
        WriteVarInt(DirPayload, static_cast<size_t>(Info.FileSize));
        // stream hash: IoHash from full BLAKE3, truncated to 20 bytes
        const IoHash StreamIoHash = IoHash::FromBLAKE3(Info.StreamHash);
        WriteBytes(DirPayload, StreamIoHash.Hash, sizeof(IoHash));
    }

    WriteVarInt(DirPayload, 0); // dir_count

    Packet.WritePayload(DirPayload.data(), DirPayload.size());
    const int DirExportIndex = Packet.CompleteExport(BlobType_DirectoryV1, DirImports);

    // Finalize packet and encode (uncompressed wrapper — see EncodePacket)
    std::vector<uint8_t> UncompressedPacket = Packet.Finish();
    std::vector<uint8_t> EncodedPacket = EncodePacket(std::move(UncompressedPacket));

    // Write .blob file
    const std::filesystem::path BlobFilePath = OutputDir / (BlobName + ".blob");
    {
        BasicFile BlobFile(BlobFilePath, BasicFile::Mode::kTruncate, Ec);
        if (Ec)
        {
            ZEN_ERROR("failed to create blob file: {}", BlobFilePath.string());
            return false;
        }
        BlobFile.Write(EncodedPacket.data(), EncodedPacket.size(), 0);
    }

    // Build locator: <blob_name>#pkt=0,<encoded_len>&exp=<dir_export_index>
    // (packet 0 spans the whole encoded blob; exp points at the directory)
    ExtendableStringBuilder<256> Locator;
    Locator << BlobName << "#pkt=0," << uint64_t(EncodedPacket.size()) << "&exp=" << DirExportIndex;
    const std::string LocatorStr(Locator.ToView());

    // Write .ref file (use first file's name as the ref base)
    const std::filesystem::path RefFilePath = OutputDir / (ValidFiles[0].Name + ".Bundle.ref");
    {
        BasicFile RefFile(RefFilePath, BasicFile::Mode::kTruncate, Ec);
        if (Ec)
        {
            ZEN_ERROR("failed to create ref file: {}", RefFilePath.string());
            return false;
        }
        RefFile.Write(LocatorStr.data(), LocatorStr.size(), 0);
    }

    OutResult.Locator = LocatorStr;
    OutResult.BundleDir = OutputDir;

    ZEN_INFO("created V2 bundle: blob={}.blob locator={} files={}", BlobName, LocatorStr, ValidFiles.size());
    return true;
}
+
+bool
+BundleCreator::ReadLocator(const std::filesystem::path& RefFile, std::string& OutLocator)
+{
+ BasicFile File;
+ std::error_code Ec;
+ File.Open(RefFile, BasicFile::Mode::kRead, Ec);
+ if (Ec)
+ {
+ return false;
+ }
+
+ IoBuffer Content = File.ReadAll();
+ OutLocator.assign(static_cast<const char*>(Content.GetData()), Content.GetSize());
+
+ // Strip trailing whitespace/newlines
+ while (!OutLocator.empty() && (OutLocator.back() == '\n' || OutLocator.back() == '\r' || OutLocator.back() == '\0'))
+ {
+ OutLocator.pop_back();
+ }
+
+ return !OutLocator.empty();
+}
+
+} // namespace zen::horde