aboutsummaryrefslogtreecommitdiff
path: root/zenstore/compactcas.cpp
diff options
context:
space:
mode:
authorStefan Boberg <[email protected]>2021-09-19 19:30:16 +0200
committerStefan Boberg <[email protected]>2021-09-19 19:30:16 +0200
commit8f82467ea5e8e90e459d78d603c67a7938ae8ead (patch)
treea7d1a3e2d897e9a53e075485c437c440f3684ce2 /zenstore/compactcas.cpp
parentAdded zenstore.h and made headers use it (diff)
downloadzen-8f82467ea5e8e90e459d78d603c67a7938ae8ead.tar.xz
zen-8f82467ea5e8e90e459d78d603c67a7938ae8ead.zip
Changed some code over from ATL to BasicFile and added Scrub() stubs.
Diffstat (limited to 'zenstore/compactcas.cpp')
-rw-r--r--zenstore/compactcas.cpp43
1 file changed, 36 insertions, 7 deletions
diff --git a/zenstore/compactcas.cpp b/zenstore/compactcas.cpp
index 4407d8b08..71d52e56a 100644
--- a/zenstore/compactcas.cpp
+++ b/zenstore/compactcas.cpp
@@ -10,13 +10,9 @@
#include <zencore/thread.h>
#include <zencore/uid.h>
-#include <gsl/gsl-lite.hpp>
-
-#include <functional>
-
-struct IUnknown; // Workaround for "combaseapi.h(229): error C2187: syntax error: 'identifier' was unexpected here" when using /permissive-
-#include <atlfile.h>
#include <filesystem>
+#include <functional>
+#include <gsl/gsl-lite.hpp>
//////////////////////////////////////////////////////////////////////////
@@ -43,7 +39,9 @@ CasContainerStrategy::Initialize(const std::string_view ContainerBaseName, uint6
uint64_t MaxFileOffset = 0;
{
- // This is not technically necessary but may help future static analysis
+ // This is not technically necessary (nobody should be accessing us from
+ // another thread at this stage) but may help static analysis
+
zen::RwLock::ExclusiveLockScope _(m_LocationMapLock);
m_CasLog.Replay([&](const CasDiskIndexEntry& Record) {
@@ -133,6 +131,13 @@ CasContainerStrategy::HaveChunk(const IoHash& ChunkHash)
void
CasContainerStrategy::FilterChunks(CasChunkSet& InOutChunks)
{
+ // This implementation is good enough for relatively small
+ // chunk sets (in terms of chunk identifiers), but would
+ // benefit from a better implementation which removes
+ // items incrementally for large sets, especially when
+ // we're likely to already have a large proportion of the
+ // chunks in the set
+
std::unordered_set<IoHash> HaveSet;
for (const IoHash& Hash : InOutChunks.GetChunkSet())
@@ -157,4 +162,28 @@ CasContainerStrategy::Flush()
m_SmallObjectFile.Flush();
}
+void
+CasContainerStrategy::Scrub()
+{
+ RwLock::SharedLockScope _(m_LocationMapLock);
+}
+
+void
+CasContainerStrategy::MakeSnapshot()
+{
+ RwLock::SharedLockScope _(m_LocationMapLock);
+
+ std::vector<CasDiskIndexEntry> Entries{m_LocationMap.size()};
+
+ uint64_t EntryIndex = 0;
+ for (auto& Entry : m_LocationMap)
+ {
+ CasDiskIndexEntry& IndexEntry = Entries[EntryIndex++];
+ IndexEntry.Key = Entry.first;
+ IndexEntry.Location = Entry.second;
+ }
+
+ m_SmallObjectIndex.Write(Entries.data(), Entries.size() * sizeof(CasDiskIndexEntry), 0);
+}
+
} // namespace zen