From fa24439ff3d8ab5b9efaf66ef4dae6713b88cb35 Mon Sep 17 00:00:00 2001 From: MarcoFalke Date: Sun, 13 Dec 2015 17:58:29 +0100 Subject: Bump copyright headers to 2015 --- src/support/allocators/secure.h | 2 +- src/support/allocators/zeroafterfree.h | 2 +- src/support/pagelocker.cpp | 2 +- src/support/pagelocker.h | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) (limited to 'src/support') diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 5e7bb66ea..1ec40fe83 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2013 The Bitcoin Core developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h index 41e23392e..28a940ad1 100644 --- a/src/support/allocators/zeroafterfree.h +++ b/src/support/allocators/zeroafterfree.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2013 The Bitcoin Core developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp index 440e0a519..7cea2d88c 100644 --- a/src/support/pagelocker.cpp +++ b/src/support/pagelocker.cpp @@ -1,4 +1,4 @@ -// Copyright (c) 2009-2013 The Bitcoin Core developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h index 88b95cce7..6b3979e55 100644 --- a/src/support/pagelocker.h +++ b/src/support/pagelocker.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2013 The Bitcoin Core developers +// Copyright (c) 2009-2015 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -- cgit v1.2.3 From 4731cab8fbff51a8178c85d572e2482040278616 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavel=20Jan=C3=ADk?= Date: Fri, 2 Sep 2016 18:19:01 +0200 Subject: Do not shadow variables --- src/support/pagelocker.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'src/support') diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h index 6b3979e55..538bf3945 100644 --- a/src/support/pagelocker.h +++ b/src/support/pagelocker.h @@ -28,11 +28,11 @@ template class LockedPageManagerBase { public: - LockedPageManagerBase(size_t page_size) : page_size(page_size) + LockedPageManagerBase(size_t _page_size) : page_size(_page_size) { // Determine bitmask for extracting page from address - assert(!(page_size & (page_size - 1))); // size must be power of two - page_mask = ~(page_size - 1); + assert(!(_page_size & (_page_size - 1))); // size must be power of two + page_mask = ~(_page_size - 1); } ~LockedPageManagerBase() -- cgit v1.2.3 From f4d1fc259b5a62580d952c180b1189ccaa6af1bc Mon Sep 17 00:00:00 2001 From: "Wladimir J. 
van der Laan" Date: Sun, 18 Sep 2016 08:40:14 +0200 Subject: wallet: Get rid of LockObject and UnlockObject calls in key.h Replace these with vectors allocated from the secure allocator. This avoids mlock syscall churn on stack pages, as well as makes it possible to get rid of these functions. Please review this commit and the previous one carefully that no `sizeof(vectortype)` remains in the memcpys and memcmps usage (ick!), and `.data()` or `&vec[x]` is used as appropriate instead of &vec. --- src/support/pagelocker.h | 17 ----------------- 1 file changed, 17 deletions(-) (limited to 'src/support') diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h index 538bf3945..042144fad 100644 --- a/src/support/pagelocker.h +++ b/src/support/pagelocker.h @@ -157,21 +157,4 @@ private: static boost::once_flag init_flag; }; -// -// Functions for directly locking/unlocking memory objects. -// Intended for non-dynamically allocated structures. -// -template -void LockObject(const T& t) -{ - LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T)); -} - -template -void UnlockObject(const T& t) -{ - memory_cleanse((void*)(&t), sizeof(T)); - LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T)); -} - #endif // BITCOIN_SUPPORT_PAGELOCKER_H -- cgit v1.2.3 From 4536148b15595229d0563fb60913b23cb78788ed Mon Sep 17 00:00:00 2001 From: "Wladimir J. van der Laan" Date: Sun, 18 Sep 2016 09:55:14 +0200 Subject: support: Add LockedPool Add a pool for locked memory chunks, replacing LockedPageManager. This is something I've been wanting to do for a long time. The current approach of locking objects where they happen to be on the stack or heap in-place causes a lot of mlock/munlock system call overhead, slowing down any handling of keys. Also locked memory is a limited resource on many operating systems (and using a lot of it bogs down the system), so the previous approach of locking every page that may contain any key information (but also other information) is wasteful. 
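For reviewers, a minimal usage sketch of the new pool (illustration only, not part of the patch; it assumes an in-tree build so that support/lockedpool.h and support/cleanse.h resolve):

    // Illustration only: the intended alloc/wipe/free flow for secret data.
    #include "support/lockedpool.h"
    #include "support/cleanse.h"

    #include <cassert>
    #include <cstdio>

    int main()
    {
        // Singleton pool; first use creates it together with its locked arena.
        LockedPool& pool = LockedPoolManager::Instance();

        // Grab 32 bytes of locked memory for key material.
        void* key = pool.alloc(32);
        assert(key != nullptr);

        // ... fill and use the secret bytes ...

        // Wipe and return the chunk; the arena itself stays locked for reuse.
        memory_cleanse(key, 32);
        pool.free(key);

        LockedPool::Stats st = pool.stats();
        std::printf("locked=%zu used=%zu free=%zu\n", st.locked, st.used, st.free);
        return 0;
    }

Within this patch the only caller is secure_allocator, whose allocate/deallocate perform exactly this alloc, cleanse and free sequence for STL containers holding secrets.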
--- src/support/allocators/secure.h | 12 +- src/support/lockedpool.cpp | 383 ++++++++++++++++++++++++++++++++++++++++ src/support/lockedpool.h | 251 ++++++++++++++++++++++++++ src/support/pagelocker.cpp | 70 -------- src/support/pagelocker.h | 160 ----------------- 5 files changed, 638 insertions(+), 238 deletions(-) create mode 100644 src/support/lockedpool.cpp create mode 100644 src/support/lockedpool.h delete mode 100644 src/support/pagelocker.cpp delete mode 100644 src/support/pagelocker.h (limited to 'src/support') diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 1ec40fe83..67064314e 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -6,7 +6,8 @@ #ifndef BITCOIN_SUPPORT_ALLOCATORS_SECURE_H #define BITCOIN_SUPPORT_ALLOCATORS_SECURE_H -#include "support/pagelocker.h" +#include "support/lockedpool.h" +#include "support/cleanse.h" #include @@ -39,20 +40,15 @@ struct secure_allocator : public std::allocator { T* allocate(std::size_t n, const void* hint = 0) { - T* p; - p = std::allocator::allocate(n, hint); - if (p != NULL) - LockedPageManager::Instance().LockRange(p, sizeof(T) * n); - return p; + return static_cast(LockedPoolManager::Instance().alloc(sizeof(T) * n)); } void deallocate(T* p, std::size_t n) { if (p != NULL) { memory_cleanse(p, sizeof(T) * n); - LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n); } - std::allocator::deallocate(p, n); + LockedPoolManager::Instance().free(p); } }; diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp new file mode 100644 index 000000000..63050f006 --- /dev/null +++ b/src/support/lockedpool.cpp @@ -0,0 +1,383 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#include "support/lockedpool.h" +#include "support/cleanse.h" + +#if defined(HAVE_CONFIG_H) +#include "config/bitcoin-config.h" +#endif + +#ifdef WIN32 +#ifdef _WIN32_WINNT +#undef _WIN32_WINNT +#endif +#define _WIN32_WINNT 0x0501 +#define WIN32_LEAN_AND_MEAN 1 +#ifndef NOMINMAX +#define NOMINMAX +#endif +#include +#else +#include // for mmap +#include // for getrlimit +#include // for PAGESIZE +#include // for sysconf +#endif + +LockedPoolManager* LockedPoolManager::_instance = NULL; +std::once_flag LockedPoolManager::init_flag; + +/*******************************************************************************/ +// Utilities +// +/** Align up to power of 2 */ +static inline size_t align_up(size_t x, size_t align) +{ + return (x + align - 1) & ~(align - 1); +} + +/*******************************************************************************/ +// Implementation: Arena + +Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): + base(static_cast(base_in)), end(static_cast(base_in) + size_in), alignment(alignment_in) +{ + // Start with one free chunk that covers the entire arena + chunks.emplace(base, Chunk(size_in, false)); +} + +Arena::~Arena() +{ +} + +void* Arena::alloc(size_t size) +{ + // Round to next multiple of alignment + size = align_up(size, alignment); + + // Don't handle zero-sized chunks, or those bigger than MAX_SIZE + if (size == 0 || size >= Chunk::MAX_SIZE) { + return nullptr; + } + + for (auto& chunk: chunks) { + if (!chunk.second.isInUse() && size <= chunk.second.getSize()) { + char* base = chunk.first; + size_t leftover = chunk.second.getSize() - size; + if (leftover > 0) { // Split chunk + chunks.emplace(base + size, Chunk(leftover, false)); + chunk.second.setSize(size); + } + chunk.second.setInUse(true); + return reinterpret_cast(base); + } + } + return nullptr; +} + +void Arena::free(void *ptr) +{ + // Freeing the NULL pointer is OK. + if (ptr == nullptr) { + return; + } + auto i = chunks.find(static_cast(ptr)); + if (i == chunks.end() || !i->second.isInUse()) { + throw std::runtime_error("Arena: invalid or double free"); + } + + i->second.setInUse(false); + + if (i != chunks.begin()) { // Absorb into previous chunk if exists and free + auto prev = i; + --prev; + if (!prev->second.isInUse()) { + // Absorb current chunk size into previous chunk. + prev->second.setSize(prev->second.getSize() + i->second.getSize()); + // Erase current chunk. Erasing does not invalidate current + // iterators for a map, except for that pointing to the object + // itself, which will be overwritten in the next statement. + chunks.erase(i); + // From here on, the previous chunk is our current chunk. + i = prev; + } + } + auto next = i; + ++next; + if (next != chunks.end()) { // Absorb next chunk if exists and free + if (!next->second.isInUse()) { + // Absurb next chunk size into current chunk + i->second.setSize(i->second.getSize() + next->second.getSize()); + // Erase next chunk. 
+ chunks.erase(next); + } + } +} + +Arena::Stats Arena::stats() const +{ + Arena::Stats r; + r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; + for (const auto& chunk: chunks) { + if (chunk.second.isInUse()) { + r.used += chunk.second.getSize(); + r.chunks_used += 1; + } else { + r.free += chunk.second.getSize(); + r.chunks_free += 1; + } + r.total += chunk.second.getSize(); + } + return r; +} + +#ifdef ARENA_DEBUG +void Arena::walk() const +{ + for (const auto& chunk: chunks) { + std::cout << + "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first << + " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() << + " 0x" << chunk.second.isInUse() << std::endl; + } + std::cout << std::endl; +} +#endif + +/*******************************************************************************/ +// Implementation: Win32LockedPageAllocator + +#ifdef WIN32 +/** LockedPageAllocator specialized for Windows. + */ +class Win32LockedPageAllocator: public LockedPageAllocator +{ +public: + Win32LockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +Win32LockedPageAllocator::Win32LockedPageAllocator() +{ + // Determine system page size in bytes + SYSTEM_INFO sSysInfo; + GetSystemInfo(&sSysInfo); + page_size = sSysInfo.dwPageSize; +} +void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + len = align_up(len, page_size); + void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE); + if (addr) { + // VirtualLock is used to attempt to keep keying material out of swap. Note + // that it does not provide this as a guarantee, but, in practice, memory + // that has been VirtualLock'd almost never gets written to the pagefile + // except in rare circumstances where memory is extremely low. + *lockingSuccess = VirtualLock(const_cast(addr), len) != 0; + } + return addr; +} +void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + VirtualUnlock(const_cast(addr), len); +} + +size_t Win32LockedPageAllocator::GetLimit() +{ + // TODO is there a limit on windows, how to get it? + return std::numeric_limits::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: PosixLockedPageAllocator + +#ifndef WIN32 +/** LockedPageAllocator specialized for OSes that don't try to be + * special snowflakes. 
+ */ +class PosixLockedPageAllocator: public LockedPageAllocator +{ +public: + PosixLockedPageAllocator(); + void* AllocateLocked(size_t len, bool *lockingSuccess); + void FreeLocked(void* addr, size_t len); + size_t GetLimit(); +private: + size_t page_size; +}; + +PosixLockedPageAllocator::PosixLockedPageAllocator() +{ + // Determine system page size in bytes +#if defined(PAGESIZE) // defined in limits.h + page_size = PAGESIZE; +#else // assume some POSIX OS + page_size = sysconf(_SC_PAGESIZE); +#endif +} +void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) +{ + void *addr; + len = align_up(len, page_size); + addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); + if (addr) { + *lockingSuccess = mlock(addr, len) == 0; + } + return addr; +} +void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len) +{ + len = align_up(len, page_size); + memory_cleanse(addr, len); + munlock(addr, len); + munmap(addr, len); +} +size_t PosixLockedPageAllocator::GetLimit() +{ +#ifdef RLIMIT_MEMLOCK + struct rlimit rlim; + if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) { + if (rlim.rlim_cur != RLIM_INFINITY) { + return rlim.rlim_cur; + } + } +#endif + return std::numeric_limits::max(); +} +#endif + +/*******************************************************************************/ +// Implementation: LockedPool + +LockedPool::LockedPool(std::unique_ptr allocator_in, LockingFailed_Callback lf_cb_in): + allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0) +{ +} + +LockedPool::~LockedPool() +{ +} +void* LockedPool::alloc(size_t size) +{ + std::lock_guard lock(mutex); + // Try allocating from each current arena + for (auto &arena: arenas) { + void *addr = arena.alloc(size); + if (addr) { + return addr; + } + } + // If that fails, create a new one + if (new_arena(ARENA_SIZE, ARENA_ALIGN)) { + return arenas.back().alloc(size); + } + return nullptr; +} + +void LockedPool::free(void *ptr) +{ + std::lock_guard lock(mutex); + // TODO we can do better than this linear search by keeping a map of arena + // extents to arena, and looking up the address. + for (auto &arena: arenas) { + if (arena.addressInArena(ptr)) { + arena.free(ptr); + return; + } + } + throw std::runtime_error("LockedPool: invalid address not pointing to any arena"); +} + +LockedPool::Stats LockedPool::stats() const +{ + std::lock_guard lock(mutex); + LockedPool::Stats r; + r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; + r.locked = cumulative_bytes_locked; + for (const auto &arena: arenas) { + Arena::Stats i = arena.stats(); + r.used += i.used; + r.free += i.free; + r.total += i.total; + r.chunks_used += i.chunks_used; + r.chunks_free += i.chunks_free; + } + return r; +} + +bool LockedPool::new_arena(size_t size, size_t align) +{ + bool locked; + // If this is the first arena, handle this specially: Cap the upper size + // by the process limit. This makes sure that the first arena will at least + // be locked. An exception to this is if the process limit is 0: + // in this case no memory can be locked at all so we'll skip past this logic. 
+ if (arenas.empty()) { + size_t limit = allocator->GetLimit(); + if (limit > 0) { + size = std::min(size, limit); + } + } + void *addr = allocator->AllocateLocked(size, &locked); + if (!addr) { + return false; + } + if (locked) { + cumulative_bytes_locked += size; + } else if (lf_cb) { // Call the locking-failed callback if locking failed + if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed. + allocator->FreeLocked(addr, size); + return false; + } + } + arenas.emplace_back(allocator.get(), addr, size, align); + return true; +} + +LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in): + Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in) +{ +} +LockedPool::LockedPageArena::~LockedPageArena() +{ + allocator->FreeLocked(base, size); +} + +/*******************************************************************************/ +// Implementation: LockedPoolManager +// +LockedPoolManager::LockedPoolManager(std::unique_ptr allocator): + LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed) +{ +} + +bool LockedPoolManager::LockingFailed() +{ + // TODO: log something but how? without including util.h + return true; +} + +void LockedPoolManager::CreateInstance() +{ + // Using a local static instance guarantees that the object is initialized + // when it's first needed and also deinitialized after all objects that use + // it are done with it. I can think of one unlikely scenario where we may + // have a static deinitialization order/problem, but the check in + // LockedPoolManagerBase's destructor helps us detect if that ever happens. +#ifdef WIN32 + std::unique_ptr allocator(new Win32LockedPageAllocator()); +#else + std::unique_ptr allocator(new PosixLockedPageAllocator()); +#endif + static LockedPoolManager instance(std::move(allocator)); + LockedPoolManager::_instance = &instance; +} diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h new file mode 100644 index 000000000..526c17a73 --- /dev/null +++ b/src/support/lockedpool.h @@ -0,0 +1,251 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H +#define BITCOIN_SUPPORT_LOCKEDPOOL_H + +#include +#include +#include +#include +#include + +/** + * OS-dependent allocation and deallocation of locked/pinned memory pages. + * Abstract base class. + */ +class LockedPageAllocator +{ +public: + virtual ~LockedPageAllocator() {} + /** Allocate and lock memory pages. + * If len is not a multiple of the system page size, it is rounded up. + * Returns 0 in case of allocation failure. + * + * If locking the memory pages could not be accomplished it will still + * return the memory, however the lockingSuccess flag will be false. + * lockingSuccess is undefined if the allocation fails. + */ + virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0; + + /** Unlock and free memory pages. + * Clear the memory before unlocking. + */ + virtual void FreeLocked(void* addr, size_t len) = 0; + + /** Get the total limit on the amount of memory that may be locked by this + * process, in bytes. Return size_t max if there is no limit or the limit + * is unknown. Return 0 if no memory can be locked at all. 
+ */ + virtual size_t GetLimit() = 0; +}; + +/* An arena manages a contiguous region of memory by dividing it into + * chunks. + */ +class Arena +{ +public: + Arena(void *base, size_t size, size_t alignment); + virtual ~Arena(); + + /** A chunk of memory. + */ + struct Chunk + { + /** Most significant bit of size_t. This is used to mark + * in-usedness of chunk. + */ + const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1); + /** Maximum size of a chunk */ + const static size_t MAX_SIZE = SIZE_MSB - 1; + + Chunk(size_t size_in, bool used_in): + size(size_in | (used_in ? SIZE_MSB : 0)) {} + + bool isInUse() const { return size & SIZE_MSB; } + void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); } + size_t getSize() const { return size & ~SIZE_MSB; } + void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; } + private: + size_t size; + }; + /** Memory statistics. */ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t chunks_used; + size_t chunks_free; + }; + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get arena usage statistics */ + Stats stats() const; + +#ifdef ARENA_DEBUG + void walk() const; +#endif + + /** Return whether a pointer points inside this arena. + * This returns base <= ptr < (base+size) so only use it for (inclusive) + * chunk starting addresses. + */ + bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; } +private: + Arena(const Arena& other) = delete; // non construction-copyable + Arena& operator=(const Arena&) = delete; // non copyable + + /** Map of chunk address to chunk information. This class makes use of the + * sorted order to merge previous and next chunks during deallocation. + */ + std::map chunks; + /** Base address of arena */ + char* base; + /** End address of arena */ + char* end; + /** Minimum chunk alignment */ + size_t alignment; +}; + +/** Pool for locked memory chunks. + * + * To avoid sensitive key data from being swapped to disk, the memory in this pool + * is locked/pinned. + * + * An arena manages a contiguous region of memory. The pool starts out with one arena + * but can grow to multiple arenas if the need arises. + * + * Unlike a normal C heap, the administrative structures are seperate from the managed + * memory. This has been done as the sizes and bases of objects are not in themselves sensitive + * information, as to conserve precious locked memory. In some operating systems + * the amount of memory that can be locked is small. + */ +class LockedPool +{ +public: + /** Size of one arena of locked memory. This is a compromise. + * Do not set this too low, as managing many arenas will increase + * allocation and deallocation overhead. Setting it too high allocates + * more locked memory from the OS than strictly necessary. + */ + static const size_t ARENA_SIZE = 256*1024; + /** Chunk alignment. Another compromise. Setting this too high will waste + * memory, setting it too low will facilitate fragmentation. + */ + static const size_t ARENA_ALIGN = 16; + + /** Callback when allocation succeeds but locking fails. + */ + typedef bool (*LockingFailed_Callback)(); + + /** Memory statistics. 
*/ + struct Stats + { + size_t used; + size_t free; + size_t total; + size_t locked; + size_t chunks_used; + size_t chunks_free; + }; + + /** Create a new LockedPool. This takes ownership of the MemoryPageLocker, + * you can only instantiate this with LockedPool(std::move(...)). + * + * The second argument is an optional callback when locking a newly allocated arena failed. + * If this callback is provided and returns false, the allocation fails (hard fail), if + * it returns true the allocation proceeds, but it could warn. + */ + LockedPool(std::unique_ptr allocator, LockingFailed_Callback lf_cb_in = 0); + ~LockedPool(); + + /** Allocate size bytes from this arena. + * Returns pointer on success, or 0 if memory is full or + * the application tried to allocate 0 bytes. + */ + void* alloc(size_t size); + + /** Free a previously allocated chunk of memory. + * Freeing the zero pointer has no effect. + * Raises std::runtime_error in case of error. + */ + void free(void *ptr); + + /** Get pool usage statistics */ + Stats stats() const; +private: + LockedPool(const LockedPool& other) = delete; // non construction-copyable + LockedPool& operator=(const LockedPool&) = delete; // non copyable + + std::unique_ptr allocator; + + /** Create an arena from locked pages */ + class LockedPageArena: public Arena + { + public: + LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align); + ~LockedPageArena(); + private: + void *base; + size_t size; + LockedPageAllocator *allocator; + }; + + bool new_arena(size_t size, size_t align); + + std::list arenas; + LockingFailed_Callback lf_cb; + size_t cumulative_bytes_locked; + /** Mutex protects access to this pool's data structures, including arenas. + */ + mutable std::mutex mutex; +}; + +/** + * Singleton class to keep track of locked (ie, non-swappable) memory, for use in + * std::allocator templates. + * + * Some implementations of the STL allocate memory in some constructors (i.e., see + * MSVC's vector implementation where it allocates 1 byte of memory in the allocator.) + * Due to the unpredictable order of static initializers, we have to make sure the + * LockedPoolManager instance exists before any other STL-based objects that use + * secure_allocator are created. So instead of having LockedPoolManager also be + * static-initialized, it is created on demand. + */ +class LockedPoolManager : public LockedPool +{ +public: + /** Return the current instance, or create it once */ + static LockedPoolManager& Instance() + { + std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance); + return *LockedPoolManager::_instance; + } + +private: + LockedPoolManager(std::unique_ptr allocator); + + /** Create a new LockedPoolManager specialized to the OS */ + static void CreateInstance(); + /** Called when locking fails, warn the user here */ + static bool LockingFailed(); + + static LockedPoolManager* _instance; + static std::once_flag init_flag; +}; + +#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H diff --git a/src/support/pagelocker.cpp b/src/support/pagelocker.cpp deleted file mode 100644 index 7cea2d88c..000000000 --- a/src/support/pagelocker.cpp +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
- -#include "support/pagelocker.h" - -#if defined(HAVE_CONFIG_H) -#include "config/bitcoin-config.h" -#endif - -#ifdef WIN32 -#ifdef _WIN32_WINNT -#undef _WIN32_WINNT -#endif -#define _WIN32_WINNT 0x0501 -#define WIN32_LEAN_AND_MEAN 1 -#ifndef NOMINMAX -#define NOMINMAX -#endif -#include -// This is used to attempt to keep keying material out of swap -// Note that VirtualLock does not provide this as a guarantee on Windows, -// but, in practice, memory that has been VirtualLock'd almost never gets written to -// the pagefile except in rare circumstances where memory is extremely low. -#else -#include -#include // for PAGESIZE -#include // for sysconf -#endif - -LockedPageManager* LockedPageManager::_instance = NULL; -boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT; - -/** Determine system page size in bytes */ -static inline size_t GetSystemPageSize() -{ - size_t page_size; -#if defined(WIN32) - SYSTEM_INFO sSysInfo; - GetSystemInfo(&sSysInfo); - page_size = sSysInfo.dwPageSize; -#elif defined(PAGESIZE) // defined in limits.h - page_size = PAGESIZE; -#else // assume some POSIX OS - page_size = sysconf(_SC_PAGESIZE); -#endif - return page_size; -} - -bool MemoryPageLocker::Lock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualLock(const_cast(addr), len) != 0; -#else - return mlock(addr, len) == 0; -#endif -} - -bool MemoryPageLocker::Unlock(const void* addr, size_t len) -{ -#ifdef WIN32 - return VirtualUnlock(const_cast(addr), len) != 0; -#else - return munlock(addr, len) == 0; -#endif -} - -LockedPageManager::LockedPageManager() : LockedPageManagerBase(GetSystemPageSize()) -{ -} diff --git a/src/support/pagelocker.h b/src/support/pagelocker.h deleted file mode 100644 index 042144fad..000000000 --- a/src/support/pagelocker.h +++ /dev/null @@ -1,160 +0,0 @@ -// Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin Core developers -// Distributed under the MIT software license, see the accompanying -// file COPYING or http://www.opensource.org/licenses/mit-license.php. - -#ifndef BITCOIN_SUPPORT_PAGELOCKER_H -#define BITCOIN_SUPPORT_PAGELOCKER_H - -#include "support/cleanse.h" - -#include - -#include -#include - -/** - * Thread-safe class to keep track of locked (ie, non-swappable) memory pages. - * - * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock() - * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when - * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page. - * - * @note By using a map from each page base address to lock count, this class is optimized for - * small objects that span up to a few pages, mostly smaller than a page. To support large allocations, - * something like an interval tree would be the preferred data structure. 
- */ -template -class LockedPageManagerBase -{ -public: - LockedPageManagerBase(size_t _page_size) : page_size(_page_size) - { - // Determine bitmask for extracting page from address - assert(!(_page_size & (_page_size - 1))); // size must be power of two - page_mask = ~(_page_size - 1); - } - - ~LockedPageManagerBase() - { - } - - - // For all pages in affected range, increase lock count - void LockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - if (it == histogram.end()) // Newly locked page - { - locker.Lock(reinterpret_cast(page), page_size); - histogram.insert(std::make_pair(page, 1)); - } else // Page was already locked; increase counter - { - it->second += 1; - } - } - } - - // For all pages in affected range, decrease lock count - void UnlockRange(void* p, size_t size) - { - boost::mutex::scoped_lock lock(mutex); - if (!size) - return; - const size_t base_addr = reinterpret_cast(p); - const size_t start_page = base_addr & page_mask; - const size_t end_page = (base_addr + size - 1) & page_mask; - for (size_t page = start_page; page <= end_page; page += page_size) { - Histogram::iterator it = histogram.find(page); - assert(it != histogram.end()); // Cannot unlock an area that was not locked - // Decrease counter for page, when it is zero, the page will be unlocked - it->second -= 1; - if (it->second == 0) // Nothing on the page anymore that keeps it locked - { - // Unlock page and remove the count from histogram - locker.Unlock(reinterpret_cast(page), page_size); - histogram.erase(it); - } - } - } - - // Get number of locked pages for diagnostics - int GetLockedPageCount() - { - boost::mutex::scoped_lock lock(mutex); - return histogram.size(); - } - -private: - Locker locker; - boost::mutex mutex; - size_t page_size, page_mask; - // map of page base address to lock count - typedef std::map Histogram; - Histogram histogram; -}; - - -/** - * OS-dependent memory page locking/unlocking. - * Defined as policy class to make stubbing for test possible. - */ -class MemoryPageLocker -{ -public: - /** Lock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Lock(const void* addr, size_t len); - /** Unlock memory pages. - * addr and len must be a multiple of the system page size - */ - bool Unlock(const void* addr, size_t len); -}; - -/** - * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in - * std::allocator templates. - * - * Some implementations of the STL allocate memory in some constructors (i.e., see - * MSVC's vector implementation where it allocates 1 byte of memory in the allocator.) - * Due to the unpredictable order of static initializers, we have to make sure the - * LockedPageManager instance exists before any other STL-based objects that use - * secure_allocator are created. So instead of having LockedPageManager also be - * static-initialized, it is created on demand. 
- */ -class LockedPageManager : public LockedPageManagerBase -{ -public: - static LockedPageManager& Instance() - { - boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag); - return *LockedPageManager::_instance; - } - -private: - LockedPageManager(); - - static void CreateInstance() - { - // Using a local static instance guarantees that the object is initialized - // when it's first needed and also deinitialized after all objects that use - // it are done with it. I can think of one unlikely scenario where we may - // have a static deinitialization order/problem, but the check in - // LockedPageManagerBase's destructor helps us detect if that ever happens. - static LockedPageManager instance; - LockedPageManager::_instance = &instance; - } - - static LockedPageManager* _instance; - static boost::once_flag init_flag; -}; - -#endif // BITCOIN_SUPPORT_PAGELOCKER_H -- cgit v1.2.3 From 4a9f3c50ccc8a467fda84bfd8ff7cfd831edb7c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Pavel=20Jan=C3=ADk?= Date: Wed, 2 Nov 2016 14:18:19 +0100 Subject: Do not shadow variable, use deprecated MAP_ANON if MAP_ANONYMOUS is not defined. --- src/support/lockedpool.cpp | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) (limited to 'src/support') diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 63050f006..813869a13 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -64,14 +64,14 @@ void* Arena::alloc(size_t size) for (auto& chunk: chunks) { if (!chunk.second.isInUse() && size <= chunk.second.getSize()) { - char* base = chunk.first; + char* _base = chunk.first; size_t leftover = chunk.second.getSize() - size; if (leftover > 0) { // Split chunk - chunks.emplace(base + size, Chunk(leftover, false)); + chunks.emplace(_base + size, Chunk(leftover, false)); chunk.second.setSize(size); } chunk.second.setInUse(true); - return reinterpret_cast(base); + return reinterpret_cast(_base); } } return nullptr; @@ -224,6 +224,13 @@ PosixLockedPageAllocator::PosixLockedPageAllocator() page_size = sysconf(_SC_PAGESIZE); #endif } + +// Some systems (at least OS X) do not define MAP_ANONYMOUS yet and define +// MAP_ANON which is deprecated +#ifndef MAP_ANONYMOUS +#define MAP_ANONYMOUS MAP_ANON +#endif + void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess) { void *addr; -- cgit v1.2.3 From 0b59f80625923978583efca08f8e763ea1710bb2 Mon Sep 17 00:00:00 2001 From: Kaz Wesley Date: Wed, 2 Nov 2016 14:11:07 -0700 Subject: LockedPool: fix explosion for illegal-sized alloc Check for unreasonable alloc size in LockedPool rather than lancing through new Arenas until we improbably find one worthy of the quixotic request or the system can support no more Arenas. 
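Not part of the patch, but for illustration, the behaviour this change pins down (in-tree headers assumed):

    // Illustration only: impossible requests now fail immediately instead of
    // repeatedly creating arenas that could never satisfy them.
    #include "support/lockedpool.h"
    #include <cassert>

    int main()
    {
        LockedPool& pool = LockedPoolManager::Instance();

        assert(pool.alloc(0) == nullptr);           // zero-sized: rejected up front
        assert(pool.alloc(1024 * 1024) == nullptr); // larger than ARENA_SIZE (256 KiB): rejected
        void* p = pool.alloc(4096);                 // sane request still succeeds
        assert(p != nullptr);
        pool.free(p);
        return 0;
    }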
--- src/support/lockedpool.cpp | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'src/support') diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index 813869a13..be5aac822 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -276,6 +276,11 @@ LockedPool::~LockedPool() void* LockedPool::alloc(size_t size) { std::lock_guard lock(mutex); + + // Don't handle impossible sizes + if (size == 0 || size > ARENA_SIZE) + return nullptr; + // Try allocating from each current arena for (auto &arena: arenas) { void *addr = arena.alloc(size); -- cgit v1.2.3 From b3ddc5e76f457d504b05273429e06e684e76f5de Mon Sep 17 00:00:00 2001 From: Kaz Wesley Date: Wed, 2 Nov 2016 14:09:03 -0700 Subject: LockedPool: avoid quadratic-time allocation Use separate maps for used/free chunks to avoid linear scan through alloced chunks for each alloc. --- src/support/lockedpool.cpp | 122 +++++++++++++++++++++------------------------ src/support/lockedpool.h | 24 +-------- 2 files changed, 58 insertions(+), 88 deletions(-) (limited to 'src/support') diff --git a/src/support/lockedpool.cpp b/src/support/lockedpool.cpp index be5aac822..01273c979 100644 --- a/src/support/lockedpool.cpp +++ b/src/support/lockedpool.cpp @@ -26,6 +26,8 @@ #include // for sysconf #endif +#include + LockedPoolManager* LockedPoolManager::_instance = NULL; std::once_flag LockedPoolManager::init_flag; @@ -45,7 +47,7 @@ Arena::Arena(void *base_in, size_t size_in, size_t alignment_in): base(static_cast(base_in)), end(static_cast(base_in) + size_in), alignment(alignment_in) { // Start with one free chunk that covers the entire arena - chunks.emplace(base, Chunk(size_in, false)); + chunks_free.emplace(base, size_in); } Arena::~Arena() @@ -57,24 +59,30 @@ void* Arena::alloc(size_t size) // Round to next multiple of alignment size = align_up(size, alignment); - // Don't handle zero-sized chunks, or those bigger than MAX_SIZE - if (size == 0 || size >= Chunk::MAX_SIZE) { + // Don't handle zero-sized chunks + if (size == 0) return nullptr; - } - for (auto& chunk: chunks) { - if (!chunk.second.isInUse() && size <= chunk.second.getSize()) { - char* _base = chunk.first; - size_t leftover = chunk.second.getSize() - size; - if (leftover > 0) { // Split chunk - chunks.emplace(_base + size, Chunk(leftover, false)); - chunk.second.setSize(size); - } - chunk.second.setInUse(true); - return reinterpret_cast(_base); - } + // Pick a large enough free-chunk + auto it = std::find_if(chunks_free.begin(), chunks_free.end(), + [=](const std::map::value_type& chunk){ return chunk.second >= size; }); + if (it == chunks_free.end()) + return nullptr; + + // Create the used-chunk, taking its space from the end of the free-chunk + auto alloced = chunks_used.emplace(it->first + it->second - size, size).first; + if (!(it->second -= size)) + chunks_free.erase(it); + return reinterpret_cast(alloced->first); +} + +/* extend the Iterator if other begins at its end */ +template bool extend(Iterator it, const Pair& other) { + if (it->first + it->second == other.first) { + it->second += other.second; + return true; } - return nullptr; + return false; } void Arena::free(void *ptr) @@ -83,65 +91,49 @@ void Arena::free(void *ptr) if (ptr == nullptr) { return; } - auto i = chunks.find(static_cast(ptr)); - if (i == chunks.end() || !i->second.isInUse()) { - throw std::runtime_error("Arena: invalid or double free"); - } - i->second.setInUse(false); - - if (i != chunks.begin()) { // Absorb into previous chunk if exists and free - auto prev = i; - --prev; - 
if (!prev->second.isInUse()) { - // Absorb current chunk size into previous chunk. - prev->second.setSize(prev->second.getSize() + i->second.getSize()); - // Erase current chunk. Erasing does not invalidate current - // iterators for a map, except for that pointing to the object - // itself, which will be overwritten in the next statement. - chunks.erase(i); - // From here on, the previous chunk is our current chunk. - i = prev; - } - } - auto next = i; - ++next; - if (next != chunks.end()) { // Absorb next chunk if exists and free - if (!next->second.isInUse()) { - // Absurb next chunk size into current chunk - i->second.setSize(i->second.getSize() + next->second.getSize()); - // Erase next chunk. - chunks.erase(next); - } + // Remove chunk from used map + auto i = chunks_used.find(static_cast(ptr)); + if (i == chunks_used.end()) { + throw std::runtime_error("Arena: invalid or double free"); } + auto freed = *i; + chunks_used.erase(i); + + // Add space to free map, coalescing contiguous chunks + auto next = chunks_free.upper_bound(freed.first); + auto prev = (next == chunks_free.begin()) ? chunks_free.end() : std::prev(next); + if (prev == chunks_free.end() || !extend(prev, freed)) + prev = chunks_free.emplace_hint(next, freed); + if (next != chunks_free.end() && extend(prev, *next)) + chunks_free.erase(next); } Arena::Stats Arena::stats() const { - Arena::Stats r; - r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; - for (const auto& chunk: chunks) { - if (chunk.second.isInUse()) { - r.used += chunk.second.getSize(); - r.chunks_used += 1; - } else { - r.free += chunk.second.getSize(); - r.chunks_free += 1; - } - r.total += chunk.second.getSize(); - } + Arena::Stats r{ 0, 0, 0, chunks_used.size(), chunks_free.size() }; + for (const auto& chunk: chunks_used) + r.used += chunk.second; + for (const auto& chunk: chunks_free) + r.free += chunk.second; + r.total = r.used + r.free; return r; } #ifdef ARENA_DEBUG +void printchunk(char* base, size_t sz, bool used) { + std::cout << + "0x" << std::hex << std::setw(16) << std::setfill('0') << base << + " 0x" << std::hex << std::setw(16) << std::setfill('0') << sz << + " 0x" << used << std::endl; +} void Arena::walk() const { - for (const auto& chunk: chunks) { - std::cout << - "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first << - " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() << - " 0x" << chunk.second.isInUse() << std::endl; - } + for (const auto& chunk: chunks_used) + printchunk(chunk.first, chunk.second, true); + std::cout << std::endl; + for (const auto& chunk: chunks_free) + printchunk(chunk.first, chunk.second, false); std::cout << std::endl; } #endif @@ -312,9 +304,7 @@ void LockedPool::free(void *ptr) LockedPool::Stats LockedPool::stats() const { std::lock_guard lock(mutex); - LockedPool::Stats r; - r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0; - r.locked = cumulative_bytes_locked; + LockedPool::Stats r{0, 0, 0, cumulative_bytes_locked, 0, 0}; for (const auto &arena: arenas) { Arena::Stats i = arena.stats(); r.used += i.used; diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index 526c17a73..340341543 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -50,27 +50,6 @@ public: Arena(void *base, size_t size, size_t alignment); virtual ~Arena(); - /** A chunk of memory. - */ - struct Chunk - { - /** Most significant bit of size_t. This is used to mark - * in-usedness of chunk. 
- */ - const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1); - /** Maximum size of a chunk */ - const static size_t MAX_SIZE = SIZE_MSB - 1; - - Chunk(size_t size_in, bool used_in): - size(size_in | (used_in ? SIZE_MSB : 0)) {} - - bool isInUse() const { return size & SIZE_MSB; } - void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); } - size_t getSize() const { return size & ~SIZE_MSB; } - void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; } - private: - size_t size; - }; /** Memory statistics. */ struct Stats { @@ -112,7 +91,8 @@ private: /** Map of chunk address to chunk information. This class makes use of the * sorted order to merge previous and next chunks during deallocation. */ - std::map chunks; + std::map chunks_free; + std::map chunks_used; /** Base address of arena */ char* base; /** End address of arena */ -- cgit v1.2.3 From 15fa95d7e576041a20d47907356a2be5a327b4ca Mon Sep 17 00:00:00 2001 From: fsb4000 Date: Mon, 28 Nov 2016 15:19:05 +0700 Subject: Fix some typos --- src/support/lockedpool.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/support') diff --git a/src/support/lockedpool.h b/src/support/lockedpool.h index 340341543..f5212bc26 100644 --- a/src/support/lockedpool.h +++ b/src/support/lockedpool.h @@ -109,7 +109,7 @@ private: * An arena manages a contiguous region of memory. The pool starts out with one arena * but can grow to multiple arenas if the need arises. * - * Unlike a normal C heap, the administrative structures are seperate from the managed + * Unlike a normal C heap, the administrative structures are separate from the managed * memory. This has been done as the sizes and bases of objects are not in themselves sensitive * information, as to conserve precious locked memory. In some operating systems * the amount of memory that can be locked is small. -- cgit v1.2.3 From e5534d2f0198e341c40e4ffa606fb65f3cd06863 Mon Sep 17 00:00:00 2001 From: Karl-Johan Alm Date: Tue, 20 Dec 2016 15:59:42 +0900 Subject: Added std::unique_ptr<> wrappers with deleters for libevent modules. --- src/support/events.h | 55 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) create mode 100644 src/support/events.h (limited to 'src/support') diff --git a/src/support/events.h b/src/support/events.h new file mode 100644 index 000000000..16378086f --- /dev/null +++ b/src/support/events.h @@ -0,0 +1,55 @@ +// Copyright (c) 2016 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_SUPPORT_EVENTS_H +#define BITCOIN_SUPPORT_EVENTS_H + +#include + +#include +#include + +#define MAKE_RAII(type) \ +/* deleter */\ +struct type##_deleter {\ + void operator()(struct type* ob) {\ + type##_free(ob);\ + }\ +};\ +/* unique ptr typedef */\ +typedef std::unique_ptr raii_##type + +MAKE_RAII(event_base); +MAKE_RAII(event); +MAKE_RAII(evhttp); +MAKE_RAII(evhttp_request); +MAKE_RAII(evhttp_connection); + +raii_event_base obtain_event_base() { + auto result = raii_event_base(event_base_new()); + if (!result.get()) + throw std::runtime_error("cannot create event_base"); + return result; +} + +raii_event obtain_event(struct event_base* base, evutil_socket_t s, short events, event_callback_fn cb, void* arg) { + return raii_event(event_new(base, s, events, cb, arg)); +} + +raii_evhttp obtain_evhttp(struct event_base* base) { + return raii_evhttp(evhttp_new(base)); +} + +raii_evhttp_request obtain_evhttp_request(void(*cb)(struct evhttp_request *, void *), void *arg) { + return raii_evhttp_request(evhttp_request_new(cb, arg)); +} + +raii_evhttp_connection obtain_evhttp_connection_base(struct event_base* base, std::string host, uint16_t port) { + auto result = raii_evhttp_connection(evhttp_connection_base_new(base, NULL, host.c_str(), port)); + if (!result.get()) + throw std::runtime_error("create connection failed"); + return result; +} + +#endif // BITCOIN_SUPPORT_EVENTS_H -- cgit v1.2.3 From 280a5599ebccbb54fb650f5e3939b0878b2f667a Mon Sep 17 00:00:00 2001 From: Karl-Johan Alm Date: Wed, 21 Dec 2016 13:43:49 +0900 Subject: Added some simple tests for the RAII-style events. --- src/support/events.h | 1 + 1 file changed, 1 insertion(+) (limited to 'src/support') diff --git a/src/support/events.h b/src/support/events.h index 16378086f..4f2f3cf9e 100644 --- a/src/support/events.h +++ b/src/support/events.h @@ -6,6 +6,7 @@ #define BITCOIN_SUPPORT_EVENTS_H #include +#include #include #include -- cgit v1.2.3 From 27765b6403cece54320374b37afb01a0cfe571c3 Mon Sep 17 00:00:00 2001 From: isle2983 Date: Sat, 31 Dec 2016 11:01:21 -0700 Subject: Increment MIT Licence copyright header year on files modified in 2016 Edited via: $ contrib/devtools/copyright_header.py update . --- src/support/allocators/secure.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'src/support') diff --git a/src/support/allocators/secure.h b/src/support/allocators/secure.h index 67064314e..9daba86ef 100644 --- a/src/support/allocators/secure.h +++ b/src/support/allocators/secure.h @@ -1,5 +1,5 @@ // Copyright (c) 2009-2010 Satoshi Nakamoto -// Copyright (c) 2009-2015 The Bitcoin Core developers +// Copyright (c) 2009-2016 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -- cgit v1.2.3 From 28b8b8b6038c45ce2dc2e7dc11f88dc834ddd19a Mon Sep 17 00:00:00 2001 From: Thomas Snider Date: Thu, 23 Mar 2017 14:07:51 -0700 Subject: [wallet] Securely erase potentially sensitive keys/values Github-Pull: #10308 Rebased-From: 6c914ac176624468c66febdb1ad0e24ff2118a5f --- src/support/cleanse.h | 1 + 1 file changed, 1 insertion(+) (limited to 'src/support') diff --git a/src/support/cleanse.h b/src/support/cleanse.h index 3e02aa8fd..f020216c7 100644 --- a/src/support/cleanse.h +++ b/src/support/cleanse.h @@ -8,6 +8,7 @@ #include +// Attempt to overwrite data in the specified memory span. void memory_cleanse(void *ptr, size_t len); #endif // BITCOIN_SUPPORT_CLEANSE_H -- cgit v1.2.3
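A usage sketch for the RAII libevent wrappers added above (illustration only; it assumes the libevent development headers and an in-tree build so that support/events.h resolves):

    // Illustration only: a one-shot timer. Both the event_base and the event
    // are released automatically by the unique_ptr deleters from events.h.
    #include "support/events.h"

    static void timeout_cb(evutil_socket_t, short, void* arg)
    {
        // Stop the loop once the timer fires.
        event_base_loopbreak(static_cast<struct event_base*>(arg));
    }

    int main()
    {
        raii_event_base base = obtain_event_base(); // throws std::runtime_error on failure

        struct timeval one_second = {1, 0};
        raii_event timer = obtain_event(base.get(), -1, EV_TIMEOUT, timeout_cb, base.get());
        event_add(timer.get(), &one_second);

        event_base_dispatch(base.get()); // returns after timeout_cb breaks the loop
        return 0;
    }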