0.14
444c673 bench: Add benchmark for lockedpool allocation/deallocation (Wladimir J. van der Laan)
6567999 rpc: Add `getmemoryinfo` call (Wladimir J. van der Laan)
4536148 support: Add LockedPool (Wladimir J. van der Laan)
f4d1fc2 wallet: Get rid of LockObject and UnlockObject calls in key.h (Wladimir J. van der Laan)
999e4c9 wallet: Change CCrypter to use vectors with secure allocator (Wladimir J. van der Laan)
Wladimir J. van der Laan
8 years ago
15 changed files with 966 additions and 410 deletions
src/bench/lockedpool.cpp
@@ -0,0 +1,47 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "bench.h"

#include "support/lockedpool.h"

#include <iostream>
#include <vector>

#define ASIZE 2048
#define BITER 5000
#define MSIZE 2048

static void LockedPool(benchmark::State& state)
{
    void *synth_base = reinterpret_cast<void*>(0x08000000);
    const size_t synth_size = 1024*1024;
    Arena b(synth_base, synth_size, 16);

    std::vector<void*> addr;
    for (int x=0; x<ASIZE; ++x)
        addr.push_back(0);
    uint32_t s = 0x12345678;
    while (state.KeepRunning()) {
        for (int x=0; x<BITER; ++x) {
            int idx = s & (addr.size()-1);
            if (s & 0x80000000) {
                b.free(addr[idx]);
                addr[idx] = 0;
            } else if(!addr[idx]) {
                addr[idx] = b.alloc((s >> 16) & (MSIZE-1));
            }
            bool lsb = s & 1;
            s >>= 1;
            if (lsb)
                s ^= 0xf00f00f0; // LFSR period 0xf7ffffe0
        }
    }
    for (void *ptr: addr)
        b.free(ptr);
    addr.clear();
}

BENCHMARK(LockedPool);
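The benchmark above drives a single Arena with a pseudo-random alloc/free pattern generated by a 32-bit LFSR. For a more direct view of the interface it exercises, the following is a minimal sketch (not part of this commit) written against the Arena declaration from support/lockedpool.h further down in this diff. It relies on the same property the benchmark does: Arena only keeps bookkeeping for the address range it is handed and never dereferences it, so a synthetic base address is safe.

// Minimal sketch, not from this commit: exercise Arena's alloc/free/stats
// directly, using a synthetic base address just like the benchmark above.
#include "support/lockedpool.h"

#include <cassert>
#include <cstdio>

int main()
{
    void* synth_base = reinterpret_cast<void*>(0x08000000);
    Arena a(synth_base, 1024*1024, 16);

    void* p1 = a.alloc(1000); // request is rounded up to the 16-byte alignment
    void* p2 = a.alloc(500);
    assert(p1 && p2 && a.addressInArena(p1));

    a.free(p1);
    a.free(p2); // adjacent free chunks are merged back together

    Arena::Stats s = a.stats();
    std::printf("total=%zu used=%zu free chunks=%zu\n", s.total, s.used, s.chunks_free);
    assert(s.used == 0 && s.chunks_used == 0 && s.chunks_free == 1);
    return 0;
}

After both frees, the split chunks merge back into the single free chunk the arena started with, which is what the final assert checks.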
src/support/lockedpool.cpp
@@ -0,0 +1,383 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "support/lockedpool.h"
#include "support/cleanse.h"

#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#else
#include <sys/mman.h> // for mmap
#include <sys/resource.h> // for getrlimit
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif

LockedPoolManager* LockedPoolManager::_instance = NULL;
std::once_flag LockedPoolManager::init_flag;

/*******************************************************************************/
// Utilities
//
/** Align up to power of 2 */
static inline size_t align_up(size_t x, size_t align)
{
    return (x + align - 1) & ~(align - 1);
}

/*******************************************************************************/
// Implementation: Arena

Arena::Arena(void *base_in, size_t size_in, size_t alignment_in):
    base(static_cast<char*>(base_in)), end(static_cast<char*>(base_in) + size_in), alignment(alignment_in)
{
    // Start with one free chunk that covers the entire arena
    chunks.emplace(base, Chunk(size_in, false));
}

Arena::~Arena()
{
}

void* Arena::alloc(size_t size)
{
    // Round to next multiple of alignment
    size = align_up(size, alignment);

    // Don't handle zero-sized chunks, or those bigger than MAX_SIZE
    if (size == 0 || size >= Chunk::MAX_SIZE) {
        return nullptr;
    }

    for (auto& chunk: chunks) {
        if (!chunk.second.isInUse() && size <= chunk.second.getSize()) {
            char* base = chunk.first;
            size_t leftover = chunk.second.getSize() - size;
            if (leftover > 0) { // Split chunk
                chunks.emplace(base + size, Chunk(leftover, false));
                chunk.second.setSize(size);
            }
            chunk.second.setInUse(true);
            return reinterpret_cast<void*>(base);
        }
    }
    return nullptr;
}

void Arena::free(void *ptr)
{
    // Freeing the NULL pointer is OK.
    if (ptr == nullptr) {
        return;
    }
    auto i = chunks.find(static_cast<char*>(ptr));
    if (i == chunks.end() || !i->second.isInUse()) {
        throw std::runtime_error("Arena: invalid or double free");
    }

    i->second.setInUse(false);

    if (i != chunks.begin()) { // Absorb into previous chunk if it exists and is free
        auto prev = i;
        --prev;
        if (!prev->second.isInUse()) {
            // Absorb current chunk size into previous chunk.
            prev->second.setSize(prev->second.getSize() + i->second.getSize());
            // Erase current chunk. Erasing does not invalidate current
            // iterators for a map, except for the one pointing to the object
            // itself, which will be overwritten in the next statement.
            chunks.erase(i);
            // From here on, the previous chunk is our current chunk.
            i = prev;
        }
    }
    auto next = i;
    ++next;
    if (next != chunks.end()) { // Absorb next chunk if it exists and is free
        if (!next->second.isInUse()) {
            // Absorb next chunk size into current chunk
            i->second.setSize(i->second.getSize() + next->second.getSize());
            // Erase next chunk.
            chunks.erase(next);
        }
    }
}

Arena::Stats Arena::stats() const
{
    Arena::Stats r;
    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
    for (const auto& chunk: chunks) {
        if (chunk.second.isInUse()) {
            r.used += chunk.second.getSize();
            r.chunks_used += 1;
        } else {
            r.free += chunk.second.getSize();
            r.chunks_free += 1;
        }
        r.total += chunk.second.getSize();
    }
    return r;
}

#ifdef ARENA_DEBUG
void Arena::walk() const
{
    for (const auto& chunk: chunks) {
        std::cout <<
            "0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.first <<
            " 0x" << std::hex << std::setw(16) << std::setfill('0') << chunk.second.getSize() <<
            " 0x" << chunk.second.isInUse() << std::endl;
    }
    std::cout << std::endl;
}
#endif

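The split and merge logic above relies on the Chunk bookkeeping word declared in support/lockedpool.h below, where the most significant bit of the stored size marks whether the chunk is in use. A short worked sketch of that encoding (illustrative only, not part of the commit):

// Sketch, not from this commit: the Chunk bookkeeping word on a 64-bit build.
#include "support/lockedpool.h"

#include <cassert>

void chunk_encoding_example()
{
    Arena::Chunk c(1008, true);   // stored as SIZE_MSB | 1008
    assert(c.isInUse());
    assert(c.getSize() == 1008);  // the flag bit is masked out of the size

    c.setInUse(false);            // clears only the flag bit
    assert(!c.isInUse() && c.getSize() == 1008);

    c.setSize(2048);              // preserves the (now cleared) flag bit
    assert(c.getSize() == 2048 && !c.isInUse());
}
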
/*******************************************************************************/
// Implementation: Win32LockedPageAllocator

#ifdef WIN32
/** LockedPageAllocator specialized for Windows.
 */
class Win32LockedPageAllocator: public LockedPageAllocator
{
public:
    Win32LockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess);
    void FreeLocked(void* addr, size_t len);
    size_t GetLimit();
private:
    size_t page_size;
};

Win32LockedPageAllocator::Win32LockedPageAllocator()
{
    // Determine system page size in bytes
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
}
void *Win32LockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    len = align_up(len, page_size);
    void *addr = VirtualAlloc(nullptr, len, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE);
    if (addr) {
        // VirtualLock is used to attempt to keep keying material out of swap. Note
        // that it does not provide this as a guarantee, but, in practice, memory
        // that has been VirtualLock'd almost never gets written to the pagefile
        // except in rare circumstances where memory is extremely low.
        *lockingSuccess = VirtualLock(const_cast<void*>(addr), len) != 0;
    }
    return addr;
}
void Win32LockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    VirtualUnlock(const_cast<void*>(addr), len);
}

size_t Win32LockedPageAllocator::GetLimit()
{
    // TODO is there a limit on windows, how to get it?
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: PosixLockedPageAllocator

#ifndef WIN32
/** LockedPageAllocator specialized for OSes that don't try to be
 * special snowflakes.
 */
class PosixLockedPageAllocator: public LockedPageAllocator
{
public:
    PosixLockedPageAllocator();
    void* AllocateLocked(size_t len, bool *lockingSuccess);
    void FreeLocked(void* addr, size_t len);
    size_t GetLimit();
private:
    size_t page_size;
};

PosixLockedPageAllocator::PosixLockedPageAllocator()
{
    // Determine system page size in bytes
#if defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
}
void *PosixLockedPageAllocator::AllocateLocked(size_t len, bool *lockingSuccess)
{
    void *addr;
    len = align_up(len, page_size);
    addr = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (addr) {
        *lockingSuccess = mlock(addr, len) == 0;
    }
    return addr;
}
void PosixLockedPageAllocator::FreeLocked(void* addr, size_t len)
{
    len = align_up(len, page_size);
    memory_cleanse(addr, len);
    munlock(addr, len);
    munmap(addr, len);
}
size_t PosixLockedPageAllocator::GetLimit()
{
#ifdef RLIMIT_MEMLOCK
    struct rlimit rlim;
    if (getrlimit(RLIMIT_MEMLOCK, &rlim) == 0) {
        if (rlim.rlim_cur != RLIM_INFINITY) {
            return rlim.rlim_cur;
        }
    }
#endif
    return std::numeric_limits<size_t>::max();
}
#endif

/*******************************************************************************/
// Implementation: LockedPool

LockedPool::LockedPool(std::unique_ptr<LockedPageAllocator> allocator_in, LockingFailed_Callback lf_cb_in):
    allocator(std::move(allocator_in)), lf_cb(lf_cb_in), cumulative_bytes_locked(0)
{
}

LockedPool::~LockedPool()
{
}
void* LockedPool::alloc(size_t size)
{
    std::lock_guard<std::mutex> lock(mutex);
    // Try allocating from each current arena
    for (auto &arena: arenas) {
        void *addr = arena.alloc(size);
        if (addr) {
            return addr;
        }
    }
    // If that fails, create a new one
    if (new_arena(ARENA_SIZE, ARENA_ALIGN)) {
        return arenas.back().alloc(size);
    }
    return nullptr;
}

void LockedPool::free(void *ptr)
{
    std::lock_guard<std::mutex> lock(mutex);
    // TODO we can do better than this linear search by keeping a map of arena
    // extents to arena, and looking up the address.
    for (auto &arena: arenas) {
        if (arena.addressInArena(ptr)) {
            arena.free(ptr);
            return;
        }
    }
    throw std::runtime_error("LockedPool: invalid address not pointing to any arena");
}

LockedPool::Stats LockedPool::stats() const
{
    std::lock_guard<std::mutex> lock(mutex);
    LockedPool::Stats r;
    r.used = r.free = r.total = r.chunks_used = r.chunks_free = 0;
    r.locked = cumulative_bytes_locked;
    for (const auto &arena: arenas) {
        Arena::Stats i = arena.stats();
        r.used += i.used;
        r.free += i.free;
        r.total += i.total;
        r.chunks_used += i.chunks_used;
        r.chunks_free += i.chunks_free;
    }
    return r;
}

bool LockedPool::new_arena(size_t size, size_t align)
{
    bool locked;
    // If this is the first arena, handle this specially: Cap the upper size
    // by the process limit. This makes sure that the first arena will at least
    // be locked. An exception to this is if the process limit is 0:
    // in this case no memory can be locked at all so we'll skip past this logic.
    if (arenas.empty()) {
        size_t limit = allocator->GetLimit();
        if (limit > 0) {
            size = std::min(size, limit);
        }
    }
    void *addr = allocator->AllocateLocked(size, &locked);
    if (!addr) {
        return false;
    }
    if (locked) {
        cumulative_bytes_locked += size;
    } else if (lf_cb) { // Call the locking-failed callback if locking failed
        if (!lf_cb()) { // If the callback returns false, free the memory and fail, otherwise consider the user warned and proceed.
            allocator->FreeLocked(addr, size);
            return false;
        }
    }
    arenas.emplace_back(allocator.get(), addr, size, align);
    return true;
}

LockedPool::LockedPageArena::LockedPageArena(LockedPageAllocator *allocator_in, void *base_in, size_t size_in, size_t align_in):
    Arena(base_in, size_in, align_in), base(base_in), size(size_in), allocator(allocator_in)
{
}
LockedPool::LockedPageArena::~LockedPageArena()
{
    allocator->FreeLocked(base, size);
}

/*******************************************************************************/
// Implementation: LockedPoolManager
//
LockedPoolManager::LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator):
    LockedPool(std::move(allocator), &LockedPoolManager::LockingFailed)
{
}

bool LockedPoolManager::LockingFailed()
{
    // TODO: log something but how? without including util.h
    return true;
}

void LockedPoolManager::CreateInstance()
{
    // Using a local static instance guarantees that the object is initialized
    // when it's first needed and also deinitialized after all objects that use
    // it are done with it. I can think of one unlikely scenario where we may
    // have a static deinitialization order/problem, but the check in
    // LockedPoolManagerBase's destructor helps us detect if that ever happens.
#ifdef WIN32
    std::unique_ptr<LockedPageAllocator> allocator(new Win32LockedPageAllocator());
#else
    std::unique_ptr<LockedPageAllocator> allocator(new PosixLockedPageAllocator());
#endif
    static LockedPoolManager instance(std::move(allocator));
    LockedPoolManager::_instance = &instance;
}
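The wallet-side consumers of this pool live in other files of this PR, so as context only, here is a hedged sketch of how a caller might route secret allocations through the singleton manager. The names secure_alloc and secure_free are made up for illustration, and throwing std::bad_alloc on failure is the caller's policy, not something LockedPool imposes.

// Illustrative sketch only, not part of this file: obtaining and releasing
// locked memory through LockedPoolManager, roughly the way a secure
// allocator would. LockedPool::alloc() returns nullptr when the pool cannot
// grow any further.
#include "support/lockedpool.h"
#include "support/cleanse.h"

#include <cstddef>
#include <new> // std::bad_alloc

void* secure_alloc(std::size_t n)
{
    void* p = LockedPoolManager::Instance().alloc(n);
    if (!p) {
        throw std::bad_alloc(); // caller's choice of failure handling
    }
    return p;
}

void secure_free(void* p, std::size_t n)
{
    if (!p) return;
    memory_cleanse(p, n); // wipe the secret before handing the chunk back
    LockedPoolManager::Instance().free(p);
}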
src/support/lockedpool.h
@@ -0,0 +1,251 @@
// Copyright (c) 2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_LOCKEDPOOL_H
#define BITCOIN_SUPPORT_LOCKEDPOOL_H

#include <stdint.h>
#include <list>
#include <map>
#include <mutex>
#include <memory>

/**
 * OS-dependent allocation and deallocation of locked/pinned memory pages.
 * Abstract base class.
 */
class LockedPageAllocator
{
public:
    virtual ~LockedPageAllocator() {}
    /** Allocate and lock memory pages.
     * If len is not a multiple of the system page size, it is rounded up.
     * Returns 0 in case of allocation failure.
     *
     * If locking the memory pages could not be accomplished, it will still
     * return the memory; however, the lockingSuccess flag will be false.
     * lockingSuccess is undefined if the allocation fails.
     */
    virtual void* AllocateLocked(size_t len, bool *lockingSuccess) = 0;

    /** Unlock and free memory pages.
     * Clear the memory before unlocking.
     */
    virtual void FreeLocked(void* addr, size_t len) = 0;

    /** Get the total limit on the amount of memory that may be locked by this
     * process, in bytes. Return size_t max if there is no limit or the limit
     * is unknown. Return 0 if no memory can be locked at all.
     */
    virtual size_t GetLimit() = 0;
};

/* An arena manages a contiguous region of memory by dividing it into
 * chunks.
 */
class Arena
{
public:
    Arena(void *base, size_t size, size_t alignment);
    virtual ~Arena();

    /** A chunk of memory.
     */
    struct Chunk
    {
        /** Most significant bit of size_t. This is used to mark
         * whether a chunk is in use.
         */
        const static size_t SIZE_MSB = 1LLU << ((sizeof(size_t)*8)-1);
        /** Maximum size of a chunk */
        const static size_t MAX_SIZE = SIZE_MSB - 1;

        Chunk(size_t size_in, bool used_in):
            size(size_in | (used_in ? SIZE_MSB : 0)) {}

        bool isInUse() const { return size & SIZE_MSB; }
        void setInUse(bool used_in) { size = (size & ~SIZE_MSB) | (used_in ? SIZE_MSB : 0); }
        size_t getSize() const { return size & ~SIZE_MSB; }
        void setSize(size_t size_in) { size = (size & SIZE_MSB) | size_in; }
    private:
        size_t size;
    };
    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Allocate size bytes from this arena.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get arena usage statistics */
    Stats stats() const;

#ifdef ARENA_DEBUG
    void walk() const;
#endif

    /** Return whether a pointer points inside this arena.
     * This returns base <= ptr < (base+size) so only use it for (inclusive)
     * chunk starting addresses.
     */
    bool addressInArena(void *ptr) const { return ptr >= base && ptr < end; }
private:
    Arena(const Arena& other) = delete; // non construction-copyable
    Arena& operator=(const Arena&) = delete; // non copyable

    /** Map of chunk address to chunk information. This class makes use of the
     * sorted order to merge previous and next chunks during deallocation.
     */
    std::map<char*, Chunk> chunks;
    /** Base address of arena */
    char* base;
    /** End address of arena */
    char* end;
    /** Minimum chunk alignment */
    size_t alignment;
};

/** Pool for locked memory chunks.
 *
 * To prevent sensitive key data from being swapped to disk, the memory in this pool
 * is locked/pinned.
 *
 * An arena manages a contiguous region of memory. The pool starts out with one arena
 * but can grow to multiple arenas if the need arises.
 *
 * Unlike a normal C heap, the administrative structures are separate from the managed
 * memory. This has been done because the sizes and base addresses of objects are not
 * in themselves sensitive information, and to conserve precious locked memory; in some
 * operating systems the amount of memory that can be locked is small.
 */
class LockedPool
{
public:
    /** Size of one arena of locked memory. This is a compromise.
     * Do not set this too low, as managing many arenas will increase
     * allocation and deallocation overhead. Setting it too high allocates
     * more locked memory from the OS than strictly necessary.
     */
    static const size_t ARENA_SIZE = 256*1024;
    /** Chunk alignment. Another compromise. Setting this too high will waste
     * memory; setting it too low will increase fragmentation.
     */
    static const size_t ARENA_ALIGN = 16;

    /** Callback when allocation succeeds but locking fails.
     */
    typedef bool (*LockingFailed_Callback)();

    /** Memory statistics. */
    struct Stats
    {
        size_t used;
        size_t free;
        size_t total;
        size_t locked;
        size_t chunks_used;
        size_t chunks_free;
    };

    /** Create a new LockedPool. This takes ownership of the LockedPageAllocator,
     * so you can only instantiate this with LockedPool(std::move(...)).
     *
     * The second argument is an optional callback for when locking a newly allocated arena fails.
     * If this callback is provided and returns false, the allocation fails (hard fail); if
     * it returns true, the allocation proceeds, but the callback can issue a warning.
     */
    LockedPool(std::unique_ptr<LockedPageAllocator> allocator, LockingFailed_Callback lf_cb_in = 0);
    ~LockedPool();

    /** Allocate size bytes from this pool.
     * Returns pointer on success, or 0 if memory is full or
     * the application tried to allocate 0 bytes.
     */
    void* alloc(size_t size);

    /** Free a previously allocated chunk of memory.
     * Freeing the zero pointer has no effect.
     * Raises std::runtime_error in case of error.
     */
    void free(void *ptr);

    /** Get pool usage statistics */
    Stats stats() const;
private:
    LockedPool(const LockedPool& other) = delete; // non construction-copyable
    LockedPool& operator=(const LockedPool&) = delete; // non copyable

    std::unique_ptr<LockedPageAllocator> allocator;

    /** Create an arena from locked pages */
    class LockedPageArena: public Arena
    {
    public:
        LockedPageArena(LockedPageAllocator *alloc_in, void *base_in, size_t size, size_t align);
        ~LockedPageArena();
    private:
        void *base;
        size_t size;
        LockedPageAllocator *allocator;
    };

    bool new_arena(size_t size, size_t align);

    std::list<LockedPageArena> arenas;
    LockingFailed_Callback lf_cb;
    size_t cumulative_bytes_locked;
    /** Mutex protects access to this pool's data structures, including arenas.
     */
    mutable std::mutex mutex;
};

/**
 * Singleton class to keep track of locked (ie, non-swappable) memory, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (i.e., see
 * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPoolManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPoolManager also be
 * static-initialized, it is created on demand.
 */
class LockedPoolManager : public LockedPool
{
public:
    /** Return the current instance, or create it once */
    static LockedPoolManager& Instance()
    {
        std::call_once(LockedPoolManager::init_flag, LockedPoolManager::CreateInstance);
        return *LockedPoolManager::_instance;
    }

private:
    LockedPoolManager(std::unique_ptr<LockedPageAllocator> allocator);

    /** Create a new LockedPoolManager specialized to the OS */
    static void CreateInstance();
    /** Called when locking fails, warn the user here */
    static bool LockingFailed();

    static LockedPoolManager* _instance;
    static std::once_flag init_flag;
};

#endif // BITCOIN_SUPPORT_LOCKEDPOOL_H
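Because LockedPageAllocator is an abstract base class, LockedPool's arena management and locking-failure callback can be exercised without ever calling mlock() or VirtualLock(). The sketch below is illustrative only and is not taken from this PR's tests; FakeLockedPageAllocator and OnLockingFailed are made-up names.

// Sketch, not from this diff: inject a fake page allocator so LockedPool's
// logic can be exercised deterministically, e.g. in a unit test.
#include "support/lockedpool.h"

#include <cstddef>
#include <cstdlib>
#include <memory>

class FakeLockedPageAllocator : public LockedPageAllocator
{
public:
    void* AllocateLocked(size_t len, bool* lockingSuccess) override
    {
        *lockingSuccess = false; // pretend the OS refused to lock the pages
        return std::malloc(len);
    }
    void FreeLocked(void* addr, size_t len) override { std::free(addr); }
    size_t GetLimit() override { return 0; } // "no memory can be locked at all"
};

static bool g_lock_failed = false;
static bool OnLockingFailed() { g_lock_failed = true; return true; } // warn and continue

void example()
{
    LockedPool pool(std::unique_ptr<LockedPageAllocator>(new FakeLockedPageAllocator()),
                    &OnLockingFailed);
    void* p = pool.alloc(32); // forces new_arena(), which reports the locking failure
    pool.free(p);
}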
src/support/pagelocker.cpp (deleted)
@@ -1,70 +0,0 @@
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#include "support/pagelocker.h"

#if defined(HAVE_CONFIG_H)
#include "config/bitcoin-config.h"
#endif

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0501
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
// This is used to attempt to keep keying material out of swap
// Note that VirtualLock does not provide this as a guarantee on Windows,
// but, in practice, memory that has been VirtualLock'd almost never gets written to
// the pagefile except in rare circumstances where memory is extremely low.
#else
#include <sys/mman.h>
#include <limits.h> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif

LockedPageManager* LockedPageManager::_instance = NULL;
boost::once_flag LockedPageManager::init_flag = BOOST_ONCE_INIT;

/** Determine system page size in bytes */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
    return page_size;
}

bool MemoryPageLocker::Lock(const void* addr, size_t len)
{
#ifdef WIN32
    return VirtualLock(const_cast<void*>(addr), len) != 0;
#else
    return mlock(addr, len) == 0;
#endif
}

bool MemoryPageLocker::Unlock(const void* addr, size_t len)
{
#ifdef WIN32
    return VirtualUnlock(const_cast<void*>(addr), len) != 0;
#else
    return munlock(addr, len) == 0;
#endif
}

LockedPageManager::LockedPageManager() : LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
{
}
src/support/pagelocker.h (deleted)
@@ -1,177 +0,0 @@
// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2015 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.

#ifndef BITCOIN_SUPPORT_PAGELOCKER_H
#define BITCOIN_SUPPORT_PAGELOCKER_H

#include "support/cleanse.h"

#include <map>

#include <boost/thread/mutex.hpp>
#include <boost/thread/once.hpp>

/**
 * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
template <class Locker>
class LockedPageManagerBase
{
public:
    LockedPageManagerBase(size_t _page_size) : page_size(_page_size)
    {
        // Determine bitmask for extracting page from address
        assert(!(_page_size & (_page_size - 1))); // size must be power of two
        page_mask = ~(_page_size - 1);
    }

    ~LockedPageManagerBase()
    {
    }


    // For all pages in affected range, increase lock count
    void LockRange(void* p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size) {
            Histogram::iterator it = histogram.find(page);
            if (it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            } else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count
    void UnlockRange(void* p, size_t size)
    {
        boost::mutex::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size) {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page, when it is zero, the page will be unlocked
            it->second -= 1;
            if (it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        boost::mutex::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;
    boost::mutex mutex;
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t, int> Histogram;
    Histogram histogram;
};


/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size
     */
    bool Lock(const void* addr, size_t len);
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size
     */
    bool Unlock(const void* addr, size_t len);
};

/**
 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
 * std::allocator templates.
 *
 * Some implementations of the STL allocate memory in some constructors (i.e., see
 * MSVC's vector<T> implementation where it allocates 1 byte of memory in the allocator.)
 * Due to the unpredictable order of static initializers, we have to make sure the
 * LockedPageManager instance exists before any other STL-based objects that use
 * secure_allocator are created. So instead of having LockedPageManager also be
 * static-initialized, it is created on demand.
 */
class LockedPageManager : public LockedPageManagerBase<MemoryPageLocker>
{
public:
    static LockedPageManager& Instance()
    {
        boost::call_once(LockedPageManager::CreateInstance, LockedPageManager::init_flag);
        return *LockedPageManager::_instance;
    }

private:
    LockedPageManager();

    static void CreateInstance()
    {
        // Using a local static instance guarantees that the object is initialized
        // when it's first needed and also deinitialized after all objects that use
        // it are done with it. I can think of one unlikely scenario where we may
        // have a static deinitialization order/problem, but the check in
        // LockedPageManagerBase's destructor helps us detect if that ever happens.
        static LockedPageManager instance;
        LockedPageManager::_instance = &instance;
    }

    static LockedPageManager* _instance;
    static boost::once_flag init_flag;
};

//
// Functions for directly locking/unlocking memory objects.
// Intended for non-dynamically allocated structures.
//
template <typename T>
void LockObject(const T& t)
{
    LockedPageManager::Instance().LockRange((void*)(&t), sizeof(T));
}

template <typename T>
void UnlockObject(const T& t)
{
    memory_cleanse((void*)(&t), sizeof(T));
    LockedPageManager::Instance().UnlockRange((void*)(&t), sizeof(T));
}

#endif // BITCOIN_SUPPORT_PAGELOCKER_H
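The two templates above are the interface that the wallet commits in this PR remove ("wallet: Get rid of LockObject and UnlockObject calls in key.h"); secret data is instead meant to live in containers backed by the locked pool via a secure allocator, which is changed in other files of this PR. For contrast, a sketch of the usage pattern the removed templates supported (it compiles only against the removed header above; Secret and old_pattern are made-up names):

// Sketch of the removed API's usage pattern, for context only.
#include "support/pagelocker.h"

struct Secret {
    unsigned char bytes[32];
};

void old_pattern()
{
    Secret s;
    LockObject(s);   // bump the lock count on the page(s) covering s
    // ... fill and use s.bytes as keying material ...
    UnlockObject(s); // memory_cleanse() s, then drop the lock count
}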