// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ALLOCATORS_H
#define BITCOIN_ALLOCATORS_H

#include <cassert>
#include <cstddef>
#include <map>
#include <memory>
#include <mutex>   // for std::mutex, std::scoped_lock, std::call_once, std::once_flag
#include <string>
#include <utility>
#include <vector>

#include <openssl/crypto.h> // for OPENSSL_cleanse()
/**
 * Thread-safe class to keep track of locked (i.e., non-swappable) memory pages.
 *
 * Memory locks do not stack: pages that have been locked several times by calls to mlock()
 * are unlocked by a single call to munlock(). This can result in keying material ending up
 * in swap when those functions are used naively. This class simulates stacking memory locks
 * by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized
 * for small objects that span up to a few pages, mostly smaller than a page. To support
 * large allocations, something like an interval tree would be the preferred data structure.
 */
template <class Locker>
class LockedPageManagerBase
{
public:
    LockedPageManagerBase(size_t page_size):
        page_size(page_size)
    {
        // Determine bitmask for extracting the page base from an address
        assert(!(page_size & (page_size - 1))); // size must be a power of two
        page_mask = ~(page_size - 1);
    }

    ~LockedPageManagerBase()
    {
        // Every LockRange must have been matched by an UnlockRange by now
        assert(this->GetLockedPageCount() == 0);
    }
    // For all pages in the affected range, increase the lock count
    void LockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            if (it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            }
            else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }
    // For all pages in the affected range, decrease the lock count
    void UnlockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (!size)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for (size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease the counter for the page; when it reaches zero, the page is unlocked
            it->second -= 1;
            if (it->second == 0) // Nothing on the page keeps it locked anymore
            {
                // Unlock the page and remove its count from the histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }
    // Get the number of locked pages, for diagnostics
    int GetLockedPageCount()
    {
        std::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;
    std::mutex mutex;
    size_t page_size, page_mask;
    // Map of page base address to lock count
    typedef std::map<size_t, int> Histogram;
    Histogram histogram;
};
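
// Illustrative usage (a sketch, not compiled in; assumes a 4096-byte page
// and some Locker policy class, such as the MemoryPageLocker below). It
// shows how the per-page counter simulates stacking locks:
//
//   LockedPageManagerBase<MemoryPageLocker> lpm(4096);
//   char key[32];
//   lpm.LockRange(key, sizeof(key));   // page newly locked, counter = 1
//   lpm.LockRange(key, sizeof(key));   // counter = 2, no second mlock()
//   lpm.UnlockRange(key, sizeof(key)); // counter = 1, page stays locked
//   lpm.UnlockRange(key, sizeof(key)); // counter = 0, munlock() is called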
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as a policy class to make stubbing for tests possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size.
     */
    bool Lock(const void *addr, size_t len);
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size.
     */
    bool Unlock(const void *addr, size_t len);
};
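
// A minimal sketch of how this policy could be implemented on a POSIX
// system (the real definitions live in the corresponding .cpp file; a
// Windows build would use VirtualLock()/VirtualUnlock() instead):
//
//   #include <sys/mman.h>
//
//   bool MemoryPageLocker::Lock(const void *addr, size_t len)
//   {
//       return mlock(addr, len) == 0;
//   }
//
//   bool MemoryPageLocker::Unlock(const void *addr, size_t len)
//   {
//       return munlock(addr, len) == 0;
//   }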
/**
 * Singleton class to keep track of locked (i.e., non-swappable) memory pages, for use in
 * std::allocator templates.
 *
 * Some STL implementations allocate memory in their constructors (e.g., MSVC's vector<T>
 * implementation allocates 1 byte of memory in the allocator). Due to the unpredictable
 * order of static initializers, we have to make sure the LockedPageManager instance exists
 * before any other STL-based objects that use secure_allocator are created. So instead of
 * having LockedPageManager also be static-initialized, it is created on demand.
 */
class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
{
public:
    static LockedPageManager& Instance()
    {
        // Thread-safe one-time initialization of the on-demand singleton
        std::call_once(LockedPageManager::init_flag, LockedPageManager::CreateInstance);
        return *LockedPageManager::_instance;
    }

private:
    LockedPageManager(); // defined out of line; passes the system page size to the base class

    static void CreateInstance()
    {
        // Using a local static instance guarantees that the object is initialized
        // when it's first needed and also deinitialized after all objects that use
        // it are done with it. I can think of one unlikely scenario where we may
        // have a static deinitialization order problem, but the check in
        // LockedPageManagerBase's destructor helps us detect if that ever happens.
        static LockedPageManager instance;
        LockedPageManager::_instance = &instance;
    }

    static LockedPageManager* _instance;
    static std::once_flag init_flag;
};
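
// Why on-demand creation matters (an illustrative sketch): if the manager
// were itself an ordinary static global, a static object in another
// translation unit that uses secure_allocator (e.g., the SecureString type
// defined below) could be constructed first, and its allocator would then
// call LockRange() on a not-yet-constructed manager:
//
//   // some_other_file.cpp (hypothetical)
//   static SecureString g_passphrase; // construction order relative to a
//                                     // static LockedPageManager would be
//                                     // unspecified
//
// Routing every access through Instance() guarantees the manager exists
// before its first use.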
//
// Allocator that locks its contents from being paged
// out of memory and clears its contents before deletion.
//
template<typename T>
struct secure_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    secure_allocator() throw() {}
    secure_allocator(const secure_allocator& a) throw() : base(a) {}
    template <typename U>
    secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
    ~secure_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef secure_allocator<_Other> other; };
    T* allocate(std::size_t n, const void *hint = nullptr)
    {
        T* p;
        p = std::allocator<T>::allocate(n, hint);
        if (p != nullptr)
            LockedPageManager::Instance().LockRange(p, sizeof(T) * n);
        return p;
    }

    void deallocate(T* p, std::size_t n)
    {
        if (p != nullptr)
        {
            // Wipe contents before the memory is unlocked and returned
            OPENSSL_cleanse(p, sizeof(T) * n);
            LockedPageManager::Instance().UnlockRange(p, sizeof(T) * n);
        }
        std::allocator<T>::deallocate(p, n);
    }
};
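
// Illustrative use (a sketch; the container name is hypothetical): key
// material kept in a container that uses secure_allocator stays out of
// swap and is wiped when freed:
//
//   std::vector<unsigned char, secure_allocator<unsigned char> > vchKey(32);
//   // ... fill vchKey with secret bytes; when the vector frees its
//   // buffer, deallocate() cleanses the bytes and unlocks the pages.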
//
// Allocator that clears its contents before deletion.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };
    void deallocate(T* p, std::size_t n)
    {
        if (p != nullptr)
            OPENSSL_cleanse(p, sizeof(T) * n);
        std::allocator<T>::deallocate(p, n);
    }
};
// This is exactly like std::string, but uses secure_allocator so that its
// contents are locked against swapping and wiped before being freed.
typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
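
// Example (illustrative): a passphrase held in a SecureString never
// reaches the swap file and is zeroed when the string is destroyed:
//
//   SecureString strPassphrase;
//   strPassphrase.reserve(100); // pre-allocate to avoid reallocations
//   // ... read the passphrase from the user into strPassphrase ...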
// Byte-vector that clears its contents before deletion.
typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData;

#endif // BITCOIN_ALLOCATORS_H