1 // Copyright (c) 2009-2010 Satoshi Nakamoto
2 // Copyright (c) 2009-2012 The Bitcoin developers
3 // Distributed under the MIT/X11 software license, see the accompanying
4 // file COPYING or http://www.opensource.org/licenses/mit-license.php.
5 #ifndef BITCOIN_ALLOCATORS_H
6 #define BITCOIN_ALLOCATORS_H
#include <openssl/crypto.h> // for OPENSSL_cleanse()

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#if (_WIN32_WINNT != _WIN32_WINNT_WIN7)
#define _WIN32_WINNT 0x601
#endif
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
// This is used to attempt to keep keying material out of swap
// Note that VirtualLock does not provide this as a guarantee on Windows,
// but, in practice, memory that has been VirtualLock'd almost never gets written to
// the pagefile except in rare circumstances where memory is extremely low.
#else
#include <sys/mman.h>
#include <climits> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif

#include <cassert>
#include <cstddef>
#include <map>
#include <mutex>
#include <string>
#include <vector>
/**
 * Thread-safe class to keep track of locked (i.e., non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
// Simulates stacking of page locks (mlock()/munlock() do not nest) by keeping
// a reference count per page. Locker is a policy class exposing
// Lock(addr, len)/Unlock(addr, len), which makes this testable with a stub.
template <class Locker> class LockedPageManagerBase
{
public:
    // page_size must be a power of two so that the base address of the page
    // containing any pointer can be derived with a single mask operation.
    LockedPageManagerBase(size_t page_size):
        page_size(page_size)
    {
        // Determine bitmask for extracting page from address
        assert(!(page_size & (page_size-1))); // size must be power of two
        page_mask = ~(page_size - 1);
    }

    // For all pages in affected range, increase lock count
    void LockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (size == 0) // guard: (base_addr + size - 1) would underflow below
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        // Base address of the page holding the LAST byte of the range.
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            if(it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            }
            else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count.
    // Must mirror a prior LockRange() with the same p/size.
    void UnlockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (size == 0)
            return;
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page, when it is zero, the page will be unlocked
            it->second -= 1;
            if(it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        std::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;
    std::mutex mutex;
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t,int> Histogram;
    Histogram histogram;
};
/** Determine system page size in bytes.
 *  Win32: GetSystemInfo(); otherwise the PAGESIZE constant from <climits>
 *  when the platform provides it, falling back to sysconf(_SC_PAGESIZE).
 */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
    return page_size;
}
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size.
     * @return true on success (VirtualLock returns nonzero, mlock returns 0)
     */
    bool Lock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualLock(const_cast<void*>(addr), len) != 0;
#else
        return mlock(addr, len) == 0;
#endif
    }
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size.
     * @return true on success
     */
    bool Unlock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualUnlock(const_cast<void*>(addr), len) != 0;
#else
        return munlock(addr, len) == 0;
#endif
    }
};
168 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
169 * std::allocator templates.
171 class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
174 static LockedPageManager instance; // instantiated in util.cpp
177 LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
182 // Allocator that locks its contents from being paged
183 // out of memory and clears its contents before deletion.
186 struct secure_allocator : public std::allocator<T>
188 // MSVC8 default copy constructor is broken
189 typedef std::allocator<T> base;
190 typedef typename base::size_type size_type;
191 typedef typename base::difference_type difference_type;
192 typedef typename base::pointer pointer;
193 typedef typename base::const_pointer const_pointer;
194 typedef typename base::reference reference;
195 typedef typename base::const_reference const_reference;
196 typedef typename base::value_type value_type;
197 secure_allocator() throw() {}
198 secure_allocator(const secure_allocator& a) throw() : base(a) {}
199 template <typename U>
200 secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
201 ~secure_allocator() throw() {}
202 template<typename _Other> struct rebind
203 { typedef secure_allocator<_Other> other; };
205 T* allocate(std::size_t n, const void *hint = 0)
208 p = std::allocator<T>::allocate(n, hint);
210 LockedPageManager::instance.LockRange(p, sizeof(T) * n);
214 void deallocate(T* p, std::size_t n)
218 OPENSSL_cleanse(p, sizeof(T) * n);
219 LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
221 std::allocator<T>::deallocate(p, n);
//
// Allocator that clears its contents before deletion.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };

    // Wipe contents before returning memory to the heap. Unlike
    // secure_allocator, pages are NOT locked, so this guards against reuse
    // of freed heap memory only, not against the data reaching swap.
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL)
            OPENSSL_cleanse(p, sizeof(T) * n);
        std::allocator<T>::deallocate(p, n);
    }
};
257 // This is exactly like std::string, but with a custom allocator.
258 typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
260 // Byte-vector that clears its contents before deletion.
261 typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData;