// Copyright (c) 2009-2010 Satoshi Nakamoto
// Copyright (c) 2009-2012 The Bitcoin developers
// Distributed under the MIT/X11 software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#ifndef BITCOIN_ALLOCATORS_H
#define BITCOIN_ALLOCATORS_H

#include <cassert>
#include <cstddef>
#include <map>
#include <mutex>
#include <string>
#include <vector>

#include <openssl/crypto.h> // for OPENSSL_cleanse()

#ifdef WIN32
#ifdef _WIN32_WINNT
#undef _WIN32_WINNT
#endif
#define _WIN32_WINNT 0x0601
#define WIN32_LEAN_AND_MEAN 1
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
// This is used to attempt to keep keying material out of swap
// Note that VirtualLock does not provide this as a guarantee on Windows,
// but, in practice, memory that has been VirtualLock'd almost never gets written to
// the pagefile except in rare circumstances where memory is extremely low.
#else
#include <sys/mman.h> // for mlock()/munlock()
#include <climits> // for PAGESIZE
#include <unistd.h> // for sysconf
#endif
/**
 * Thread-safe class to keep track of locked (ie, non-swappable) memory pages.
 *
 * Memory locks do not stack, that is, pages which have been locked several times by calls to mlock()
 * will be unlocked by a single call to munlock(). This can result in keying material ending up in swap when
 * those functions are used naively. This class simulates stacking memory locks by keeping a counter per page.
 *
 * @note By using a map from each page base address to lock count, this class is optimized for
 * small objects that span up to a few pages, mostly smaller than a page. To support large allocations,
 * something like an interval tree would be the preferred data structure.
 */
template <class Locker>
class LockedPageManagerBase
{
public:
    /** @param page_size system page size in bytes; must be a power of two */
    LockedPageManagerBase(size_t page_size):
        page_size(page_size)
    {
        // Determine bitmask for extracting page base address from any address
        assert(!(page_size & (page_size-1))); // size must be power of two
        page_mask = ~(page_size - 1);
    }

    // For all pages in affected range, increase lock count
    void LockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (!size)
            return; // empty range: avoid (base_addr + size - 1) underflow below
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            if(it == histogram.end()) // Newly locked page
            {
                locker.Lock(reinterpret_cast<void*>(page), page_size);
                histogram.insert(std::make_pair(page, 1));
            }
            else // Page was already locked; increase counter
            {
                it->second += 1;
            }
        }
    }

    // For all pages in affected range, decrease lock count
    void UnlockRange(void *p, size_t size)
    {
        std::scoped_lock lock(mutex);
        if (!size)
            return; // empty range locked nothing, so unlock nothing
        const size_t base_addr = reinterpret_cast<size_t>(p);
        const size_t start_page = base_addr & page_mask;
        const size_t end_page = (base_addr + size - 1) & page_mask;
        for(size_t page = start_page; page <= end_page; page += page_size)
        {
            Histogram::iterator it = histogram.find(page);
            assert(it != histogram.end()); // Cannot unlock an area that was not locked
            // Decrease counter for page, when it is zero, the page will be unlocked
            it->second -= 1;
            if(it->second == 0) // Nothing on the page anymore that keeps it locked
            {
                // Unlock page and remove the count from histogram
                locker.Unlock(reinterpret_cast<void*>(page), page_size);
                histogram.erase(it);
            }
        }
    }

    // Get number of locked pages for diagnostics
    int GetLockedPageCount()
    {
        std::scoped_lock lock(mutex);
        return histogram.size();
    }

private:
    Locker locker;                // OS page locking policy (e.g. MemoryPageLocker)
    std::mutex mutex;             // guards histogram; taken by every public method
    size_t page_size, page_mask;
    // map of page base address to lock count
    typedef std::map<size_t,int> Histogram;
    Histogram histogram;
};
/** Determine system page size in bytes.
 *  Used to initialize LockedPageManager with the page granularity
 *  that mlock()/VirtualLock() operate on.
 */
static inline size_t GetSystemPageSize()
{
    size_t page_size;
#if defined(WIN32)
    SYSTEM_INFO sSysInfo;
    GetSystemInfo(&sSysInfo);
    page_size = sSysInfo.dwPageSize;
#elif defined(PAGESIZE) // defined in limits.h
    page_size = PAGESIZE;
#else // assume some POSIX OS
    page_size = sysconf(_SC_PAGESIZE);
#endif
    return page_size;
}
/**
 * OS-dependent memory page locking/unlocking.
 * Defined as policy class to make stubbing for test possible.
 */
class MemoryPageLocker
{
public:
    /** Lock memory pages.
     * addr and len must be a multiple of the system page size
     * @return true on success; e.g. mlock() can fail when RLIMIT_MEMLOCK is exhausted
     */
    bool Lock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualLock(const_cast<void*>(addr), len) != 0;
#else
        return mlock(addr, len) == 0;
#endif
    }
    /** Unlock memory pages.
     * addr and len must be a multiple of the system page size
     * @return true on success
     */
    bool Unlock(const void *addr, size_t len)
    {
#ifdef WIN32
        return VirtualUnlock(const_cast<void*>(addr), len) != 0;
#else
        return munlock(addr, len) == 0;
#endif
    }
};
169 * Singleton class to keep track of locked (ie, non-swappable) memory pages, for use in
170 * std::allocator templates.
172 class LockedPageManager: public LockedPageManagerBase<MemoryPageLocker>
175 static LockedPageManager instance; // instantiated in util.cpp
178 LockedPageManagerBase<MemoryPageLocker>(GetSystemPageSize())
183 // Allocator that locks its contents from being paged
184 // out of memory and clears its contents before deletion.
187 struct secure_allocator : public std::allocator<T>
189 // MSVC8 default copy constructor is broken
190 typedef std::allocator<T> base;
191 typedef typename base::size_type size_type;
192 typedef typename base::difference_type difference_type;
193 typedef typename base::pointer pointer;
194 typedef typename base::const_pointer const_pointer;
195 typedef typename base::reference reference;
196 typedef typename base::const_reference const_reference;
197 typedef typename base::value_type value_type;
198 secure_allocator() throw() {}
199 secure_allocator(const secure_allocator& a) throw() : base(a) {}
200 template <typename U>
201 secure_allocator(const secure_allocator<U>& a) throw() : base(a) {}
202 ~secure_allocator() throw() {}
203 template<typename _Other> struct rebind
204 { typedef secure_allocator<_Other> other; };
206 T* allocate(std::size_t n, const void *hint = 0)
209 p = std::allocator<T>::allocate(n, hint);
211 LockedPageManager::instance.LockRange(p, sizeof(T) * n);
215 void deallocate(T* p, std::size_t n)
219 OPENSSL_cleanse(p, sizeof(T) * n);
220 LockedPageManager::instance.UnlockRange(p, sizeof(T) * n);
222 std::allocator<T>::deallocate(p, n);
//
// Allocator that clears its contents before deletion.
// Like secure_allocator but without page locking: suitable for
// serialization buffers (see CSerializeData below) that may transiently
// contain sensitive data but are too large/short-lived to pin in RAM.
//
template<typename T>
struct zero_after_free_allocator : public std::allocator<T>
{
    // MSVC8 default copy constructor is broken
    typedef std::allocator<T> base;
    typedef typename base::size_type size_type;
    typedef typename base::difference_type difference_type;
    typedef typename base::pointer pointer;
    typedef typename base::const_pointer const_pointer;
    typedef typename base::reference reference;
    typedef typename base::const_reference const_reference;
    typedef typename base::value_type value_type;
    zero_after_free_allocator() throw() {}
    zero_after_free_allocator(const zero_after_free_allocator& a) throw() : base(a) {}
    template <typename U>
    zero_after_free_allocator(const zero_after_free_allocator<U>& a) throw() : base(a) {}
    ~zero_after_free_allocator() throw() {}
    template<typename _Other> struct rebind
    { typedef zero_after_free_allocator<_Other> other; };

    // Wipe contents before handing the memory back to the heap.
    void deallocate(T* p, std::size_t n)
    {
        if (p != NULL)
            // OPENSSL_cleanse: guaranteed not to be optimized away, unlike memset
            OPENSSL_cleanse(p, sizeof(T) * n);
        std::allocator<T>::deallocate(p, n);
    }
};
258 // This is exactly like std::string, but with a custom allocator.
259 typedef std::basic_string<char, std::char_traits<char>, secure_allocator<char> > SecureString;
261 // Byte-vector that clears its contents before deletion.
262 typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData;