Wire Sysio 1.0.0
allocator.hpp
#pragma once

#include <sysio/vm/constants.hpp>
#include <sysio/vm/exceptions.hpp>
// Assumed include (not in the original listing): jit_allocator below uses a
// scope_guard utility, which in this codebase lives in the utils header.
#include <sysio/vm/utils.hpp>

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <map>
#include <set>
#include <memory>
#include <mutex>
#include <utility>
#include <vector>

#include <sys/mman.h>
#include <unistd.h>

namespace sysio { namespace vm {

   // Fixed-capacity bump allocator backed by a single heap buffer.
   // alloc() advances an index; free() and reset() discard everything at once.
   class bounded_allocator {
    public:
      bounded_allocator(size_t size) {
         mem_size = size;
         raw      = std::unique_ptr<uint8_t[]>(new uint8_t[mem_size]);
      }
      template <typename T>
      T* alloc(size_t size = 1) {
         SYS_VM_ASSERT((sizeof(T) * size) + index <= mem_size, wasm_bad_alloc, "wasm failed to allocate native");
         T* ret = (T*)(raw.get() + index);
         index += sizeof(T) * size;
         return ret;
      }

      template <typename T>
      void reclaim(const T* ptr, size_t size = 0) { /* noop for now */ }

      void free() {
         SYS_VM_ASSERT(index > 0, wasm_double_free, "double free");
         index = 0;
      }
      void reset() { index = 0; }
      size_t                     mem_size;
      std::unique_ptr<uint8_t[]> raw;
      size_t                     index = 0;
   };
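
   // Illustrative sketch (not part of the original header): bounded_allocator as
   // a one-shot arena. The 1 KiB capacity is an arbitrary example value.
   //
   //   bounded_allocator arena(1024);
   //   uint64_t* vals = arena.alloc<uint64_t>(16); // advances index by 128 bytes
   //   arena.free();                               // releases the whole arena at once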

   // Conditionally allocates a new stack, leaving enough room
   // for host function and signal handler execution. If
   // the required stack size is small enough to fit in the
   // regular program stack, does nothing and top() returns nullptr.
   class stack_allocator {
    public:
      explicit stack_allocator(std::size_t min_size) {
         if(min_size > 4*1024*1024) {
            std::size_t pagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
            _size = ((min_size + pagesize - 1) & ~(pagesize - 1)) + 4*1024*1024;
            int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#ifdef MAP_STACK
            flags |= MAP_STACK;
#endif
            _ptr = ::mmap(nullptr, _size, PROT_READ | PROT_WRITE, flags, -1, 0);
         }
      }
      ~stack_allocator() {
         if(_ptr) {
            ::munmap(_ptr, _size);
         }
      }
      // Highest address of the mapped stack (stacks grow down), or nullptr
      // when the regular program stack should be used.
      void* top() const {
         if(_ptr) {
            return static_cast<char*>(_ptr) + _size;
         } else {
            return nullptr;
         }
      }
    private:
      void*       _ptr  = nullptr;
      std::size_t _size = 0;
   };
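
   // Illustrative sketch (not part of the original header): the 4 MiB threshold
   // and the extra head-room come from the constructor above.
   //
   //   stack_allocator small(2*1024*1024); // fits the program stack: top() == nullptr
   //   stack_allocator big(16*1024*1024);  // > 4 MiB: a fresh stack is mmap'd
   //   void* sp = big.top();               // execution would grow down from sp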

   // Page-backed bump allocator that grows by mapping a larger region and
   // copying. Growth moves the base address, so previously returned pointers
   // must not be held across calls to alloc().
   class contiguous_allocator {
    public:
      template<std::size_t align_amt>
      static constexpr size_t align_offset(size_t offset) { return (offset + align_amt - 1) & ~(align_amt - 1); }

      static std::size_t align_to_page(std::size_t offset) {
         std::size_t pagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
         return (offset + pagesize - 1) & ~(pagesize - 1);
      }

      contiguous_allocator(size_t size) {
         _size = align_to_page(size);
         _base = (char*)mmap(NULL, _size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         SYS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc, "mmap failed.");
      }
      ~contiguous_allocator() { munmap(_base, align_to_page(_size)); }

      template <typename T>
      T* alloc(size_t size = 0) {
         _offset = align_offset<alignof(T)>(_offset);
         size_t aligned = (sizeof(T) * size) + _offset;
         if (aligned > _size) {
            size_t new_size = align_to_page(aligned);
            char*  new_base = (char*)mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            SYS_VM_ASSERT(new_base != MAP_FAILED, wasm_bad_alloc, "mmap failed.");
            memcpy(new_base, _base, _size);
            munmap(_base, _size);
            _size = new_size;
            _base = new_base;
         }
         T* ptr  = (T*)(_base + _offset);
         _offset = aligned;
         return ptr;
      }
      template <typename T>
      void reclaim(const T* ptr, size_t size = 0) { /* noop for now */ }
      void free() { /* noop for now */ }

    private:
      size_t _offset = 0;
      size_t _size   = 0;
      char*  _base;
   };
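
   // Illustrative sketch (not part of the original header): offsets survive
   // growth even though raw pointers do not. The 4 KiB initial size is an
   // arbitrary example value.
   //
   //   contiguous_allocator buf(4096);
   //   uint32_t* a = buf.alloc<uint32_t>(512);  // fits the initial mapping
   //   uint32_t* b = buf.alloc<uint32_t>(4096); // triggers a remap; 'a' now dangles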

   // Hands out page-aligned executable blocks carved from large mmap'd
   // segments, tracking free and allocated blocks both by address and by size.
   class jit_allocator {
      static constexpr std::size_t segment_size = std::size_t{1024u} * 1024u * 1024u;
    public:
      // allocates page aligned memory with executable permission
      void* alloc(std::size_t size) {
         std::lock_guard l{_mutex};
         size = round_to_page(size);
         auto best = free_blocks_by_size.lower_bound(size);
         if(best == free_blocks_by_size.end()) {
            best = allocate_segment(size);
         }
         if (best->first > size) {
            best = split_block(best, size);
         }
         transfer_node(free_blocks, allocated_blocks, best->second);
         best = transfer_node(free_blocks_by_size, allocated_blocks_by_size, *best);
         return best->second;
      }
      // ptr must be previously allocated by a call to alloc
      void free(void* ptr) noexcept {
         std::lock_guard l{_mutex};
         auto pos = transfer_node(allocated_blocks, free_blocks, ptr);
         transfer_node(allocated_blocks_by_size, free_blocks_by_size, {pos->second, pos->first});

         // merge the freed block with adjacent free blocks
         if(pos != free_blocks.begin()) {
            auto prev = pos;
            --prev;
            pos = maybe_consolidate_blocks(prev, pos);
         }
         auto next = pos;
         ++next;
         if (next != free_blocks.end()) {
            maybe_consolidate_blocks(pos, next);
         }
      }
      static jit_allocator& instance() {
         static jit_allocator the_jit_allocator;
         return the_jit_allocator;
      }
    private:
      // Owns one large executable mapping; released on destruction.
      struct segment {
         segment(void* base, std::size_t size) : base(base), size(size) {}
         segment(segment&& other) : base(other.base), size(other.size) {
            other.base = nullptr;
            other.size = 0;
         }
         segment& operator=(const segment& other) = delete;
         ~segment() {
            if(base) {
               ::munmap(base, size);
            }
         }
         void*       base;
         std::size_t size;
      };
      using block = std::pair<std::size_t, void*>;
      // Orders blocks by size, then by address; transparent comparisons allow
      // lookups with a bare size.
      struct by_size {
         using is_transparent = void;
         bool operator()(const block& lhs, const block& rhs) const {
            return lhs.first < rhs.first || (lhs.first == rhs.first && std::less<void*>{}(lhs.second, rhs.second));
         }
         bool operator()(const block& lhs, std::size_t rhs) const {
            return lhs.first < rhs;
         }
         bool operator()(std::size_t lhs, const block& rhs) const {
            return lhs < rhs.first;
         }
      };
      std::vector<segment>         _segments;
      std::set<block, by_size>     free_blocks_by_size;
      std::set<block, by_size>     allocated_blocks_by_size;
      std::map<void*, std::size_t> free_blocks;
      std::map<void*, std::size_t> allocated_blocks;
      std::mutex                   _mutex;
      using blocks_by_size_t = std::set<block, by_size>;
      using blocks_t         = std::map<void*, size_t>;

      // moves an element from one associative container to another
      // @pre key must be present in from, but not in to
      template<typename C>
      static typename C::iterator transfer_node(C& from, C& to, typename C::key_type key) noexcept {
         auto node = from.extract(key);
         assert(node);
         auto [pos, inserted, _] = to.insert(std::move(node));
         assert(inserted);
         return pos;
      }

      blocks_by_size_t::iterator allocate_segment(std::size_t min_size) {
         std::size_t size = std::max(min_size, segment_size);
         void* base = mmap(nullptr, size, PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         // Validate the mapping before handing ownership to a segment, so a
         // failed mmap is never munmap'd by the segment destructor.
         SYS_VM_ASSERT(base != MAP_FAILED, wasm_bad_alloc, "failed to allocate jit segment");
         segment s{base, size};
         _segments.emplace_back(std::move(s));
         bool success = false;
         auto guard_1 = scope_guard{[&] { if(!success) { _segments.pop_back(); } }};
         auto pos2 = free_blocks_by_size.insert({size, base}).first;
         auto guard_2 = scope_guard{[&] { if(!success) { free_blocks_by_size.erase(pos2); } }};
         free_blocks.insert({base, size});
         success = true;
         return pos2;
      }

      blocks_by_size_t::iterator split_block(blocks_by_size_t::iterator pos, std::size_t size) {
         bool success = false;
         auto new1 = free_blocks_by_size.insert({size, pos->second}).first;
         auto guard1 = scope_guard{[&]{ if(!success) { free_blocks_by_size.erase(new1); } }};
         auto new2 = free_blocks_by_size.insert({pos->first - size, static_cast<char*>(pos->second) + size}).first;
         auto guard2 = scope_guard{[&]{ if(!success) { free_blocks_by_size.erase(new2); } }};
         free_blocks.insert({new2->second, new2->first});
         // the rest is nothrow
         free_blocks_by_size.erase(pos);
         free_blocks[new1->second] = new1->first;
         success = true;
         return new1;
      }

      blocks_t::iterator maybe_consolidate_blocks(blocks_t::iterator lhs, blocks_t::iterator rhs) noexcept {
         if(static_cast<char*>(lhs->first) + lhs->second == rhs->first) {
            // merge blocks in free_blocks_by_size
            auto node = free_blocks_by_size.extract({lhs->second, lhs->first});
            assert(node);
            node.value().first += rhs->second;
            free_blocks_by_size.insert(std::move(node));
            free_blocks_by_size.erase({rhs->second, rhs->first});
            // merge the blocks in free_blocks
            lhs->second += rhs->second;
            free_blocks.erase(rhs);
            return lhs;
         } else {
            return rhs;
         }
      }

      static std::size_t round_to_page(std::size_t offset) {
         std::size_t pagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
         return (offset + pagesize - 1) & ~(pagesize - 1);
      }
   };
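
   // Illustrative sketch (not part of the original header): acquiring an
   // executable block through the process-wide singleton. Callers flip the
   // block to writable, copy code in, then restore execute permission, which
   // is exactly what growable_allocator::end_code does below.
   //
   //   auto& ja   = jit_allocator::instance();
   //   void* code = ja.alloc(4096); // page-rounded, initially PROT_EXEC
   //   ja.free(code);               // returns the block and merges neighbors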

   // Reserves max_memory_size of address space up front and commits it in
   // chunk_size pieces on demand, so grown allocations never move.
   class growable_allocator {
    public:
      static constexpr size_t max_memory_size = 1024 * 1024 * 1024; // 1GB
      static constexpr size_t chunk_size      = 128 * 1024;         // 128KB
      template<std::size_t align_amt>
      static constexpr size_t align_offset(size_t offset) { return (offset + align_amt - 1) & ~(align_amt - 1); }

      static std::size_t align_to_page(std::size_t offset) {
         std::size_t pagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
         assert(max_memory_size % pagesize == 0);
         return (offset + pagesize - 1) & ~(pagesize - 1);
      }

      // size in bytes
      growable_allocator(size_t size) {
         SYS_VM_ASSERT(size <= max_memory_size, wasm_bad_alloc, "Too large initial memory size");
         _base = (char*)mmap(NULL, max_memory_size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         SYS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc, "mmap failed.");
         if (size != 0) {
            size_t chunks_to_alloc = (align_offset<chunk_size>(size) / chunk_size);
            _size += (chunk_size * chunks_to_alloc);
            mprotect((char*)_base, _size, PROT_READ | PROT_WRITE);
         }
         _capacity = max_memory_size;
      }

      ~growable_allocator() {
         munmap(_base, _capacity);
         if (is_jit) {
            jit_allocator::instance().free(_code_base);
         }
      }

      // TODO use Outcome library
      template <typename T>
      T* alloc(size_t size = 0) {
         static_assert(max_memory_size % alignof(T) == 0, "alignment must divide max_memory_size.");
         _offset = align_offset<alignof(T)>(_offset);
         // Evaluating the inequality in this form cannot cause integer overflow.
         // Once this assertion passes, the rest of the function is safe.
         SYS_VM_ASSERT((max_memory_size - _offset) / sizeof(T) >= size, wasm_bad_alloc, "Allocated too much memory");
         size_t aligned = (sizeof(T) * size) + _offset;
         if (aligned > _size) {
            size_t chunks_to_alloc = align_offset<chunk_size>(aligned - _size) / chunk_size;
            mprotect((char*)_base + _size, (chunk_size * chunks_to_alloc), PROT_READ | PROT_WRITE);
            _size += (chunk_size * chunks_to_alloc);
         }

         T* ptr  = (T*)(_base + _offset);
         _offset = aligned;
         return ptr;
      }

      // Marks the start of the code section on a fresh page.
      void* start_code() {
         _offset = align_to_page(_offset);
         return _base + _offset;
      }
      template<bool IsJit>
      void end_code(void* code_base) {
         assert((char*)code_base >= _base);
         assert((char*)code_base <= (_base + _offset));
         _offset = align_to_page(_offset);
         _code_base = (char*)code_base;
         _code_size = _offset - ((char*)code_base - _base);
         if constexpr (IsJit) {
            // Copy the generated code into executable memory owned by the
            // jit_allocator, then roll _offset back so the arena space that
            // held the writable copy can be reused.
            auto& jit_alloc = jit_allocator::instance();
            void* executable_code = jit_alloc.alloc(_code_size);
            int err = mprotect(executable_code, _code_size, PROT_READ | PROT_WRITE);
            SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
            std::memcpy(executable_code, _code_base, _code_size);
            is_jit = true;
            _code_base = (char*)executable_code;
            _offset = (char*)code_base - _base;
         }
         enable_code(IsJit);
      }

      // Sets protection on code pages to allow them to be executed.
      void enable_code(bool is_jit) {
         mprotect(_code_base, _code_size, is_jit ? PROT_EXEC : (PROT_READ | PROT_WRITE));
      }
      // Make code pages unexecutable
      void disable_code() {
         mprotect(_code_base, _code_size, PROT_NONE);
      }

      const void* get_code_start() const { return _code_base; }

      /* different semantics than free:
       * the memory must be at the end of the most recently allocated block.
       */
      template <typename T>
      void reclaim(const T* ptr, size_t size = 0) {
         SYS_VM_ASSERT(_offset / sizeof(T) >= size, wasm_bad_alloc, "reclaimed too much memory");
         SYS_VM_ASSERT(size == 0 || (char*)(ptr + size) == (_base + _offset), wasm_bad_alloc, "reclaiming memory must be strictly LIFO");
         if (size != 0)
            _offset = ((char*)ptr - _base);
      }

      /*
       * Finalizes the memory by unmapping any excess pages; the allocator
       * can no longer grow afterwards.
       */
      void finalize() {
         if(_capacity != _offset) {
            std::size_t final_size = align_to_page(_offset);
            SYS_VM_ASSERT(munmap(_base + final_size, _capacity - final_size) == 0, wasm_bad_alloc, "failed to finalize growable_allocator");
            _capacity = _size = _offset = final_size;
         }
      }

      void free() { SYS_VM_ASSERT(false, wasm_bad_alloc, "unimplemented"); }

      void reset() { _offset = 0; }

      size_t      _offset = 0;
      size_t      _size = 0;
      std::size_t _capacity = 0;
      char*       _base;
      char*       _code_base = nullptr;
      size_t      _code_size = 0;
      bool        is_jit = false;
   };
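
   // Illustrative sketch (not part of the original header): growable_allocator
   // as a module-lifetime arena; reclaim() only accepts the most recent block.
   //
   //   growable_allocator ga(64*1024);
   //   int* xs = ga.alloc<int>(100);
   //   ga.reclaim(xs, 100); // ok: xs + 100 is exactly the end of the arena
   //   ga.finalize();       // unmaps the unused tail; no further growth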

   // Reserves max_memory of address space and commits only the first
   // max_size elements for use as a fixed-size stack.
   template <typename T>
   class fixed_stack_allocator {
    private:
      T*     raw      = nullptr;
      size_t max_size = 0;

    public:
      template <typename U>
      void free() {
         munmap(raw, max_memory);
      }
      fixed_stack_allocator(size_t max_size) : max_size(max_size) {
         raw = (T*)mmap(NULL, max_memory, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         SYS_VM_ASSERT(raw != MAP_FAILED, wasm_bad_alloc, "mmap failed to alloc pages");
         mprotect(raw, max_size * sizeof(T), PROT_READ | PROT_WRITE);
      }
      inline T* get_base_ptr() const { return raw; }
   };
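
   // Illustrative sketch (not part of the original header): an interpreter's
   // value stack, sized in elements rather than bytes.
   //
   //   fixed_stack_allocator<uint64_t> stack(65536); // 64 Ki elements committed
   //   uint64_t* base = stack.get_base_ptr();
   //   stack.free<uint64_t>(); // unmaps the whole reservation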

   // Manages a wasm instance's linear memory in wasm pages (page_size bytes),
   // with a read-only OS page immediately before the region as a barrier.
   class wasm_allocator {
    private:
      char*   raw  = nullptr;
      int32_t page = 0;

    public:
      template <typename T>
      void alloc(size_t size = 1 /*in pages*/) {
         if (size == 0) return;
         SYS_VM_ASSERT(page >= 0, wasm_bad_alloc, "require memory to allocate");
         SYS_VM_ASSERT(size + page <= max_pages, wasm_bad_alloc, "exceeded max number of pages");
         int err = mprotect(raw + (page_size * page), (page_size * size), PROT_READ | PROT_WRITE);
         SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
         T* ptr = (T*)(raw + (page_size * page));
         memset(ptr, 0, page_size * size);
         page += size;
      }
      template <typename T>
      void free(std::size_t size) {
         if (size == 0) return;
         SYS_VM_ASSERT(page >= 0, wasm_bad_alloc, "require memory to deallocate");
         SYS_VM_ASSERT(size <= static_cast<uint32_t>(page), wasm_bad_alloc, "freed too many pages");
         page -= size;
         int err = mprotect(raw + (page_size * page), (page_size * size), PROT_NONE);
         SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
      }
      void free() {
         std::size_t syspagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
         munmap(raw - syspagesize, max_memory + 2*syspagesize);
      }
      wasm_allocator() {
         std::size_t syspagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
         raw = (char*)mmap(NULL, max_memory + 2*syspagesize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
         SYS_VM_ASSERT(raw != MAP_FAILED, wasm_bad_alloc, "mmap failed to alloc pages");
         int err = mprotect(raw, syspagesize, PROT_READ);
         SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
         raw += syspagesize;
         page = 0;
      }

      // Initializes the memory controlled by the allocator.
      //
      // \post get_current_page() == new_pages
      // \post all allocated pages are zero-filled.
      void reset(uint32_t new_pages) {
         if (page >= 0) {
            memset(raw, '\0', page_size * page); // zero the memory
         } else {
            std::size_t syspagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
            int err = mprotect(raw - syspagesize, syspagesize, PROT_READ);
            SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
            page = 0;
         }
         if(new_pages > static_cast<uint32_t>(page)) {
            alloc<char>(new_pages - page);
         } else if(new_pages < static_cast<uint32_t>(page)) {
            free<char>(page - new_pages);
         }
      }

      // Signal no memory defined
      void reset() {
         if (page >= 0) {
            std::size_t syspagesize = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE));
            memset(raw, '\0', page_size * page); // zero the memory
            int err = mprotect(raw - syspagesize, page_size * page + syspagesize, PROT_NONE);
            SYS_VM_ASSERT(err == 0, wasm_bad_alloc, "mprotect failed");
         }
         page = -1;
      }

      template <typename T>
      inline T* get_base_ptr() const {
         return reinterpret_cast<T*>(raw);
      }
      template <typename T>
      inline T* create_pointer(uint32_t offset) { return reinterpret_cast<T*>(raw + offset); }
      inline int32_t get_current_page() const { return page; }
      bool is_in_region(char* p) { return p >= raw && p < raw + max_memory; }
   };
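
   // Illustrative sketch (not part of the original header): growing and
   // shrinking linear memory in whole wasm pages.
   //
   //   wasm_allocator wa;  // reserves max_memory plus two guard pages
   //   wa.alloc<char>(2);  // commit two zero-filled wasm pages
   //   char* mem = wa.get_base_ptr<char>();
   //   wa.reset(1);        // back to exactly one zero-filled page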
}} // namespace sysio::vm