3#include <sysio/vm/constants.hpp>
4#include <sysio/vm/exceptions.hpp>
20namespace sysio {
namespace vm {
// --- bounded_allocator (fragment) -------------------------------------------
// NOTE(review): this file is a lossy extraction; the integers fused onto the
// front of lines ("21", "31", ...) are leftovers of original line numbering,
// and many interior lines of each definition are missing.  Comments describe
// only what the visible tokens establish.
// Appears to be a bump allocator over a single owned byte buffer -- the alloc
// path advances a cursor (`index`); TODO confirm against the full source.
21 class bounded_allocator {
// Advance the cursor by the byte size of `size` objects of T (inside alloc<T>).
31 index +=
sizeof(
T) * size;
// Arena storage owned via unique_ptr<uint8_t[]>, released automatically (RAII).
44 std::unique_ptr<uint8_t[]>
raw;
// --- stack-style allocator (fragment) ---------------------------------------
// Visible logic: when the requested minimum exceeds 4 MiB, round it up to the
// OS page size and add 4 MiB of headroom, then mmap the region read/write.
// The munmap line is presumably the destructor; the final return yields the
// one-past-the-end address -- assumption: the stack grows downward from the
// top.  TODO confirm against the full source.
55 if(min_size > 4*1024*1024) {
// Query the runtime page size for the rounding below.
56 std::size_t pagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
// Page-align min_size, then add 4 MiB of slack.
57 _size = ((min_size + pagesize - 1) & ~(pagesize - 1)) + 4*1024*1024;
58 int flags = MAP_PRIVATE | MAP_ANONYMOUS;
// Anonymous private mapping; any MAP_FAILED check is on elided lines.
62 _ptr = ::mmap(
nullptr, _size, PROT_READ | PROT_WRITE,
flags, -1, 0);
// Release the whole mapping (presumably the destructor).
67 ::munmap(_ptr, _size);
// Top of the region: base + size.
72 return static_cast<char*
>(_ptr) + _size;
// Allocator backed by one contiguous private mapping, grown by
// mmap-new / memcpy / munmap-old (see the fragments that follow).
82 class contiguous_allocator {
// Round `offset` up to the next multiple of align_amt (compile-time constant).
// The mask trick `(x + a - 1) & ~(a - 1)` is only correct when the alignment
// is a power of two, so reject anything else at compile time rather than
// silently mis-aligning allocations.
template<std::size_t align_amt>
static constexpr size_t align_offset(size_t offset) {
   static_assert(align_amt != 0 && (align_amt & (align_amt - 1)) == 0,
                 "align_amt must be a non-zero power of two");
   return (offset + align_amt - 1) & ~(align_amt - 1);
}
// align_to_page (body fragment; its signature line is elided): round `offset`
// up to the next multiple of the runtime page size.
88 std::size_t pagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
89 return (offset + pagesize - 1) & ~(pagesize - 1);
// Constructor fragment: reserve _size bytes of anonymous read/write memory.
94 _base = (
char*)mmap(NULL, _size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
95 SYS_VM_ASSERT(_base != MAP_FAILED, wasm_bad_alloc,
"mmap failed.");
// alloc<T> fragment: compute the prospective cursor in bytes...
102 size_t aligned = (
sizeof(
T) * size) + _offset;
// ...and on overflow grow by mapping a fresh region, copying the live bytes,
// and unmapping the old one.  NOTE(review): this invalidates any outstanding
// pointers into the old region -- confirm callers tolerate that.
105 char* new_base = (
char*)mmap(NULL, new_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
106 SYS_VM_ASSERT(new_base != MAP_FAILED, wasm_bad_alloc,
"mmap failed.");
107 memcpy(new_base, _base, _size);
108 munmap(_base, _size);
// Hand out the object slot at the current offset.
112 T* ptr = (
T*)(_base + _offset);
// Dangling template header for the next member (its body is elided).
116 template <
typename T>
// Allocator for JIT code memory: carves page-granular blocks out of large
// mmap'd segments and tracks them in the free/allocated indexes below.
126 class jit_allocator {
// Preferred segment granularity: 1 GiB.  Requests larger than this get their
// own exactly-sized segment (see allocate_segment).
127 static constexpr std::size_t segment_size = std::size_t{1024u} * 1024u * 1024u;
// alloc(size) fragment: best-fit block search, all under the instance mutex.
131 std::lock_guard
l{_mutex};
// Block sizes are kept page-granular.
132 size = round_to_page(size);
// Best fit: smallest free block whose size is >= size (by_size is a
// transparent comparator, so the lookup takes a bare std::size_t).
133 auto best = free_blocks_by_size.lower_bound(size);
134 if(best == free_blocks_by_size.end()) {
// Nothing fits: map a new segment and use its single big block.
135 best = allocate_segment(size);
// Not an exact fit: carve off the tail so only `size` bytes leave the pool.
137 if (best->first > size) {
138 best = split_block(best, size);
// Move the chosen block from the free indexes to the allocated indexes.
140 transfer_node(free_blocks, allocated_blocks, best->second);
141 best = transfer_node(free_blocks_by_size, allocated_blocks_by_size, *best);
// free(ptr): return ptr's block to the free pool and coalesce it with a
// contiguous free neighbour on either side.
145 void free(
void* ptr)
noexcept {
146 std::lock_guard
l{_mutex};
// Move the (address -> size) entry back to the free map...
147 auto pos = transfer_node(allocated_blocks, free_blocks, ptr);
// ...and mirror that in the size-ordered index ({size, address} key).
148 transfer_node(allocated_blocks_by_size, free_blocks_by_size, {pos->second, pos->first});
// Try to merge with the preceding free block (`prev` is computed on an
// elided line -- presumably std::prev(pos)).
151 if(pos != free_blocks.begin()) {
154 pos = maybe_consolidate_blocks(prev, pos);
// Then with the following free block.
158 if (next != free_blocks.end()) {
159 maybe_consolidate_blocks(pos, next);
// instance() fragment: hands back the process-wide allocator
// (the_jit_allocator's declaration is on an elided line -- presumably a
// function-local static).
164 return the_jit_allocator;
// RAII owner of one mmap'd code segment; the destructor unmaps it.
168 segment(
void * base, std::size_t size) : base(base), size(size) {}
// Move transfers ownership: the source's base is nulled so its destructor
// (elided here; presumably guarded on base) will not double-unmap.
169 segment(segment&& other) : base(other.base), size(other.size) {
170 other.base =
nullptr;
// Copying is forbidden -- two owners would munmap the same region twice.
173 segment& operator=(
const segment& other) =
delete;
// Destructor fragment: release the mapping.
176 ::munmap(base, size);
// A block is {size_in_bytes, base_address}.
182 using block = std::pair<std::size_t, void*>;
// Comparator for the size-ordered sets (its `struct by_size {` header line is
// elided).  is_transparent enables heterogeneous lookup with a bare size.
184 using is_transparent = void;
// Order by size, then by address; std::less<void*> gives the total order over
// unrelated pointers that raw `<` does not guarantee.
185 bool operator()(
const block& lhs,
const block& rhs)
const {
186 return lhs.first < rhs.first || (lhs.first == rhs.first && std::less<void*>{}(lhs.second, rhs.second));
// block-vs-size and size-vs-block overloads used by lower_bound(size).
188 bool operator()(
const block& lhs, std::size_t rhs)
const {
189 return lhs.first < rhs;
191 bool operator()(std::size_t lhs,
const block& rhs)
const {
192 return lhs < rhs.first;
// All mapped segments (RAII: destroying the vector unmaps everything).
195 std::vector<segment> _segments;
// Two views of the same blocks: size-ordered (for best-fit lookup) and
// address-ordered (for coalescing neighbours).  The *_by_size sets and the
// address-keyed maps must be kept in lock-step; transfer_node does the moves.
196 std::set<block, by_size> free_blocks_by_size;
197 std::set<block, by_size> allocated_blocks_by_size;
198 std::map<void*, std::size_t> free_blocks;
199 std::map<void*, std::size_t> allocated_blocks;
201 using blocks_by_size_t = std::set<block, by_size>;
202 using blocks_t = std::map<void*, size_t>;
// Move one entry from container `from` to container `to` without reallocating,
// using C++17 node extraction.  The template header (template<typename C>) and
// the final return are on elided lines.
207 static typename C::iterator transfer_node(C& from, C& to,
typename C::key_type key)
noexcept {
// extract() detaches the node; insert(node_type&&) splices it into `to`.
208 auto node = from.extract(key);
210 auto [pos, inserted,
_] = to.insert(std::move(node));
// Map a fresh executable segment of at least min_size bytes and publish it as
// one free block.  The scope_guards roll back partial bookkeeping if a later
// step throws (`success` is declared/set on elided lines).
215 blocks_by_size_t::iterator allocate_segment(std::size_t min_size) {
216 std::size_t size = std::max(min_size, segment_size);
// PROT_EXEC only here; blocks are mprotect'ed writable by the code that
// fills them.
217 void* base = mmap(
nullptr, size, PROT_EXEC, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
// BUG(review): the segment is constructed BEFORE the MAP_FAILED check below.
// If mmap failed and SYS_VM_ASSERT throws, `s`'s destructor will call
// munmap(MAP_FAILED, size).  The assert should precede the segment ctor.
218 segment
s{base, size};
219 SYS_VM_ASSERT(base != MAP_FAILED, wasm_bad_alloc,
"failed to allocate jit segment");
220 _segments.emplace_back(std::move(
s));
// Undo the emplace if a later insert throws.
222 auto guard_1 = scope_guard{[&] {
if(!success) { _segments.pop_back(); } }};
223 auto pos2 = free_blocks_by_size.insert({size, base}).first;
224 auto guard_2 = scope_guard{[&] {
if(!success) { free_blocks_by_size.erase(pos2); }}};
225 free_blocks.insert({base, size});
// Split free block `pos` into a leading block of exactly `size` bytes plus the
// remainder.  Both halves are inserted before the original is erased, with
// scope_guards (keyed on an elided `success` flag) undoing partial inserts.
230 blocks_by_size_t::iterator split_block(blocks_by_size_t::iterator pos, std::size_t size) {
// Leading half keeps the original base address.
232 auto new1 = free_blocks_by_size.insert({size, pos->second}).first;
233 auto guard1 = scope_guard{[&]{
if(!success) { free_blocks_by_size.erase(new1); } }};
// Trailing half starts `size` bytes in.
234 auto new2 = free_blocks_by_size.insert({pos->first - size,
static_cast<char*
>(pos->second) + size}).first;
235 auto guard2 = scope_guard{[&]{
if(!success) { free_blocks_by_size.erase(new2); } }};
// Mirror the trailing half in the address-keyed map.
236 free_blocks.insert({new2->second, new2->first});
// Retire the original block; the address-map entry for the original base is
// re-pointed at the leading half's new (smaller) size.
238 free_blocks_by_size.erase(pos);
239 free_blocks[new1->second] = new1->first;
// Merge rhs into lhs when the two free blocks are exactly adjacent in memory;
// the surviving iterator is returned on elided lines.
244 blocks_t::iterator maybe_consolidate_blocks(blocks_t::iterator lhs, blocks_t::iterator rhs)
noexcept {
// Adjacent iff lhs base + lhs size lands exactly on rhs base.
245 if(
static_cast<char*
>(lhs->first) + lhs->second == rhs->first) {
// Grow lhs's size-index entry in place via node extraction (no reallocation),
// then drop rhs's entries from both indexes.
247 auto node = free_blocks_by_size.extract({lhs->second, lhs->first});
249 node.value().first += rhs->second;
250 free_blocks_by_size.insert(std::move(node));
251 free_blocks_by_size.erase({rhs->second, rhs->first});
253 lhs->second += rhs->second;
254 free_blocks.erase(rhs);
// Round `offset` up to the next multiple of the OS page size
// (queried at runtime; page sizes are powers of two, so mask arithmetic works).
static std::size_t round_to_page(std::size_t offset) {
   const auto mask = static_cast<std::size_t>(::sysconf(_SC_PAGESIZE)) - 1;
   return (offset + mask) & ~mask;
}
// Allocator for generated code/data that bumps within a pre-reserved mapping;
// in JIT mode the finished code is handed to jit_allocator (see end_code).
267 class growable_allocator {
// Round `offset` up to the next multiple of align_amt (compile-time constant).
// The mask trick `(x + a - 1) & ~(a - 1)` is only correct when the alignment
// is a power of two, so reject anything else at compile time rather than
// silently mis-aligning allocations.
template<std::size_t align_amt>
static constexpr size_t align_offset(size_t offset) {
   static_assert(align_amt != 0 && (align_amt & (align_amt - 1)) == 0,
                 "align_amt must be a non-zero power of two");
   return (offset + align_amt - 1) & ~(align_amt - 1);
}
// align_to_page (body fragment; signature elided): round up to the runtime
// page size.
275 std::size_t pagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
277 return (offset + pagesize - 1) & ~(pagesize - 1);
// Fragment: make the whole region writable.  NOTE(review): the mprotect
// return value is not checked on this line -- verify in the full source.
288 mprotect((
char*)
_base,
_size, PROT_READ | PROT_WRITE);
// alloc<T> fragment: compile-time guard that T's alignment divides the
// reserved maximum, so aligned bumps cannot overshoot the region.
301 template <
typename T>
303 static_assert(
max_memory_size %
alignof(
T) == 0,
"alignment must divide max_memory_size.");
// end_code fragment: the finished code must live inside this allocator's
// own region...
326 assert((
char*)code_base >=
_base);
// ...and in JIT mode it is placed in memory from the shared jit_allocator,
// temporarily made read/write here (presumably flipped to executable on
// elided lines -- confirm in the full source).
331 if constexpr (IsJit) {
333 void * executable_code = jit_alloc.alloc(
_code_size);
334 int err = mprotect(executable_code,
_code_size, PROT_READ | PROT_WRITE);
// reclaim<T> fragment: only the most recent allocation may be returned -- the
// freed range must end exactly at the current bump cursor (strict LIFO).
358 template <
typename T>
361 SYS_VM_ASSERT( size == 0 || (
char*)(ptr + size) == (
_base +
_offset), wasm_bad_alloc,
"reclaiming memory must be strictly LIFO");
// fixed_stack_allocator<T> fragments (constructor): reserve the maximum
// address range with PROT_NONE (touching past the live area faults), then
// unlock only the first max_size elements for read/write.
390 template <
typename T>
397 template <
typename U>
402 raw = (
T*)mmap(NULL,
max_memory, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
403 SYS_VM_ASSERT( raw != MAP_FAILED, wasm_bad_alloc,
"mmap failed to alloca pages" );
// NOTE(review): this mprotect's return value is not checked on this line.
404 mprotect(raw, max_size *
sizeof(
T), PROT_READ | PROT_WRITE);
// Linear-memory allocator for a wasm instance: a fixed PROT_NONE reservation
// whose pages are unlocked/relocked with mprotect as the instance grows.
409 class wasm_allocator {
// alloc<T>(size) fragment: grant `size` more wasm pages.
415 template <
typename T>
// Zero-page growth is a no-op.
417 if (size == 0)
return;
418 SYS_VM_ASSERT(page >= 0, wasm_bad_alloc,
"require memory to allocate");
// Make the newly granted pages read/write.
420 int err = mprotect(raw + (
page_size * page), (
page_size * size), PROT_READ | PROT_WRITE);
// free<T>(size) fragment: mirror of alloc -- a zero-page request is a no-op,
// and there must be at least one page outstanding to give back.
426 template <
typename T>
428 if (size == 0)
return;
429 SYS_VM_ASSERT(page >= 0, wasm_bad_alloc,
"require memory to deallocate");
// Destructor fragment: the mapping is one OS page wider on each side of the
// wasm region (guard pages), so unmap starting at raw - pagesize.
436 std::size_t syspagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
437 munmap(raw - syspagesize,
max_memory + 2*syspagesize);
// Constructor fragment: reserve max_memory plus two guard pages as PROT_NONE.
// The adjustment of raw past the leading guard page is on an elided line --
// the destructor above addresses it as raw - syspagesize.
440 std::size_t syspagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
441 raw = (
char*)mmap(NULL,
max_memory + 2*syspagesize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
442 SYS_VM_ASSERT( raw != MAP_FAILED, wasm_bad_alloc,
"mmap failed to alloca pages" );
// Leading guard page is made readable (not writable).  NOTE(review): intent
// not visible here; confirm why PROT_READ rather than PROT_NONE.
443 int err = mprotect(raw, syspagesize, PROT_READ);
// reset(new_pages) fragments: re-arm the leading guard page as readable, then
// grow or shrink the accessible page count toward new_pages (the actual
// alloc/free calls sit on elided lines between the branches).
456 std::size_t syspagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
457 int err = mprotect(raw - syspagesize, syspagesize, PROT_READ);
461 if(new_pages >
static_cast<uint32_t>(page)) {
463 }
else if(new_pages <
static_cast<uint32_t>(page)) {
// Second reset-overload fragment: seal the guard page and the whole used
// region back to PROT_NONE.
471 std::size_t syspagesize =
static_cast<std::size_t
>(::sysconf(_SC_PAGESIZE));
473 int err = mprotect(raw - syspagesize,
page_size * page + syspagesize, PROT_NONE);
// Accessor fragment (create_pointer<T>-style): reinterpret the base of linear
// memory as T*; any offset arithmetic is on elided lines.
479 template <
typename T>
481 return reinterpret_cast<T*
>(raw);
// Dangling template header for the next member (body elided).
483 template <
typename T>
void reclaim(const T *ptr, size_t size=0)
bounded_allocator(size_t size)
std::unique_ptr< uint8_t[]> raw
static std::size_t align_to_page(std::size_t offset)
contiguous_allocator(size_t size)
void reclaim(const T *ptr, size_t size=0)
static constexpr size_t align_offset(size_t offset)
fixed_stack_allocator(size_t max_size)
growable_allocator(size_t size)
static constexpr size_t max_memory_size
const void * get_code_start() const
static constexpr size_t align_offset(size_t offset)
static constexpr size_t chunk_size
void enable_code(bool is_jit)
static std::size_t align_to_page(std::size_t offset)
void reclaim(const T *ptr, size_t size=0)
void end_code(void *code_base)
static jit_allocator & instance()
void free(void *ptr) noexcept
void * alloc(std::size_t size)
stack_allocator(std::size_t min_size)
T * create_pointer(uint32_t offset)
void free(std::size_t size)
void alloc(size_t size=1)
bool is_in_region(char *p)
void reset(uint32_t new_pages)
int32_t get_current_page() const
#define T(meth, val, expected)
#define SYS_VM_ASSERT(expr, exc_type, msg)
memset(pInfo->slotDescription, ' ', 64)
memcpy((char *) pInfo->slotDescription, s, l)