execution_context.hpp
#pragma once

#include <sysio/vm/allocator.hpp>
#include <sysio/vm/constants.hpp>
#include <sysio/vm/exceptions.hpp>
#include <sysio/vm/execution_interface.hpp>
#include <sysio/vm/host_function.hpp>
#include <sysio/vm/opcodes.hpp>
#include <sysio/vm/signals.hpp>
#include <sysio/vm/types.hpp>
#include <sysio/vm/utils.hpp>
#include <sysio/vm/wasm_stack.hpp>

#include <algorithm>
#include <cassert>
#include <signal.h>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <limits>
#include <optional>
#include <string_view>
#include <system_error>
#include <utility>

// OSX requires _XOPEN_SOURCE to #include <ucontext.h>
#ifdef __APPLE__
#ifndef _XOPEN_SOURCE
#define _XOPEN_SOURCE 700
#endif
#endif
#include <ucontext.h>

namespace sysio { namespace vm {

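   // Stand-in invoker used when no host functions are registered (Host == std::nullptr_t).
   // Calling it is a logic error: a module that imports host functions cannot have been linked.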
   struct null_host_functions {
      template<typename... A>
      void operator()(A&&...) const {
         SYS_VM_ASSERT(false, wasm_interpreter_exception,
                       "Should never get here because it's impossible to link a module "
                       "that imports any host functions, when no host functions are available");
      }
   };

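   // Traits that map a HostFunctions specialization to its host object, type converter and
   // invoker types. The std::nullptr_t specializations cover execution without a host.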
   namespace detail {
      template <typename HostFunctions>
      struct host_type {
         using type = typename HostFunctions::host_type_t;
      };
      template <>
      struct host_type<std::nullptr_t> {
         using type = std::nullptr_t;
      };

      template <typename HF>
      using host_type_t = typename host_type<HF>::type;

      template <typename HostFunctions>
      struct type_converter {
         using type = typename HostFunctions::type_converter_t;
      };
      template <>
      struct type_converter<std::nullptr_t> {
         using type = sysio::vm::type_converter<std::nullptr_t>;
      };

      template <typename HF>
      using type_converter_t = typename type_converter<HF>::type;

      template <typename HostFunctions>
      struct host_invoker {
         using type = HostFunctions;
      };
      template <>
      struct host_invoker<std::nullptr_t> {
         using type = null_host_functions;
      };
      template <typename HF>
      using host_invoker_t = typename host_invoker<HF>::type;
   }

   template<typename Derived, typename Host>
   class execution_context_base {
      using host_type = detail::host_type_t<Host>;
    public:
      Derived& derived() { return static_cast<Derived&>(*this); }
      execution_context_base(module& m) : _mod(m) {}

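      // Grow (or shrink, for negative page counts) linear memory and return the previous
      // size in pages, or -1 if the request exceeds the declared or configured maximum.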
      inline int32_t grow_linear_memory(int32_t pages) {
         const int32_t sz = _wasm_alloc->get_current_page();
         if (pages < 0) {
            if (sz + pages < 0)
               return -1;
            _wasm_alloc->free<char>(-pages);
         } else {
            if (!_mod.memories.size() || _max_pages - sz < static_cast<uint32_t>(pages) ||
                (_mod.memories[0].limits.flags && (static_cast<int32_t>(_mod.memories[0].limits.maximum) - sz < pages)))
               return -1;
            _wasm_alloc->alloc<char>(pages);
         }
         return sz;
      }

      inline int32_t current_linear_memory() const { return _wasm_alloc->get_current_page(); }

      inline void exit(std::error_code err = std::error_code()) {
         // FIXME: system_error?
         _error_code = err;
         throw wasm_exit_exception{"Exiting"};
      }

      inline module& get_module() { return _mod; }
      inline void set_wasm_allocator(wasm_allocator* alloc) { _wasm_alloc = alloc; }
      inline auto get_wasm_allocator() { return _wasm_alloc; }
      inline char* linear_memory() { return _linear_memory; }
      inline auto& get_operand_stack() { return _os; }
      inline const auto& get_operand_stack() const { return _os; }
      inline auto get_interface() { return execution_interface{ _linear_memory, &_os }; }
      void set_max_pages(std::uint32_t max_pages) { _max_pages = std::min(max_pages, static_cast<std::uint32_t>(vm::max_pages)); }

      inline std::error_code get_error_code() const { return _error_code; }

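      // Re-initialize execution state: rebind the base pointer, size linear memory to the
      // module's initial page count, copy the data segments in, and restore mutable globals
      // to their initial values.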
      inline void reset() {
         SYS_VM_ASSERT(_mod.error == nullptr, wasm_interpreter_exception, _mod.error);

         _linear_memory = _wasm_alloc->get_base_ptr<char>();
         if(_mod.memories.size()) {
            SYS_VM_ASSERT(_mod.memories[0].limits.initial <= _max_pages, wasm_bad_alloc, "Cannot allocate initial linear memory.");
            _wasm_alloc->reset(_mod.memories[0].limits.initial);
         } else
            _wasm_alloc->reset();

         for (uint32_t i = 0; i < _mod.data.size(); i++) {
            const auto& data_seg = _mod.data[i];
            uint32_t offset = data_seg.offset.value.i32; // force to unsigned
            auto available_memory = _mod.memories[0].limits.initial * static_cast<uint64_t>(page_size);
            auto required_memory = static_cast<uint64_t>(offset) + data_seg.data.size();
            SYS_VM_ASSERT(required_memory <= available_memory, wasm_memory_exception, "data out of range");
            auto addr = _linear_memory + offset;
            memcpy((char*)(addr), data_seg.data.raw(), data_seg.data.size());
         }

         // reset the mutable globals
         for (uint32_t i = 0; i < _mod.globals.size(); i++) {
            if (_mod.globals[i].type.mutability)
               _mod.globals[i].current = _mod.globals[i].init;
         }
      }

      template <typename Visitor, typename... Args>
      inline std::optional<operand_stack_elem> execute(host_type* host, Visitor&& visitor, const std::string_view func,
                                                       Args... args) {
         uint32_t func_index = _mod.get_exported_function(func);
         return derived().execute(host, std::forward<Visitor>(visitor), func_index, std::forward<Args>(args)...);
      }

      template <typename Visitor, typename... Args>
      inline void execute_start(host_type* host, Visitor&& visitor) {
         if (_mod.start != std::numeric_limits<uint32_t>::max())
            derived().execute(host, std::forward<Visitor>(visitor), _mod.start);
      }

    protected:

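      // Check, at the call boundary, that the number and wasm types of the C++ arguments
      // match the function signature being invoked.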
      template<typename... Args>
      static void type_check_args(const func_type& ft, Args&&...) {
         SYS_VM_ASSERT(sizeof...(Args) == ft.param_types.size(), wasm_interpreter_exception, "wrong number of arguments");
         uint32_t i = 0;
         SYS_VM_ASSERT((... && (to_wasm_type_v<detail::type_converter_t<Host>, Args> == ft.param_types.at(i++))), wasm_interpreter_exception, "unexpected argument type");
      }

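      // Translate the signals raised by out-of-bounds linear-memory accesses and arithmetic
      // faults into a VM exception that unwinds back into execute().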
      static void handle_signal(int sig) {
         switch(sig) {
            case SIGSEGV:
            case SIGBUS:
            case SIGFPE:
               break;
            default:
               /* TODO fix this */
               assert(!"??????");
         }
         throw wasm_memory_exception{ "wasm memory out-of-bounds" };
      }

      char*                        _linear_memory = nullptr;
      module&                      _mod;
      wasm_allocator*              _wasm_alloc    = nullptr;
      uint32_t                     _max_pages     = max_pages;
      detail::host_invoker_t<Host> _rhf;
      std::error_code              _error_code;
      operand_stack                _os;
   };

   struct jit_visitor { template<typename T> jit_visitor(T&&) {} };

   template<typename Host>
   class null_execution_context : public execution_context_base<null_execution_context<Host>, Host> {
      using base_type = execution_context_base<null_execution_context<Host>, Host>;
    public:
      null_execution_context(module& m, std::uint32_t max_call_depth) : base_type(m) {}
   };

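   // Holds the saved native frame pointers used by the JIT backtrace support; the
   // non-backtrace specialization is empty so it adds no storage.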
   template<bool EnableBacktrace>
   struct frame_info_holder {};
   template<>
   struct frame_info_holder<true> {
      void* volatile _bottom_frame = nullptr;
      void* volatile _top_frame = nullptr;
   };

   template<typename Host, bool EnableBacktrace = false>
   class jit_execution_context : public frame_info_holder<EnableBacktrace>, public execution_context_base<jit_execution_context<Host, EnableBacktrace>, Host> {
      using base_type = execution_context_base<jit_execution_context<Host, EnableBacktrace>, Host>;
      using host_type = detail::host_type_t<Host>;
    public:
      using base_type::execute;
      using base_type::base_type;
      using base_type::_mod;
      using base_type::_rhf;
      using base_type::_error_code;
      using base_type::handle_signal;
      using base_type::get_operand_stack;
      using base_type::linear_memory;
      using base_type::get_interface;

      jit_execution_context(module& m, std::uint32_t max_call_depth) : base_type(m), _remaining_call_depth(max_call_depth) {}

      void set_max_call_depth(std::uint32_t max_call_depth) {
         _remaining_call_depth = max_call_depth;
      }

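      // Bridge from JIT code to a host function: the raw native arguments are pushed onto
      // the operand stack, the registered host invoker runs, and any result is copied back
      // into a zero-initialized native_value.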
      native_value call_host_function(native_value* stack, uint32_t index) {
         const auto& ft = _mod.get_function_type(index);
         uint32_t num_params = ft.param_types.size();
#ifndef NDEBUG
         uint32_t original_operands = get_operand_stack().size();
#endif
         for(uint32_t i = 0; i < ft.param_types.size(); ++i) {
            switch(ft.param_types[i]) {
               case i32: get_operand_stack().push(i32_const_t{stack[num_params - i - 1].i32}); break;
               case i64: get_operand_stack().push(i64_const_t{stack[num_params - i - 1].i64}); break;
               case f32: get_operand_stack().push(f32_const_t{stack[num_params - i - 1].f32}); break;
               case f64: get_operand_stack().push(f64_const_t{stack[num_params - i - 1].f64}); break;
               default: assert(!"Unexpected type in param_types.");
            }
         }
         _rhf(_host, get_interface(), _mod.import_functions[index]);
         native_value result{uint64_t{0}};
         // guarantee that the junk bits are zero, to avoid problems.
         auto set_result = [&result](auto val) { std::memcpy(&result, &val, sizeof(val)); };
         if(ft.return_count) {
            operand_stack_elem el = get_operand_stack().pop();
            switch(ft.return_type) {
               case i32: set_result(el.to_ui32()); break;
               case i64: set_result(el.to_ui64()); break;
               case f32: set_result(el.to_f32()); break;
               case f64: set_result(el.to_f64()); break;
               default: assert(!"Unexpected function return type.");
            }
         }

         assert(get_operand_stack().size() == original_operands);
         return result;
      }

      inline void reset() {
         base_type::reset();
         get_operand_stack().eat(0);
      }

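      // Top-level JIT entry point: type-checks and marshals the arguments, sizes an
      // alternate stack from the module's maximum stack depth and the call-depth limit,
      // then runs the jitted code (or a host import) under the VM signal handler.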
      template <typename... Args>
      inline std::optional<operand_stack_elem> execute(host_type* host, jit_visitor, uint32_t func_index, Args... args) {
         auto saved_host = _host;
         auto saved_os_size = get_operand_stack().size();
         auto g = scope_guard([&](){ _host = saved_host; get_operand_stack().eat(saved_os_size); });

         _host = host;

         const func_type& ft = _mod.get_function_type(func_index);
         this->type_check_args(ft, static_cast<Args&&>(args)...);
         native_value result;
         native_value args_raw[] = { transform_arg(static_cast<Args&&>(args))... };

         try {
            if (func_index < _mod.get_imported_functions_size()) {
               std::reverse(args_raw + 0, args_raw + sizeof...(Args));
               result = call_host_function(args_raw, func_index);
            } else {
               std::size_t maximum_stack_usage =
                  (_mod.maximum_stack + 2 /*frame ptr + return ptr*/) * (_remaining_call_depth + 1) +
                  sizeof...(Args) + 4 /* scratch space */;
               stack_allocator alt_stack(maximum_stack_usage * sizeof(native_value));
               // reserve 24 bytes for data accessed by inline assembly
               void* stack = alt_stack.top();
               if(stack) {
                  stack = static_cast<char*>(stack) - 24;
               }
               auto fn = reinterpret_cast<native_value (*)(void*, void*)>(_mod.code[func_index - _mod.get_imported_functions_size()].jit_code_offset + _mod.allocator._code_base);

               if constexpr(EnableBacktrace) {
                  sigset_t block_mask;
                  sigemptyset(&block_mask);
                  sigaddset(&block_mask, SIGPROF);
                  pthread_sigmask(SIG_BLOCK, &block_mask, nullptr);
                  auto restore = scope_guard{[this, &block_mask] {
                     this->_top_frame = nullptr;
                     this->_bottom_frame = nullptr;
                     pthread_sigmask(SIG_UNBLOCK, &block_mask, nullptr);
                  }};

                  vm::invoke_with_signal_handler([&]() {
                     result = execute<sizeof...(Args)>(args_raw, fn, this, base_type::linear_memory(), stack);
                  }, &handle_signal);
               } else {
                  vm::invoke_with_signal_handler([&]() {
                     result = execute<sizeof...(Args)>(args_raw, fn, this, base_type::linear_memory(), stack);
                  }, &handle_signal);
               }
            }
         } catch(wasm_exit_exception&) {
            return {};
         }

         if(!ft.return_count)
            return {};
         else switch (ft.return_type) {
            case i32: return {i32_const_t{result.i32}};
            case i64: return {i64_const_t{result.i64}};
            case f32: return {f32_const_t{result.f32}};
            case f64: return {f64_const_t{result.f64}};
            default: assert(!"Unexpected function return type");
         }
         __builtin_unreachable();
      }

#ifdef __x86_64__
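      // Walk the saved frame pointers of jitted frames for the profiler. The instruction
      // pointer from the interrupting context is inspected so that frames interrupted in a
      // prologue or epilogue are not dropped.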
      int backtrace(void** out, int count, void* uc) const {
         static_assert(EnableBacktrace);
         void* end = this->_top_frame;
         if(end == nullptr) return 0;
         void* rbp;
         int i = 0;
         if(this->_bottom_frame) {
            rbp = this->_bottom_frame;
         } else if(count != 0) {
            if(uc) {
#ifdef __APPLE__
               auto rip = reinterpret_cast<unsigned char*>(static_cast<ucontext_t*>(uc)->uc_mcontext->__ss.__rip);
               rbp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext->__ss.__rbp);
               auto rsp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext->__ss.__rsp);
#elif defined __FreeBSD__
               auto rip = reinterpret_cast<unsigned char*>(static_cast<ucontext_t*>(uc)->uc_mcontext.mc_rip);
               rbp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext.mc_rbp);
               auto rsp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext.mc_rsp);
#else
               auto rip = reinterpret_cast<unsigned char*>(static_cast<ucontext_t*>(uc)->uc_mcontext.gregs[REG_RIP]);
               rbp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext.gregs[REG_RBP]);
               auto rsp = reinterpret_cast<void*>(static_cast<ucontext_t*>(uc)->uc_mcontext.gregs[REG_RSP]);
#endif
               out[i++] = rip;
               // If we were interrupted in the function prologue or epilogue,
               // avoid dropping the parent frame.
               auto code_base = reinterpret_cast<const unsigned char*>(_mod.allocator.get_code_start());
               auto code_end = code_base + _mod.allocator._code_size;
               if(rip >= code_base && rip < code_end && count > 1) {
                  // function prologue
                  if(*reinterpret_cast<const unsigned char*>(rip) == 0x55) {
                     if(rip != *static_cast<void**>(rsp)) { // Ignore fake frame set up for softfloat calls
                        out[i++] = *static_cast<void**>(rsp);
                     }
                  } else if(rip[0] == 0x48 && rip[1] == 0x89 && (rip[2] == 0xe5 || rip[2] == 0x27)) {
                     if((rip - 1) != static_cast<void**>(rsp)[1]) { // Ignore fake frame set up for softfloat calls
                        out[i++] = static_cast<void**>(rsp)[1];
                     }
                  }
                  // function epilogue
                  else if(rip[0] == 0xc3) {
                     out[i++] = *static_cast<void**>(rsp);
                  }
               }
            } else {
               rbp = __builtin_frame_address(0);
            }
         }
         while(i < count) {
            void* rip = static_cast<void**>(rbp)[1];
            if(rbp == end) break;
            out[i++] = rip;
            rbp = *static_cast<void**>(rbp);
         }
         return i;
      }

      static constexpr bool async_backtrace() { return EnableBacktrace; }
#endif

    protected:

      template<typename T>
      native_value transform_arg(T&& value) {
         // make sure that the garbage bits are always zero.
         native_value result;
         std::memset(&result, 0, sizeof(result));
         auto tc = detail::type_converter_t<Host>{_host, get_interface()};
         auto transformed_value = detail::resolve_result(tc, static_cast<T&&>(value)).data;
         std::memcpy(&result, &transformed_value, sizeof(transformed_value));
         return result;
      }

#ifdef __x86_64__
      /* TODO abstract this and clean this up a bit, this really doesn't belong here */
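      // Trampoline into jitted code: switches to the alternate stack (keeping 24 bytes of
      // scratch space), resets MXCSR to its default, pushes Count raw arguments, and calls
      // the generated function with the context and linear memory passed in rdi/rsi.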
      template<int Count>
      static native_value execute(native_value* data, native_value (*fun)(void*, void*), jit_execution_context* context, void* linear_memory, void* stack) {
         static_assert(sizeof(native_value) == 8, "8-bytes expected for native_value");
         native_value result;
         unsigned stack_check = context->_remaining_call_depth;
         // TODO refactor this whole thing to not need all of this, should be generated from the backend
         // currently ignoring register c++17 warning
         register void* stack_top asm ("r12") = stack;
         // 0x1f80 is the default MXCSR value
#define ASM_CODE(before, after) \
         asm volatile( \
            "test %[stack_top], %[stack_top]; " \
            "jnz 3f; " \
            "mov %%rsp, %[stack_top]; " \
            "sub $0x98, %%rsp; " /* red-zone + 24 bytes*/ \
            "mov %[stack_top], (%%rsp); " \
            "jmp 4f; " \
            "3: " \
            "mov %%rsp, (%[stack_top]); " \
            "mov %[stack_top], %%rsp; " \
            "4: " \
            "stmxcsr 16(%%rsp); " \
            "mov $0x1f80, %%rax; " \
            "mov %%rax, 8(%%rsp); " \
            "ldmxcsr 8(%%rsp); " \
            "mov %[Count], %%rax; " \
            "test %%rax, %%rax; " \
            "jz 2f; " \
            "1: " \
            "movq (%[data]), %%r8; " \
            "lea 8(%[data]), %[data]; " \
            "pushq %%r8; " \
            "dec %%rax; " \
            "jnz 1b; " \
            "2: " \
            before \
            "callq *%[fun]; " \
            after \
            "add %[StackOffset], %%rsp; " \
            "ldmxcsr 16(%%rsp); " \
            "mov (%%rsp), %%rsp; " \
            /* Force explicit register allocation, because otherwise it's too hard to get the clobbers right. */ \
            : [result] "=&a" (result), /* output, reused as a scratch register */ \
              [data] "+d" (data), [fun] "+c" (fun), [stack_top] "+r" (stack_top) /* input only, but may be clobbered */ \
            : [context] "D" (context), [linear_memory] "S" (linear_memory), \
              [StackOffset] "n" (Count*8), [Count] "n" (Count), "b" (stack_check) /* input */ \
            : "memory", "cc", /* clobber */ \
              /* call clobbered registers, that are not otherwise used */ \
              /*"rax", "rcx", "rdx", "rsi", "rdi",*/ "r8", "r9", "r10", "r11", \
              "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7", \
              "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15", \
              "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7", \
              "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)" \
            );
         if constexpr (!EnableBacktrace) {
            ASM_CODE("", "");
         } else {
            ASM_CODE("movq %%rbp, 8(%[context]); ",
                     "xor %[fun], %[fun]; "
                     "mov %[fun], 8(%[context]); ");
         }
#undef ASM_CODE
         return result;
      }
#endif

      host_type*    _host = nullptr;
      std::uint32_t _remaining_call_depth;
   };

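   // Rough usage sketch (illustrative only; assumes a parsed module `mod`, a wasm_allocator
   // `wa`, a registered host-functions type `MyHostFunctions` with host object `host`, and
   // the interpreter's visitor as used by the interpreter backends):
   //
   //    execution_context<MyHostFunctions> ctx(mod, max_call_depth);
   //    ctx.set_wasm_allocator(&wa);
   //    ctx.reset();
   //    ctx.execute(&host, interpret_visitor(ctx), "apply", r, c, a);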
   template <typename Host>
   class execution_context : public execution_context_base<execution_context<Host>, Host> {
      using base_type = execution_context_base<execution_context<Host>, Host>;
      using host_type = detail::host_type_t<Host>;
    public:
      using base_type::_mod;
      using base_type::_rhf;
      using base_type::_error_code;
      using base_type::handle_signal;
      using base_type::get_operand_stack;
      using base_type::linear_memory;
      using base_type::get_interface;

      execution_context(module& m, uint32_t max_call_depth)
         : base_type(m), _base_allocator{max_call_depth*sizeof(activation_frame)},
           _as{max_call_depth, _base_allocator}, _halt(exit_t{}) {}

      void set_max_call_depth(uint32_t max_call_depth) {
         static_assert(std::is_trivially_move_assignable_v<call_stack>, "This is seriously broken if call_stack move assignment might use the existing memory");
         std::size_t mem_size = max_call_depth*sizeof(activation_frame);
         if(mem_size > _base_allocator.mem_size) {
            _base_allocator = bounded_allocator{mem_size};
            _as = call_stack{max_call_depth, _base_allocator};
         } else if (max_call_depth != _as.capacity()) {
            _base_allocator.index = 0;
            _as = call_stack{max_call_depth, _base_allocator};
         }
      }

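      // Dispatch a call by function index: imported functions are routed straight to the
      // registered host invoker; wasm functions get a new activation frame, zeroed locals,
      // and the pc of their first instruction.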
      inline void call(uint32_t index) {
         // TODO validate index is valid
         if (index < _mod.get_imported_functions_size()) {
            // TODO validate only importing functions
            const auto& ft = _mod.types[_mod.imports[index].type.func_t];
            type_check(ft);
            inc_pc();
            push_call( activation_frame{ nullptr, 0 } );
            _rhf(_state.host, get_interface(), _mod.import_functions[index]);
            pop_call();
         } else {
            // const auto& ft = _mod.types[_mod.functions[index - _mod.get_imported_functions_size()]];
            // type_check(ft);
            push_call(index);
            setup_locals(index);
            set_pc( _mod.get_function_pc(index) );
         }
      }

      void print_stack() {
         std::cout << "STACK { ";
         for (int i = 0; i < get_operand_stack().size(); i++) {
            std::cout << "(" << i << ")";
            visit(overloaded { [&](i32_const_t el) { std::cout << "i32:" << el.data.ui << ", "; },
                               [&](i64_const_t el) { std::cout << "i64:" << el.data.ui << ", "; },
                               [&](f32_const_t el) { std::cout << "f32:" << el.data.f << ", "; },
                               [&](f64_const_t el) { std::cout << "f64:" << el.data.f << ", "; },
                               [&](auto el) { std::cout << "(INDEX " << el.index() << "), "; } }, get_operand_stack().get(i));
         }
         std::cout << " }\n";
      }

      inline uint32_t table_elem(uint32_t i) { return _mod.tables[0].table[i]; }
      inline void push_operand(operand_stack_elem el) { get_operand_stack().push(std::move(el)); }
      inline operand_stack_elem get_operand(uint32_t index) const { return get_operand_stack().get(_last_op_index + index); }
      inline void eat_operands(uint32_t index) { get_operand_stack().eat(index); }
      inline void compact_operand(uint32_t index) { get_operand_stack().compact(index); }
      inline void set_operand(uint32_t index, const operand_stack_elem& el) { get_operand_stack().set(_last_op_index + index, el); }
      inline uint32_t current_operands_index() const { return get_operand_stack().current_index(); }
      inline void push_call(activation_frame&& el) { _as.push(std::move(el)); }
      inline activation_frame pop_call() { return _as.pop(); }
      inline uint32_t call_depth() const { return _as.size(); }
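      // Push an activation frame for a wasm function. When Should_Exit is set (the
      // outermost call), the return pc points at the internal halt opcode so that
      // returning ends execution.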
      template <bool Should_Exit=false>
      inline void push_call(uint32_t index) {
         opcode* return_pc = static_cast<opcode*>(&_halt);
         if constexpr (!Should_Exit)
            return_pc = _state.pc + 1;

         _as.push( activation_frame{ return_pc, _last_op_index } );
         _last_op_index = get_operand_stack().size() - _mod.get_function_type(index).param_types.size();
      }

      inline void apply_pop_call(uint32_t num_locals, uint16_t return_count) {
         const auto& af = _as.pop();
         _state.pc = af.pc;
         _last_op_index = af.last_op_index;
         if (return_count)
            compact_operand(get_operand_stack().size() - num_locals - 1);
         else
            eat_operands(get_operand_stack().size() - num_locals);
      }
      inline operand_stack_elem pop_operand() { return get_operand_stack().pop(); }
      inline operand_stack_elem& peek_operand(size_t i = 0) { return get_operand_stack().peek(i); }
      inline operand_stack_elem get_global(uint32_t index) {
         SYS_VM_ASSERT(index < _mod.globals.size(), wasm_interpreter_exception, "global index out of range");
         const auto& gl = _mod.globals[index];
         switch (gl.type.content_type) {
            case types::i32: return i32_const_t{ *(uint32_t*)&gl.current.value.i32 };
            case types::i64: return i64_const_t{ *(uint64_t*)&gl.current.value.i64 };
            case types::f32: return f32_const_t{ gl.current.value.f32 };
            case types::f64: return f64_const_t{ gl.current.value.f64 };
            default: throw wasm_interpreter_exception{ "invalid global type" };
         }
      }

      inline void set_global(uint32_t index, const operand_stack_elem& el) {
         SYS_VM_ASSERT(index < _mod.globals.size(), wasm_interpreter_exception, "global index out of range");
         auto& gl = _mod.globals[index];
         SYS_VM_ASSERT(gl.type.mutability, wasm_interpreter_exception, "global is not mutable");
         visit(overloaded{ [&](const i32_const_t& i) {
                              SYS_VM_ASSERT(gl.type.content_type == types::i32, wasm_interpreter_exception,
                                            "expected i32 global type");
                              gl.current.value.i32 = i.data.ui;
                           },
                           [&](const i64_const_t& i) {
                              SYS_VM_ASSERT(gl.type.content_type == types::i64, wasm_interpreter_exception,
                                            "expected i64 global type");
                              gl.current.value.i64 = i.data.ui;
                           },
                           [&](const f32_const_t& f) {
                              SYS_VM_ASSERT(gl.type.content_type == types::f32, wasm_interpreter_exception,
                                            "expected f32 global type");
                              gl.current.value.f32 = f.data.ui;
                           },
                           [&](const f64_const_t& f) {
                              SYS_VM_ASSERT(gl.type.content_type == types::f64, wasm_interpreter_exception,
                                            "expected f64 global type");
                              gl.current.value.f64 = f.data.ui;
                           },
                           [](auto) { throw wasm_interpreter_exception{ "invalid global type" }; } },
               el);
      }

      inline bool is_true(const operand_stack_elem& el) {
         bool ret_val = false;
         visit(overloaded{ [&](const i32_const_t& i32) { ret_val = i32.data.ui; },
                           [&](auto) { throw wasm_invalid_element{ "should be an i32 type" }; } },
               el);
         return ret_val;
      }

      inline void type_check(const func_type& ft) {
         for (uint32_t i = 0; i < ft.param_types.size(); i++) {
            const auto& op = peek_operand((ft.param_types.size() - 1) - i);
            visit(overloaded{ [&](const i32_const_t&) {
                                 SYS_VM_ASSERT(ft.param_types[i] == types::i32, wasm_interpreter_exception,
                                               "function param type mismatch");
                              },
                              [&](const f32_const_t&) {
                                 SYS_VM_ASSERT(ft.param_types[i] == types::f32, wasm_interpreter_exception,
                                               "function param type mismatch");
                              },
                              [&](const i64_const_t&) {
                                 SYS_VM_ASSERT(ft.param_types[i] == types::i64, wasm_interpreter_exception,
                                               "function param type mismatch");
                              },
                              [&](const f64_const_t&) {
                                 SYS_VM_ASSERT(ft.param_types[i] == types::f64, wasm_interpreter_exception,
                                               "function param type mismatch");
                              },
                              [&](auto) { throw wasm_interpreter_exception{ "function param invalid type" }; } },
                  op);
         }
      }

      inline opcode* get_pc() const { return _state.pc; }
      inline void set_relative_pc(uint32_t pc_offset) {
         _state.pc = _mod.code[0].code + pc_offset;
      }
      inline void set_pc(opcode* pc) { _state.pc = pc; }
      inline void inc_pc(uint32_t offset=1) { _state.pc += offset; }
      inline void exit(std::error_code err = std::error_code()) {
         _error_code = err;
         _state.pc = &_halt;
         _state.exiting = true;
      }

      inline void reset() {
         base_type::reset();
         _state = execution_state{};
         get_operand_stack().eat(_state.os_index);
         _as.eat(_state.as_index);
      }

      template <typename Visitor, typename... Args>
      inline std::optional<operand_stack_elem> execute_func_table(host_type* host, Visitor&& visitor, uint32_t table_index,
                                                                  Args... args) {
         return execute(host, std::forward<Visitor>(visitor), table_elem(table_index), std::forward<Args>(args)...);
      }

      template <typename Visitor, typename... Args>
      inline std::optional<operand_stack_elem> execute(host_type* host, Visitor&& visitor, const std::string_view func,
                                                       Args... args) {
         uint32_t func_index = _mod.get_exported_function(func);
         return execute(host, std::forward<Visitor>(visitor), func_index, std::forward<Args>(args)...);
      }

      template <typename Visitor, typename... Args>
      inline void execute_start(host_type* host, Visitor&& visitor) {
         if (_mod.start != std::numeric_limits<uint32_t>::max())
            execute(host, std::forward<Visitor>(visitor), _mod.start);
      }

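      // Main interpreter entry point: saves the calling context, type-checks and pushes the
      // arguments, pushes the outermost frame, then either invokes a host import directly or
      // runs the dispatch loop under the VM signal handler. The scope_guard restores the
      // previous state even if an exception unwinds.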
      template <typename Visitor, typename... Args>
      inline std::optional<operand_stack_elem> execute(host_type* host, Visitor&& visitor, uint32_t func_index, Args... args) {
         SYS_VM_ASSERT(func_index < std::numeric_limits<uint32_t>::max(), wasm_interpreter_exception,
                       "cannot execute function, function not found");

         auto last_last_op_index = _last_op_index;

         // save the state of the original calling context
         execution_state saved_state = _state;

         _state.host = host;
         _state.as_index = _as.size();
         _state.os_index = get_operand_stack().size();

         auto cleanup = scope_guard([&]() {
            get_operand_stack().eat(_state.os_index);
            _as.eat(_state.as_index);
            _state = saved_state;

            _last_op_index = last_last_op_index;
         });

         this->type_check_args(_mod.get_function_type(func_index), static_cast<Args&&>(args)...);
         push_args(args...);
         push_call<true>(func_index);

         if (func_index < _mod.get_imported_functions_size()) {
            _rhf(_state.host, get_interface(), _mod.import_functions[func_index]);
         } else {
            _state.pc = _mod.get_function_pc(func_index);
            setup_locals(func_index);
            vm::invoke_with_signal_handler([&]() {
               execute(visitor);
            }, &handle_signal);
         }

         if (_mod.get_function_type(func_index).return_count && !_state.exiting) {
            return pop_operand();
         } else {
            return {};
         }
      }

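      // Branch helper used by the control-flow opcodes. The high bit of pop_info signals
      // that the branch carries a result value that must be preserved on top of the stack;
      // the low 31 bits give the number of operands to discard.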
      inline void jump(uint32_t pop_info, uint32_t new_pc) {
         set_relative_pc(new_pc);
         if ((pop_info & 0x80000000u)) {
            const auto& op = pop_operand();
            eat_operands(get_operand_stack().size() - ((pop_info & 0x7FFFFFFFu) - 1));
            push_operand(op);
         } else {
            eat_operands(get_operand_stack().size() - pop_info);
         }
      }

      // This isn't async-signal-safe. Cross fingers and hope for the best.
      // It's only used for profiling.
      int backtrace(void** data, int limit, void* uc) const {
         int out = 0;
         if(limit != 0) {
            data[out++] = _state.pc;
         }
         for(int i = 0; out < limit && i < _as.size(); ++i) {
            data[out++] = _as.get_back(i).pc;
         }
         return out;
      }

    private:

      template <typename... Args>
      void push_args(Args&&... args) {
         auto tc = detail::type_converter_t<Host>{ _host, get_interface() };
         (void)tc;
         (... , push_operand(detail::resolve_result(tc, std::move(args))));
      }

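      // Push a zero-initialized operand for every declared local of the function about to run.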
      inline void setup_locals(uint32_t index) {
         const auto& fn = _mod.code[index - _mod.get_imported_functions_size()];
         for (uint32_t i = 0; i < fn.locals.size(); i++) {
            for (uint32_t j = 0; j < fn.locals[i].count; j++)
               switch (fn.locals[i].type) {
                  case types::i32: push_operand(i32_const_t{ (uint32_t)0 }); break;
                  case types::i64: push_operand(i64_const_t{ (uint64_t)0 }); break;
                  case types::f32: push_operand(f32_const_t{ (uint32_t)0 }); break;
                  case types::f64: push_operand(f64_const_t{ (uint64_t)0 }); break;
                  default: throw wasm_interpreter_exception{ "invalid function param type" };
               }
         }
      }

#define CREATE_TABLE_ENTRY(NAME, CODE) &&ev_label_##NAME,
#define CREATE_LABEL(NAME, CODE) \
   ev_label_##NAME : visitor(ev_variant->template get<sysio::vm::SYS_VM_OPCODE_T(NAME)>()); \
   ev_variant = _state.pc; \
   goto* dispatch_table[ev_variant->index()];
#define CREATE_EXIT_LABEL(NAME, CODE) ev_label_##NAME : \
   return;
#define CREATE_EMPTY_LABEL(NAME, CODE) ev_label_##NAME : \
   throw wasm_interpreter_exception{"empty operand"};

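      // Threaded-dispatch interpreter loop: a table of computed-goto labels, one per opcode
      // group in the same order as the opcode variant, indexed by the current instruction's
      // variant index. Each label invokes the visitor and jumps directly to the next opcode.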
      template <typename Visitor>
      void execute(Visitor&& visitor) {
         static void* dispatch_table[] = {
            SYS_VM_CONTROL_FLOW_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_BR_TABLE_OP(CREATE_TABLE_ENTRY)
            SYS_VM_RETURN_OP(CREATE_TABLE_ENTRY)
            SYS_VM_CALL_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_CALL_IMM_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_PARAMETRIC_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_VARIABLE_ACCESS_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_MEMORY_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_I32_CONSTANT_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_I64_CONSTANT_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_F32_CONSTANT_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_F64_CONSTANT_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_COMPARISON_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_NUMERIC_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_CONVERSION_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_EXIT_OP(CREATE_TABLE_ENTRY)
            SYS_VM_EMPTY_OPS(CREATE_TABLE_ENTRY)
            SYS_VM_ERROR_OPS(CREATE_TABLE_ENTRY)
            &&__ev_last
         };
         auto* ev_variant = _state.pc;
         goto *dispatch_table[ev_variant->index()];
         while (1) {
            SYS_VM_CONTROL_FLOW_OPS(CREATE_LABEL);
            SYS_VM_BR_TABLE_OP(CREATE_LABEL);
            SYS_VM_RETURN_OP(CREATE_LABEL);
            SYS_VM_CALL_OPS(CREATE_LABEL);
            SYS_VM_CALL_IMM_OPS(CREATE_LABEL);
            SYS_VM_PARAMETRIC_OPS(CREATE_LABEL);
            SYS_VM_VARIABLE_ACCESS_OPS(CREATE_LABEL);
            SYS_VM_MEMORY_OPS(CREATE_LABEL);
            SYS_VM_I32_CONSTANT_OPS(CREATE_LABEL);
            SYS_VM_I64_CONSTANT_OPS(CREATE_LABEL);
            SYS_VM_F32_CONSTANT_OPS(CREATE_LABEL);
            SYS_VM_F64_CONSTANT_OPS(CREATE_LABEL);
            SYS_VM_COMPARISON_OPS(CREATE_LABEL);
            SYS_VM_NUMERIC_OPS(CREATE_LABEL);
            SYS_VM_CONVERSION_OPS(CREATE_LABEL);
            SYS_VM_EXIT_OP(CREATE_EXIT_LABEL);
            SYS_VM_EMPTY_OPS(CREATE_EMPTY_LABEL);
            SYS_VM_ERROR_OPS(CREATE_LABEL);
            __ev_last:
               throw wasm_interpreter_exception{"should never reach here"};
         }
      }

#undef CREATE_EMPTY_LABEL
#undef CREATE_LABEL
#undef CREATE_TABLE_ENTRY

      struct execution_state {
         host_type* host = nullptr;
         uint32_t as_index = 0;
         uint32_t os_index = 0;
         opcode* pc = nullptr;
         bool exiting = false;
      };

      bounded_allocator _base_allocator = {
         (constants::max_call_depth + 1) * sizeof(activation_frame)
      };
      execution_state _state;
      uint32_t _last_op_index = 0;
      call_stack _as = { _base_allocator };
      opcode _halt;
      host_type* _host = nullptr;
   };
}} // namespace sysio::vm