1721 lines
50 KiB
C
1721 lines
50 KiB
C
#include <assert.h>
|
|
|
|
#include "apfl.h"
|
|
|
|
#include "alloc.h"
|
|
#include "bytecode.h"
|
|
#include "compile.h"
|
|
#include "context.h"
|
|
#include "format.h"
|
|
#include "hashmap.h"
|
|
#include "resizable.h"
|
|
#include "strings.h"
|
|
#include "value.h"
|
|
|
|
static void evaluate(apfl_ctx ctx, struct func_call_stack_entry *cse);
|
|
static void evaluate_matcher(apfl_ctx ctx, struct matcher_call_stack_entry *cse);
|
|
static void dispatch(apfl_ctx ctx, struct call_stack_entry *cse);
|
|
static void matcher_init_matching(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes);
|
|
|
|
static void
|
|
stack_must_drop(apfl_ctx ctx, apfl_stackidx index)
|
|
{
|
|
bool ok = apfl_stack_drop(ctx, index);
|
|
assert(ok /* stack_must_drop */);
|
|
}
|
|
|
|
/*
 * Shared body for get_argument()/get_matcher_argument(): fetch the element
 * at *i from the instruction stream into *arg and advance *i.  Expands to a
 * `return false` when the stream is exhausted, `return true` on success.
 * A macro (rather than a function) because the list and argument types
 * differ between the regular and the matcher instruction streams.
 */
#define ABSTRACT_GET_ARGUMENT(i, ilist, arg) \
    if (*i >= ilist->len) { \
        return false; \
    } \
    \
    *arg = ilist->instructions[(*i)++]; \
    return true;
|
|
|
|
/*
 * Shared body for must_get_argument()/must_get_matcher_argument(): like
 * ABSTRACT_GET_ARGUMENT, but a missing argument raises a corrupted-bytecode
 * error instead of returning false.
 */
#define ABSTRACT_MUST_GET_ARG(get, ctx, i, ilist, arg) \
    if (!get(i, ilist, arg)) { \
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode); \
    }
|
|
|
|
/*
 * Fetch the next instruction/argument from `ilist` at *i, advancing *i.
 * Returns false when the instruction stream is exhausted.
 */
static bool
get_argument(size_t *i, struct instruction_list *ilist, union instruction_or_arg *arg)
{
    ABSTRACT_GET_ARGUMENT(i, ilist, arg)
}
|
|
|
|
/*
 * Like get_argument(), but raises a corrupted-bytecode error when the
 * instruction stream is exhausted.
 */
static void
must_get_argument(apfl_ctx ctx, size_t *i, struct instruction_list *ilist, union instruction_or_arg *arg)
{
    ABSTRACT_MUST_GET_ARG(get_argument, ctx, i, ilist, arg)
}
|
|
|
|
/*
 * Matcher-stream variant of get_argument(): fetch the next matcher
 * instruction/argument at *i, advancing *i.  False when exhausted.
 */
static bool
get_matcher_argument(size_t *i, struct matcher_instruction_list *milist, union matcher_instruction_or_arg *arg)
{
    ABSTRACT_GET_ARGUMENT(i, milist, arg)
}
|
|
|
|
/*
 * Like get_matcher_argument(), but raises a corrupted-bytecode error when
 * the matcher instruction stream is exhausted.
 */
static void
must_get_matcher_argument(apfl_ctx ctx, size_t *i, struct matcher_instruction_list *milist, union matcher_instruction_or_arg *arg)
{
    ABSTRACT_MUST_GET_ARG(get_matcher_argument, ctx, i, milist, arg)
}
|
|
|
|
/*
 * The three places a variable lookup may consult, in resolution order:
 * the call's local scope, the function's closure scope, and the globals.
 * NOTE(review): SCOPE_CLOSUE looks like a typo for SCOPE_CLOSURE; kept
 * as-is because the identifier is used throughout this file.
 */
enum scope_type {
    SCOPE_LOCAL,
    SCOPE_CLOSUE,
    SCOPE_GLOBAL,
};
|
|
|
|
static struct scope *
|
|
get_scope(apfl_ctx ctx, struct scopes scopes, enum scope_type type)
|
|
{
|
|
switch (type) {
|
|
case SCOPE_LOCAL:
|
|
return scopes.local;
|
|
case SCOPE_CLOSUE:
|
|
return scopes.closure;
|
|
case SCOPE_GLOBAL:
|
|
return ctx->globals;
|
|
}
|
|
|
|
assert(false);
|
|
return NULL;
|
|
}
|
|
|
|
static struct scope *
|
|
get_or_create_local_scope(apfl_ctx ctx, struct scopes *scopes)
|
|
{
|
|
if (scopes->local != NULL) {
|
|
return scopes->local;
|
|
}
|
|
|
|
if ((scopes->local = apfl_scope_new(&ctx->gc)) == NULL) {
|
|
apfl_raise_alloc_error(ctx);
|
|
}
|
|
|
|
return scopes->local;
|
|
}
|
|
|
|
static bool
|
|
try_variable_get_for_scope_type(
|
|
apfl_ctx ctx,
|
|
struct scopes scopes,
|
|
struct apfl_string *name,
|
|
enum scope_type type
|
|
) {
|
|
struct apfl_value value;
|
|
struct scope *scope;
|
|
|
|
if ((scope = get_scope(ctx, scopes, type)) != NULL) {
|
|
if (apfl_scope_get(scope, name, &value)) {
|
|
apfl_stack_must_push(ctx, value);
|
|
return true;
|
|
}
|
|
}
|
|
return false;
|
|
}
|
|
|
|
static void
|
|
variable_get(apfl_ctx ctx, struct scopes scopes, struct apfl_string *name, bool global)
|
|
{
|
|
if (try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_LOCAL)) {
|
|
return;
|
|
}
|
|
if (try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_CLOSUE)) {
|
|
return;
|
|
}
|
|
if (global && try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_GLOBAL)) {
|
|
return;
|
|
}
|
|
|
|
apfl_raise_errorfmt(ctx, "Variable {string} does not exist.", apfl_string_view_from(*name));
|
|
}
|
|
|
|
static bool
|
|
try_variable_update_existing_for_scope_type(
|
|
struct apfl_string *name,
|
|
struct apfl_value value,
|
|
struct scope *scope
|
|
) {
|
|
if (scope == NULL) {
|
|
return false;
|
|
}
|
|
|
|
return apfl_scope_update_existing(scope, name, value);
|
|
}
|
|
|
|
static void
|
|
variable_set_value(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool local, struct apfl_value value)
|
|
{
|
|
bool was_set = false;
|
|
if (!local) {
|
|
was_set = try_variable_update_existing_for_scope_type(name, value, scopes->local)
|
|
|| try_variable_update_existing_for_scope_type(name, value, scopes->closure);
|
|
}
|
|
|
|
if (!was_set) {
|
|
struct scope *scope = get_or_create_local_scope(ctx, scopes);
|
|
assert(scope != NULL /*get_or_create_local_scope should never return NULL*/);
|
|
|
|
if (!apfl_scope_set(&ctx->gc, scope, name, value)) {
|
|
apfl_raise_alloc_error(ctx);
|
|
}
|
|
}
|
|
}
|
|
|
|
/*
 * Assign the value on top of the stack to `name`.  With keep_on_stack the
 * value stays on the stack after the assignment; otherwise it is dropped.
 */
static void
variable_set(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool keep_on_stack, bool local)
{
    struct apfl_value value = apfl_stack_must_get(ctx, -1);

    variable_set_value(ctx, scopes, name, local, value);

    if (keep_on_stack) {
        // If the value should be kept on the stack, the value is now in two
        // places. We need to set the COW flag to prevent mutations of one copy
        // affecting the other one.
        // NOTE(review): the result of apfl_value_set_cow_flag is assigned only
        // to the local `value`, which is not used afterwards — as visible here
        // neither the stack slot nor the scope binding receive the flagged
        // copy. Verify whether the call has side effects on the underlying
        // object, or whether this is a dead store / latent bug.
        value = apfl_value_set_cow_flag(value);
    } else {
        stack_must_drop(ctx, -1);
    }
}
|
|
|
|
static void
|
|
variable_new(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool local)
|
|
{
|
|
if (!local) {
|
|
if (scopes->local != NULL && apfl_scope_has(scopes->local, name)) {
|
|
return;
|
|
}
|
|
if (scopes->closure != NULL && apfl_scope_has(scopes->closure, name)) {
|
|
return;
|
|
}
|
|
}
|
|
|
|
struct scope *scope = get_or_create_local_scope(ctx, scopes);
|
|
if (!apfl_scope_create_var(&ctx->gc, scope, name)) {
|
|
apfl_raise_alloc_error(ctx);
|
|
}
|
|
}
|
|
|
|
/*
 * Allocate a new matcher for `milist` and push it onto the per-call matcher
 * stack.  Capacity is reserved before the matcher is allocated, and the
 * length is only bumped once the slot holds a valid matcher, so a failure
 * at any point leaves the stack consistent.
 */
static void
matcher_push(apfl_ctx ctx, struct func_call_stack_entry *cse, struct matcher_instruction_list *milist)
{
    struct matcher_stack *matcher_stack = &cse->matcher_stack;
    if (!apfl_resizable_ensure_cap_for_more_elements(
        ctx->gc.allocator,
        sizeof(struct matcher *),
        (void **)&matcher_stack->items,
        matcher_stack->len,
        &matcher_stack->cap,
        1
    )) {
        apfl_raise_alloc_error(ctx);
    }

    if ((matcher_stack->items[matcher_stack->len] = apfl_matcher_new(&ctx->gc, milist)) == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    matcher_stack->len++;
}
|
|
|
|
static struct matcher *
|
|
matcher_stack_top(apfl_ctx ctx, struct func_call_stack_entry *cse)
|
|
{
|
|
struct matcher_stack *matcher_stack = &cse->matcher_stack;
|
|
if (matcher_stack->len == 0) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
|
|
struct matcher *matcher = matcher_stack->items[matcher_stack->len-1];
|
|
|
|
if (matcher == NULL) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
|
|
return matcher;
|
|
}
|
|
|
|
static void
|
|
matcher_stack_drop(apfl_ctx ctx, struct func_call_stack_entry *cse)
|
|
{
|
|
struct matcher_stack *matcher_stack = &cse->matcher_stack;
|
|
if (matcher_stack->len == 0) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
|
|
bool ok = apfl_resizable_resize(
|
|
ctx->gc.allocator,
|
|
sizeof(struct matcher *),
|
|
(void **)&matcher_stack->items,
|
|
&matcher_stack->len,
|
|
&matcher_stack->cap,
|
|
matcher_stack->len-1
|
|
);
|
|
assert(ok /* We're shrinking, should not fail */);
|
|
}
|
|
|
|
/*
 * Build a new function value on the stack with `count` reserved subfunc
 * slots.  The closure scope is temporarily rooted so the GC cannot collect
 * it between allocation and the function taking ownership; the caller
 * (func) wraps this in a tmproots frame.
 */
static void
func_inner(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t count)
{
    struct scope *scope = apfl_closure_scope_for_func(ctx, cse->scopes);
    if (scope == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    // Root the scope: nothing references it yet, so a GC triggered by the
    // allocations below could otherwise collect it.
    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(scope, GC_TYPE_SCOPE))) {
        apfl_raise_alloc_error(ctx);
    }

    // Reserve the stack slot first so the new func is reachable via the
    // stack as soon as it exists.
    struct apfl_value *func_value = apfl_stack_push_placeholder(ctx);
    if (func_value == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    if ((func_value->func = apfl_func_new(
        &ctx->gc,
        count,
        scope,
        cse->execution_line,
        cse->instructions->filename
    )) == NULL) {
        // Remove the placeholder again so the stack stays consistent.
        stack_must_drop(ctx, -1);
        apfl_raise_alloc_error(ctx);
    }

    // Only mark the slot as a function once func points at a valid object.
    func_value->type = VALUE_FUNC;
}
|
|
|
|
static void
|
|
func(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t count)
|
|
{
|
|
size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
|
|
func_inner(ctx, cse, count);
|
|
apfl_gc_tmproots_restore(&ctx->gc, tmproots);
|
|
}
|
|
|
|
/*
 * Attach `body` (and, with with_matcher, the matcher on top of the matcher
 * stack) as a subfunc to the function value on top of the value stack.
 * Raises corrupted-bytecode when the stack top is not a function or the
 * subfunc cannot be added.
 */
static void
func_add_subfunc(
    apfl_ctx ctx,
    struct func_call_stack_entry *cse,
    struct instruction_list *body,
    bool with_matcher
) {
    // TODO: Better error messages

    struct apfl_value value = apfl_stack_must_get(ctx, -1);
    if (value.type != VALUE_FUNC) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    if (!apfl_func_add_subfunc(
        value.func,
        body,
        with_matcher ? matcher_stack_top(ctx, cse) : NULL
    )) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    // The matcher is only popped after it was successfully handed over to
    // the function.
    if (with_matcher) {
        matcher_stack_drop(ctx, cse);
    }
}
|
|
|
|
static void
|
|
func_set_name(apfl_ctx ctx, struct apfl_string *name)
|
|
{
|
|
struct apfl_value value = apfl_stack_must_get(ctx, -1);
|
|
if (value.type != VALUE_FUNC) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
|
|
apfl_func_set_name(value.func, name);
|
|
}
|
|
|
|
static bool
|
|
try_call_stack_push(apfl_ctx ctx, struct call_stack_entry cse)
|
|
{
|
|
return apfl_resizable_append(
|
|
ctx->gc.allocator,
|
|
sizeof(struct call_stack_entry),
|
|
(void**)&ctx->call_stack.items,
|
|
&ctx->call_stack.len,
|
|
&ctx->call_stack.cap,
|
|
&cse,
|
|
1
|
|
);
|
|
}
|
|
|
|
static void
|
|
call_stack_push(apfl_ctx ctx, struct call_stack_entry cse)
|
|
{
|
|
if (!try_call_stack_push(ctx, cse)) {
|
|
apfl_call_stack_entry_deinit(ctx->gc.allocator, &cse);
|
|
apfl_raise_alloc_error(ctx);
|
|
}
|
|
}
|
|
|
|
static void
|
|
call_stack_drop(apfl_ctx ctx)
|
|
{
|
|
assert(ctx->call_stack.len > 0);
|
|
|
|
apfl_call_stack_entry_deinit(ctx->gc.allocator, apfl_call_stack_cur_entry(ctx));
|
|
|
|
bool ok = apfl_resizable_resize(
|
|
ctx->gc.allocator,
|
|
sizeof(struct call_stack_entry),
|
|
(void **)&ctx->call_stack.items,
|
|
&ctx->call_stack.len,
|
|
&ctx->call_stack.cap,
|
|
ctx->call_stack.len - 1
|
|
);
|
|
assert(ok /* We're shrinking the memory here, should not fail */);
|
|
}
|
|
|
|
static void
|
|
return_from_function_inner(apfl_ctx ctx)
|
|
{
|
|
struct apfl_value value;
|
|
if (!apfl_stack_pop(ctx, &value, -1)) {
|
|
// No return value on the stack. Return nil instead
|
|
value = (struct apfl_value) { .type = VALUE_NIL };
|
|
}
|
|
|
|
call_stack_drop(ctx);
|
|
|
|
apfl_stack_must_push(ctx, value);
|
|
}
|
|
|
|
static void
|
|
return_from_function(apfl_ctx ctx)
|
|
{
|
|
size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
|
|
return_from_function_inner(ctx);
|
|
apfl_gc_tmproots_restore(&ctx->gc, tmproots);
|
|
}
|
|
|
|
/*
 * Push a new call-stack entry and seed its value stack with `args`.
 * `tmproots` is the caller's tmproots mark; it is restored here (early)
 * because after the push both the function and the args are reachable
 * through the call stack again.
 */
static void
prepare_call(apfl_ctx ctx, size_t tmproots, struct apfl_value args, struct call_stack_entry cse)
{
    call_stack_push(ctx, cse);

    // Note: This pushes args on the stack of the newly created call stack
    apfl_stack_must_push(ctx, args);

    // Both the function and the args are now rooted again, we can undo the tmproots early.
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}
|
|
|
|
// Keep evaluating instructions until we've returned from the current call
// stack entry, i.e. until the call stack is shorter than when we started.
// Must not be called with a APFL_CSE_CFUNCTION on top of the call stack
// (C functions are executed directly in call_inner, not via this loop).
static void
evaluate_until_call_stack_return(apfl_ctx ctx)
{
    struct call_stack *call_stack = &ctx->call_stack;

    size_t depth_started = call_stack->len;
    assert(depth_started > 0);

    // Each iteration runs the evaluator for whatever entry is currently on
    // top; the handlers may push further entries (calls, matchers) or pop
    // entries (returns), which this loop then picks up.
    while (call_stack->len >= depth_started) {
        struct call_stack_entry *cse = apfl_call_stack_cur_entry(ctx);
        assert(cse != NULL);

        switch (cse->type) {
        case APFL_CSE_CFUNCTION:
            // C functions never reach this loop; see function comment.
            assert(false);
            break;
        case APFL_CSE_FUNCTION:
            evaluate(ctx, &cse->func);
            break;
        case APFL_CSE_MATCHER:
            evaluate_matcher(ctx, &cse->matcher);
            break;
        case APFL_CSE_FUNCTION_DISPATCH:
            dispatch(ctx, cse);
            break;
        }
    }
}
|
|
|
|
static void
|
|
must_tmproot_add_value(apfl_ctx ctx, struct apfl_value value)
|
|
{
|
|
if (!apfl_value_add_as_tmproot(&ctx->gc, value)) {
|
|
apfl_raise_alloc_error(ctx);
|
|
}
|
|
}
|
|
|
|
/*
 * Core of a function call: take the callee at `func_index` and the
 * argument list at `args_index` off the stack, validate them, and either
 * (a) push an APFL_CSE_FUNCTION_DISPATCH entry for an apfl function, or
 * (b) run a C function to completion right away.
 *
 * `tmproots` is the caller's tmproots mark, handed to prepare_call so it
 * can be restored as soon as func/args are rooted via the call stack.
 * With call_from_apfl, control returns to evaluate_until_call_stack_return
 * which picks up the new entry instead of recursing in C.
 */
static void
call_inner(apfl_ctx ctx, size_t tmproots, apfl_stackidx func_index, apfl_stackidx args_index, bool call_from_apfl)
{
    // Root func and args before dropping them from the stack, so the GC
    // cannot collect them in between.
    struct apfl_value func = apfl_stack_must_get(ctx, func_index);
    must_tmproot_add_value(ctx, func);

    struct apfl_value args = apfl_stack_must_get(ctx, args_index);
    must_tmproot_add_value(ctx, args);


    bool ok = apfl_stack_drop_multi(ctx, 2, (apfl_stackidx[]){func_index, args_index});
    assert(ok /*apfl_stack_drop_multi*/);

    if (!VALUE_IS_A(func, APFL_VALUE_FUNC)) {
        apfl_raise_errorfmt(ctx, "Can only call functions, got a {value:type} instead", func);
    }
    if (!VALUE_IS_A(args, APFL_VALUE_LIST)) {
        apfl_raise_const_error(ctx, apfl_messages.not_a_list);
    }

    switch (func.type) {
    case VALUE_FUNC: {
        // apfl function: set up a fresh local scope and push a dispatch
        // entry that will select the matching subfunc.
        struct scope *local_scope = apfl_scope_new(&ctx->gc);
        if (local_scope == NULL) {
            apfl_raise_alloc_error(ctx);
        }

        if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(local_scope, GC_TYPE_SCOPE))) {
            apfl_raise_alloc_error(ctx);
        }

        prepare_call(ctx, tmproots, args, (struct call_stack_entry) {
            .type = APFL_CSE_FUNCTION_DISPATCH,
            .stack = apfl_stack_new(),
            .func_dispatch = {
                .subfunc = 0,
                .scopes = {
                    .local = local_scope,
                    .closure = func.func->scope,
                },
                .function = func.func,
                .returning_from_matcher = false,
                .matcher_result = false,
            },
        });

        if (call_from_apfl) {
            // In this case we're already coming from evaluate_until_call_stack_return,
            // which will pick up the new stack entry. This way we can avoid doing the recursion in C.
            return;
        } else {
            evaluate_until_call_stack_return(ctx);
        }
        break;
    }
    case VALUE_CFUNC:
        // C function: push a CSE for it (so it has its own value stack and
        // deferred list), run it directly, then return its result.
        prepare_call(ctx, tmproots, args, (struct call_stack_entry) {
            .type = APFL_CSE_CFUNCTION,
            .stack = apfl_stack_new(),
            .cfunc = {
                .func = func.cfunc,
                .deferred_list = NULL,
                .deferred_len = 0,
                .deferred_cap = 0,
            },
        });

        func.cfunc->func(ctx);
        apfl_cfunc_run_deferred(ctx, apfl_call_stack_cur_entry(ctx));
        return_from_function(ctx);
        break;
    default:
        assert(false); // Otherwise the VALUE_IS_A() check for APFL_VALUE_FUNC would have failed
    }
}
|
|
|
|
static void
|
|
call(apfl_ctx ctx, apfl_stackidx func_index, apfl_stackidx args_index, bool call_from_apfl)
|
|
{
|
|
size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
|
|
call_inner(ctx, tmproots, func_index, args_index, call_from_apfl);
|
|
apfl_gc_tmproots_restore(&ctx->gc, tmproots);
|
|
}
|
|
|
|
/*
 * Public API: call the function at `func_index` with the argument list at
 * `args_index`, running the interpreter until the call has returned.
 */
void
apfl_call(apfl_ctx ctx, apfl_stackidx func_index, apfl_stackidx args_index)
{
    call(ctx, func_index, args_index, false);
}
|
|
|
|
static void
|
|
matcher_set_val(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t index)
|
|
{
|
|
struct matcher *matcher = matcher_stack_top(ctx, cse);
|
|
|
|
if (index >= matcher->instructions->value_count) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
|
|
matcher->values[index] = apfl_stack_must_pop(ctx, -1);
|
|
}
|
|
|
|
/*
 * Pop the top matcher off the matcher stack and start matching with it
 * (matcher_init_matching pushes a matcher call-stack entry and consumes
 * the value to match from the current stack).  The matcher is temporarily
 * rooted because matcher_stack_drop removes the reference that kept it
 * alive.
 */
static void
matcher_must_match(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);

    struct matcher *matcher = matcher_stack_top(ctx, cse);
    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(matcher, GC_TYPE_MATCHER))) {
        apfl_raise_alloc_error(ctx);
    }
    matcher_stack_drop(ctx, cse);
    matcher_init_matching(ctx, matcher, cse->scopes);

    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}
|
|
|
|
/*
 * Main instruction loop for an apfl function frame.  Executes instructions
 * starting at cse->pc until the instruction list is exhausted (then
 * returns from the function) or until an instruction hands control back to
 * evaluate_until_call_stack_return (INSN_CALL, INSN_MATCHER_MUST_MATCH).
 *
 * On re-entry after a matcher ran, returning_from_matcher is set and
 * matcher_result carries the outcome; a failed match raises here.
 */
static void
evaluate(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    if (cse->returning_from_matcher) {
        if (!cse->matcher_result) {
            apfl_raise_const_error(ctx, apfl_messages.value_doesnt_match);
        }
        cse->returning_from_matcher = false;
    }

    union instruction_or_arg arg;

    size_t *pc = &cse->pc;
    struct instruction_list *ilist = cse->instructions;

    while (*pc < cse->instructions->len) {
        switch (ilist->instructions[(*pc)++].instruction) {
        case INSN_NIL:
            apfl_push_nil(ctx);
            goto continue_loop;
        case INSN_TRUE:
            apfl_push_bool(ctx, true);
            goto continue_loop;
        case INSN_FALSE:
            apfl_push_bool(ctx, false);
            goto continue_loop;
        case INSN_NUMBER:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_push_number(ctx, arg.number);
            goto continue_loop;
        case INSN_STRING:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_stack_must_push(ctx, (struct apfl_value) {
                .type = VALUE_STRING,
                .string = arg.string,
            });
            goto continue_loop;
        case INSN_LIST:
            // arg.count is the capacity hint for the new list.
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_list_create(ctx, arg.count);
            goto continue_loop;
        case INSN_LIST_APPEND:
            apfl_list_append(ctx, -2, -1);
            goto continue_loop;
        case INSN_LIST_EXPAND_INTO:
            apfl_list_append_list(ctx, -2, -1);
            goto continue_loop;
        case INSN_DICT:
            apfl_dict_create(ctx);
            goto continue_loop;
        case INSN_DICT_APPEND_KVPAIR:
            apfl_dict_set(ctx, -3, -2, -1);
            goto continue_loop;
        case INSN_GET_MEMBER:
            apfl_get_member(ctx, -2, -1);
            goto continue_loop;
        case INSN_VAR_NEW:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_new(ctx, &cse->scopes, arg.string, false);
            goto continue_loop;
        case INSN_VAR_NEW_LOCAL:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_new(ctx, &cse->scopes, arg.string, true);
            goto continue_loop;
        case INSN_VAR_GET:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_get(ctx, cse->scopes, arg.string, true);
            goto continue_loop;
        case INSN_VAR_SET:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, true, false);
            goto continue_loop;
        case INSN_VAR_SET_LOCAL:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, true, true);
            goto continue_loop;
        case INSN_MOVE_TO_LOCAL_VAR:
            // Like VAR_SET_LOCAL, but the value is dropped from the stack.
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, false, true);
            goto continue_loop;
        case INSN_NEXT_LINE:
            // Line tracking for error messages / new functions.
            cse->execution_line++;
            goto continue_loop;
        case INSN_SET_LINE:
            must_get_argument(ctx, pc, ilist, &arg);
            cse->execution_line = arg.count;
            goto continue_loop;
        case INSN_GET_BY_INDEX_KEEP:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_get_list_member_by_index(ctx, -1, arg.index);
            goto continue_loop;
        case INSN_DROP:
            if (!apfl_stack_drop(ctx, -1)) {
                apfl_raise_invalid_stackidx(ctx);
            }
            goto continue_loop;
        case INSN_DUP:
            apfl_copy(ctx, -1);
            goto continue_loop;
        case INSN_CALL:
            call(ctx, -2, -1, true);

            // By returning from this function, the newly pushed call stack entry (if any) will get picked up by
            // evaluate_until_call_stack_return. In case no new CSE was pushed (when a cfunc was called), we'll
            // simply continue with the current call stack.
            return;
        case INSN_FUNC:
            must_get_argument(ctx, pc, ilist, &arg);
            func(ctx, cse, arg.count);
            goto continue_loop;
        case INSN_FUNC_ADD_SUBFUNC:
            must_get_argument(ctx, pc, ilist, &arg);
            func_add_subfunc(ctx, cse, arg.body, true);
            goto continue_loop;
        case INSN_FUNC_ADD_SUBFUNC_ANYARGS:
            must_get_argument(ctx, pc, ilist, &arg);
            func_add_subfunc(ctx, cse, arg.body, false);
            goto continue_loop;
        case INSN_FUNC_SET_NAME:
            must_get_argument(ctx, pc, ilist, &arg);
            func_set_name(ctx, arg.string);
            goto continue_loop;
        case INSN_MATCHER_PUSH:
            must_get_argument(ctx, pc, ilist, &arg);
            matcher_push(ctx, cse, arg.matcher);
            goto continue_loop;
        case INSN_MATCHER_SET_VAL:
            must_get_argument(ctx, pc, ilist, &arg);
            matcher_set_val(ctx, cse, arg.index);
            goto continue_loop;
        case INSN_MATCHER_MUST_MATCH:
            // matcher_must_match pushes a new call stack entry for the matcher onto the stack. We return from this
            // so this new CSE gets executed. By setting returning_from_matcher, we know that we came from the matcher,
            // once it returns.

            matcher_must_match(ctx, cse);

            return;
        case INSN_BUILD_PAIR:
            apfl_push_pair(ctx, -2, -1);
            goto continue_loop;
        }

        // Every valid opcode jumps to continue_loop or returns above.
        assert(false);

        continue_loop:;
    }

    // Instruction list exhausted: implicit return.
    return_from_function(ctx);
}
|
|
|
|
/*
 * Push `entry` onto the matcher's state stack (one state per nesting level
 * of the value being destructured).  Raises an allocation error on
 * failure.
 */
static void
matcher_state_push(apfl_ctx ctx, struct matcher_call_stack_entry *cse, struct matcher_state entry)
{
    if (!apfl_resizable_append(
        ctx->gc.allocator,
        sizeof(struct matcher_state),
        (void **)&cse->matcher_state_stack,
        &cse->matcher_state_stack_len,
        &cse->matcher_state_stack_cap,
        &entry,
        1
    )) {
        apfl_raise_alloc_error(ctx);
    }
}
|
|
|
|
/*
 * Abort matching with the generic "invalid matcher state" error.
 * Never returns; the raise unwinds the C stack.
 */
noreturn static void
raise_invalid_matcher_state(apfl_ctx ctx)
{
    apfl_raise_const_error(ctx, apfl_messages.invalid_matcher_state);
}
|
|
|
|
static void
|
|
matcher_state_drop(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
|
|
{
|
|
if (cse->matcher_state_stack_len == 0) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
|
|
bool ok = apfl_resizable_resize(
|
|
ctx->gc.allocator,
|
|
sizeof(struct matcher_state),
|
|
(void **)&cse->matcher_state_stack,
|
|
&cse->matcher_state_stack_len,
|
|
&cse->matcher_state_stack_cap,
|
|
cse->matcher_state_stack_len-1
|
|
);
|
|
assert(ok /* We're shrinking, should not fail */);
|
|
}
|
|
|
|
/*
 * Set up a matcher call-stack entry for `matcher` and push it onto the
 * call stack.  Pops the value to match off the current stack, then pushes
 * it (COW-flagged) onto the new entry's stack.
 *
 * All allocations for the entry (captures, transfers, state stack) are
 * freed via the error label until the entry is owned by the call stack;
 * from then on the GC is responsible for cleanup.
 */
static void
matcher_init_matching_inner(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes)
{
    // Root the value: it is off the stack until it is pushed again below.
    struct apfl_value value = apfl_stack_must_pop(ctx, -1);
    must_tmproot_add_value(ctx, value);

    if (matcher == NULL) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    size_t capture_count = matcher->instructions->capture_count;
    struct matcher_call_stack_entry matcher_cse = {
        .pc = 0,
        .from_predicate = false,
        .matcher = matcher,
        .scopes = scopes,
        .capture_index = 0,
        .capture_count = capture_count,
        .captures = NULL,
        .transfers = NULL,
        .matcher_state_stack = NULL,
        .matcher_state_stack_len = 0,
        .matcher_state_stack_cap = 0,
    };

    if (capture_count > 0) {
        // Captures default to nil until the matcher fills them in.
        if ((matcher_cse.captures = ALLOC_LIST(ctx->gc.allocator, struct apfl_value, capture_count)) == NULL) {
            goto error;
        }

        for (size_t i = 0; i < capture_count; i++) {
            matcher_cse.captures[i] = (struct apfl_value) { .type = VALUE_NIL };
        }

        // One transfer slot per capture; filled in by the matcher, applied
        // by matcher_transfer() on success.
        if ((matcher_cse.transfers = ALLOC_LIST(
            ctx->gc.allocator,
            struct matcher_capture_transfer,
            capture_count
        )) == NULL) {
            goto error;
        }

        for (size_t i = 0; i < capture_count; i++) {
            matcher_cse.transfers[i] = (struct matcher_capture_transfer) {
                .var = NULL,
                .path_start = 0,
                .path_len = 0,
                .local = false,
            };
        }
    }

    // Matching starts with a single VALUE state for the matched value.
    if ((matcher_cse.matcher_state_stack = ALLOC_LIST(
        ctx->gc.allocator,
        struct matcher_state,
        1
    )) == NULL) {
        goto error;
    }

    matcher_cse.matcher_state_stack[0] = (struct matcher_state) {
        .mode = MATCHER_MODE_VALUE,
    };
    matcher_cse.matcher_state_stack_len = 1;
    matcher_cse.matcher_state_stack_cap = 1;

    if (!try_call_stack_push(ctx, (struct call_stack_entry) {
        .type = APFL_CSE_MATCHER,
        .stack = apfl_stack_new(),
        .matcher = matcher_cse,
    })) {
        goto error;
    }

    // No need for `goto error` on failure here, all dynamically allocated
    // elements are on the call stack now, so the GC can clean them up in case
    // of an error.
    apfl_stack_must_push(ctx, apfl_value_set_cow_flag(value));

    return;

error:
    apfl_matcher_call_stack_entry_deinit(ctx->gc.allocator, &matcher_cse);
    apfl_raise_alloc_error(ctx);
}
|
|
|
|
/*
 * Initialise matching. Pushes a new call stack entry and pops a value off
 * the current value stack.
 */
|
|
static void
|
|
matcher_init_matching(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes)
|
|
{
|
|
size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
|
|
matcher_init_matching_inner(ctx, matcher, scopes);
|
|
apfl_gc_tmproots_restore(&ctx->gc, tmproots);
|
|
}
|
|
|
|
static void
|
|
matcher_check_index(apfl_ctx ctx, size_t count, size_t index)
|
|
{
|
|
if (index >= count) {
|
|
apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
|
|
}
|
|
}
|
|
|
|
static struct matcher_state *
|
|
matcher_cur_state(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
|
|
{
|
|
if (cse->matcher_state_stack_len == 0) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
|
|
return &cse->matcher_state_stack[cse->matcher_state_stack_len-1];
|
|
}
|
|
|
|
static bool
|
|
matcher_current_val_in_state(apfl_ctx ctx, struct matcher_state *state, struct apfl_value *value)
|
|
{
|
|
struct apfl_value cur;
|
|
|
|
switch (state->mode) {
|
|
case MATCHER_MODE_VALUE:
|
|
case MATCHER_MODE_LIST_REMAINING:
|
|
if (!apfl_stack_get(ctx, &cur, -1)) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
*value = cur;
|
|
return true;
|
|
case MATCHER_MODE_STOP:
|
|
case MATCHER_MODE_LIST_UNDERFLOW:
|
|
return false;
|
|
case MATCHER_MODE_LIST_START:
|
|
if (!apfl_stack_get(ctx, &cur, -1)) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
if (cur.type != VALUE_LIST) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
if (state->lower >= cur.list->len) {
|
|
return false;
|
|
}
|
|
*value = cur.list->items[state->lower];
|
|
return true;
|
|
case MATCHER_MODE_LIST_END:
|
|
if (!apfl_stack_get(ctx, &cur, -1)) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
if (cur.type != VALUE_LIST) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
if (state->upper == 0) {
|
|
return NULL;
|
|
}
|
|
*value = cur.list->items[state->upper-1];
|
|
return true;
|
|
case MATCHER_MODE_PAIR_L:
|
|
case MATCHER_MODE_PAIR_R:
|
|
if (!apfl_stack_get(ctx, &cur, -1)) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
if (cur.type != VALUE_PAIR) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
*value = state->mode == MATCHER_MODE_PAIR_L
|
|
? cur.pair->l
|
|
: cur.pair->r;
|
|
return true;
|
|
}
|
|
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
|
|
static bool
|
|
matcher_current_val(apfl_ctx ctx, struct matcher_call_stack_entry *cse, struct apfl_value *value)
|
|
{
|
|
struct matcher_state *state = matcher_cur_state(ctx, cse);
|
|
return matcher_current_val_in_state(ctx, state, value);
|
|
}
|
|
|
|
/*
 * Advance the matcher to the next value to match, updating (and possibly
 * popping) the matcher state stack.  Returns true when there is a next
 * position, false on list underflow.  Raises on states that cannot be
 * advanced.
 */
static bool
matcher_next(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
again:;
    struct matcher_state *state = matcher_cur_state(ctx, cse);

    switch (state->mode) {
    case MATCHER_MODE_VALUE:
        // The single value was consumed: drop it and stop.
        state->mode = MATCHER_MODE_STOP;
        if (!apfl_stack_drop(ctx, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        return true;
    case MATCHER_MODE_STOP:
    case MATCHER_MODE_LIST_UNDERFLOW:
        // Nothing left to advance past.
        raise_invalid_matcher_state(ctx);
        return false;
    case MATCHER_MODE_LIST_START:
        // Move one element forward from the front of the list.
        state->lower++;
        return true;
    case MATCHER_MODE_LIST_END:
        // Move one element backward from the end; underflow when the
        // back cursor would cross the front cursor.
        if (state->upper <= state->lower) {
            state->mode = MATCHER_MODE_LIST_UNDERFLOW;
            return false;
        }
        state->upper--;
        return true;
    case MATCHER_MODE_LIST_REMAINING:
    case MATCHER_MODE_PAIR_R:
        // The nested value (remaining-list or pair) is fully matched:
        // drop it and pop its state.
        if (!apfl_stack_drop(ctx, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        matcher_state_drop(ctx, cse);
        goto again; // We also need to advance the previous stack entry, as
                    // we're done with the currently matched item in that state
    case MATCHER_MODE_PAIR_L:
        // Left side done; continue with the right side of the pair.
        state->mode = MATCHER_MODE_PAIR_R;
        return true;
    }

    raise_invalid_matcher_state(ctx);
}
|
|
|
|
/*
 * If the current value is a list, push it onto the value stack and enter a
 * LIST_START state covering all its elements.  Returns false (match
 * failure) when there is no current value or it is not a list.
 */
static bool
matcher_enter_list(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    struct apfl_value cur;
    if (!matcher_current_val_in_state(ctx, state, &cur)) {
        return false;
    }
    if (cur.type != VALUE_LIST) {
        return false;
    }

    size_t len = cur.list->len;

    apfl_stack_must_push(ctx, cur);

    matcher_state_push(ctx, cse, (struct matcher_state) {
        .mode = MATCHER_MODE_LIST_START,
        .lower = 0,
        .upper = len,
    });
    return true;
}
|
|
|
|
static void
|
|
matcher_continue_from_end(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
|
|
{
|
|
struct matcher_state *state = matcher_cur_state(ctx, cse);
|
|
if (state->mode != MATCHER_MODE_LIST_START) {
|
|
raise_invalid_matcher_state(ctx);
|
|
}
|
|
state->mode = MATCHER_MODE_LIST_END;
|
|
}
|
|
|
|
/*
 * Replace the list on the stack with a fresh list containing only the
 * not-yet-matched range [lower, upper) and switch to LIST_REMAINING mode.
 * NOTE(review): "remainding" looks like a misspelling of "remaining";
 * kept because the symbol may be referenced outside this chunk.
 */
static void
matcher_remainding(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    struct apfl_value cur;

    if (!apfl_stack_get(ctx, &cur, -1)) {
        raise_invalid_matcher_state(ctx);
    }

    if (
        (state->mode != MATCHER_MODE_LIST_START && state->mode != MATCHER_MODE_LIST_END)
        || cur.type != VALUE_LIST
    ) {
        raise_invalid_matcher_state(ctx);
    }

    if (state->lower > state->upper) {
        raise_invalid_matcher_state(ctx);
    }

    struct list_header *cur_list = cur.list;
    assert(cur_list->len >= state->upper);

    size_t len = state->upper - state->lower;

    // Build the new list on the stack, then copy the unmatched range over.
    apfl_list_create(ctx, len);
    struct apfl_value new_val = apfl_stack_must_get(ctx, -1);
    assert(new_val.type == VALUE_LIST);

    struct list_header *new_list = new_val.list;
    assert(new_list->cap == len);
    assert(new_list->len == 0);
    for (size_t i = state->lower; i < state->upper; i++) {
        new_list->items[new_list->len++] = cur_list->items[i];
    }
    assert(new_list->len == len);

    if (!apfl_stack_drop(ctx, -2)) { // Drop the original list
        raise_invalid_matcher_state(ctx);
    }

    state->mode = MATCHER_MODE_LIST_REMAINING;
}
|
|
|
|
static bool
|
|
matcher_unpack_pair(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
|
|
{
|
|
struct matcher_state *state = matcher_cur_state(ctx, cse);
|
|
struct apfl_value cur;
|
|
if (!matcher_current_val_in_state(ctx, state, &cur)) {
|
|
return false;
|
|
}
|
|
if (cur.type != VALUE_PAIR) {
|
|
return false;
|
|
}
|
|
|
|
apfl_stack_must_push(ctx, cur);
|
|
|
|
matcher_state_push(ctx, cse, (struct matcher_state) {
|
|
.mode = MATCHER_MODE_PAIR_L,
|
|
});
|
|
return true;
|
|
}
|
|
|
|
/*
 * Finish matching a list: requires a LIST_START state whose range is
 * empty (every element matched).  Drops the list, pops the state, and
 * advances the enclosing state.  Returns false when the list was not
 * completely matched.
 */
static bool
matcher_leave_list(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    if (state->mode != MATCHER_MODE_LIST_START) {
        raise_invalid_matcher_state(ctx);
    }

    if (state->lower < state->upper) {
        // List was not completely matched
        return false;
    }

    if (!apfl_stack_drop(ctx, -1)) {
        raise_invalid_matcher_state(ctx);
    }
    matcher_state_drop(ctx, cse);
    return matcher_next(ctx, cse);
}
|
|
|
|
/*
 * Apply the captured values of a successful match to their destination
 * variables.  A transfer with an empty key path is a plain variable
 * assignment; a non-empty path updates a (nested) dictionary stored in the
 * variable, copy-on-write style.
 *
 * NOTE(review): the inner `for` loops re-declare `i`, shadowing the outer
 * capture index. The use of `cse->captures[i]` after the first inner loop
 * refers to the outer `i` as intended, but the shadowing is fragile —
 * consider renaming the inner indices.
 */
static void
matcher_transfer(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    for (size_t i = 0; i < cse->capture_count; i++) {
        struct matcher_capture_transfer transfer = cse->transfers[i];
        if (transfer.path_len == 0) {
            // Simple case: bind the capture directly to the variable.
            variable_set_value(ctx, &cse->scopes, transfer.var, transfer.local, cse->captures[i]);
        } else {
            // Set the value at a key path in a (nested) dictionary.

            variable_get(ctx, cse->scopes, transfer.var, false);
            if (apfl_get_type(ctx, -1) != APFL_VALUE_DICT) {
                apfl_raise_errorfmt(ctx, "Can not update value of type {stack:type}, expected dict", -1);
            }


            // Get or create intermediary dictionaries along the key path and leave a copy of the previous one on the
            // stack, so we can set the result in reverse order there later.
            for (size_t i = 0; i < transfer.path_len - 1; i++) {
                apfl_copy(ctx, -1);

                size_t value_index = transfer.path_start + i;
                matcher_check_index(ctx, cse->matcher->value_count, value_index);
                apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));
                if (apfl_get_member_if_exists(ctx, -2, -1)) {
                    if (apfl_get_type(ctx, -1) != APFL_VALUE_DICT) {
                        apfl_raise_errorfmt(ctx, "Can not update value of type {stack:type}, expected dict", -1);
                    }
                } else {
                    apfl_dict_create(ctx);
                }
            }

            // Set the value to the rightmost dictionary key
            size_t value_index = transfer.path_start + transfer.path_len - 1;
            matcher_check_index(ctx, cse->matcher->value_count, value_index);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));
            apfl_stack_must_push(ctx, cse->captures[i]);
            apfl_dict_set(ctx, -3, -2, -1);

            // Go through the key path (minus the rightmost key) in reverse order and set the value in the intermediary
            // dictionaries. Note that i has a offset of one here so we can use the i > 0 check in the loop (>= 0 would
            // not work as size_t is unsigned).
            for (size_t i = transfer.path_len - 1; i > 0; i--) {
                size_t value_index = transfer.path_start + i - 1;
                matcher_check_index(ctx, cse->matcher->value_count, value_index);
                apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));

                apfl_dict_set(ctx, -3, -1, -2);
            }

            // Finally set the copied and modified dictionary to the variable again.
            variable_set(ctx, &cse->scopes, transfer.var, false, false);
        }
    }
}
|
|
|
|
static void
|
|
return_from_matcher(apfl_ctx ctx, bool result)
|
|
{
|
|
struct call_stack_entry *cse = apfl_call_stack_cur_entry(ctx);
|
|
assert(cse != NULL);
|
|
assert(cse->type == APFL_CSE_MATCHER);
|
|
|
|
if (result) {
|
|
matcher_transfer(ctx, &cse->matcher);
|
|
}
|
|
|
|
call_stack_drop(ctx);
|
|
|
|
cse = apfl_call_stack_cur_entry(ctx);
|
|
assert(cse != NULL);
|
|
|
|
switch (cse->type) {
|
|
case APFL_CSE_FUNCTION:
|
|
cse->func.returning_from_matcher = true;
|
|
cse->func.matcher_result = result;
|
|
break;
|
|
case APFL_CSE_FUNCTION_DISPATCH:
|
|
cse->func_dispatch.returning_from_matcher = true;
|
|
cse->func_dispatch.matcher_result = result;
|
|
break;
|
|
default:
|
|
assert(false /* Invalid stack entry below matcher stack */);
|
|
}
|
|
}
|
|
|
|
// Abort the current matcher with a failed match and return from the
// enclosing evaluation function.
#define RETURN_WITHOUT_MATCH(ctx) \
    do { \
        return_from_matcher((ctx), false); \
        return; \
    } while (0)

// Like RETURN_WITHOUT_MATCH, but only fails the match when x evaluates to
// false.
#define RETURN_WITHOUT_MATCH_ON_FALSE(ctx, x) \
    do { \
        if (!(x)) { \
            RETURN_WITHOUT_MATCH(ctx); \
        } \
    } while (0)
|
|
|
|
// Execute one of the MATCHER_CAPTURE_TO_VAR* instructions: read the target
// variable (and, for the *_WITH_PATH variants, a dictionary key path) from
// the instruction stream, record the current value as a capture and advance
// the matcher.
//
// local:     mark the capture for assignment as a local variable
//            (forwarded to the transfer record).
// with_path: the instruction carries two extra arguments — start index and
//            length of a key path into the matcher's value table.
//
// Returns false when there is no current value or advancing the matcher
// fails; raises on corrupted bytecode or an out-of-range capture index.
static bool
matcher_evaluate_capturing_instruction(
    apfl_ctx ctx,
    struct matcher_call_stack_entry *cse,
    bool local,
    bool with_path
) {
    size_t *pc = &cse->pc;
    struct matcher *matcher = cse->matcher;
    struct matcher_instruction_list *milist = matcher->instructions;

    struct matcher_capture_transfer transfer = {
        .var = NULL,
        .path_start = 0,
        .path_len = 0,
        .local = local,
    };

    union matcher_instruction_or_arg arg;

    // First argument: the name of the variable to capture into.
    must_get_matcher_argument(ctx, pc, milist, &arg);
    transfer.var = arg.string;

    if (with_path) {
        // Key path arguments must be consumed in this exact order.
        must_get_matcher_argument(ctx, pc, milist, &arg);
        transfer.path_start = arg.index;
        must_get_matcher_argument(ctx, pc, milist, &arg);
        transfer.path_len = arg.index;
    }

    struct apfl_value cur;

    if (!matcher_current_val(ctx, cse, &cur)) {
        return false;
    }

    size_t capture = cse->capture_index++;
    matcher_check_index(ctx, milist->capture_count, capture);

    // Captures are only transferred into variables once the whole matcher
    // succeeded (see matcher_transfer). The value is marked copy-on-write as
    // it is now referenced from more than one place.
    cse->captures[capture] = apfl_value_set_cow_flag(cur);
    cse->transfers[capture] = transfer;

    return matcher_next(ctx, cse);
}
|
|
|
|
|
|
// Run the matcher bytecode of the given matcher call stack entry until it
// either fails (RETURN_WITHOUT_MATCH* reports false to the caller), has to
// call into a predicate function (returns so the new call stack entry is
// picked up), or runs out of instructions, in which case the overall result
// depends on the final matcher state.
static void
evaluate_matcher(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    union matcher_instruction_or_arg arg;

    // If we are resuming after a MATCHER_CHECK_PRED call, the predicate's
    // result is on top of the stack; a falsy result fails the match.
    if (cse->from_predicate) {
        cse->from_predicate = false;
        RETURN_WITHOUT_MATCH_ON_FALSE(ctx, apfl_is_truthy(ctx, -1));
    }

    size_t *pc = &cse->pc;
    struct matcher *matcher = cse->matcher;
    struct matcher_instruction_list *milist = matcher->instructions;

    while (*pc < milist->len) {
        struct apfl_value cur;

        switch (milist->instructions[(*pc)++].instruction) {
        case MATCHER_CAPTURE_TO_VAR:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, false, false)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_LOCAL:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, true, false)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_WITH_PATH:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, false, true)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_LOCAL_WITH_PATH:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, true, true)
            );
            goto continue_loop;
        case MATCHER_IGNORE:
            // Consume the current value without binding it anywhere.
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_next(ctx, cse));
            goto continue_loop;
        case MATCHER_CHECK_CONST:
            // Compare the current value against a constant from the
            // matcher's value table.
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            must_get_matcher_argument(ctx, pc, milist, &arg);
            matcher_check_index(ctx, milist->value_count, arg.index);
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, apfl_value_eq(matcher->values[arg.index], cur));
            goto continue_loop;
        case MATCHER_CHECK_PRED:
            // Call a predicate function with the current value as its single
            // argument; the truthiness of its result decides the match (see
            // the from_predicate handling at the top of this function).
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            must_get_matcher_argument(ctx, pc, milist, &arg);
            matcher_check_index(ctx, milist->value_count, arg.index);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(matcher->values[arg.index]));
            apfl_list_create(ctx, 1);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cur));
            apfl_list_append(ctx, -2, -1);

            cse->from_predicate = true;
            call(ctx, -2, -1, true);

            // By returning from this function, the newly pushed call stack entry (if any) will get picked up by
            // evaluate_until_call_stack_return. In case no new CSE was pushed (when a cfunc was called), we'll
            // simply continue with the current call stack.
            return;
        case MATCHER_ENTER_LIST:
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_enter_list(ctx, cse));
            goto continue_loop;
        case MATCHER_LEAVE_LIST:
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_leave_list(ctx, cse));
            goto continue_loop;
        case MATCHER_CONTINUE_FROM_END:
            matcher_continue_from_end(ctx, cse);
            goto continue_loop;
        case MATCHER_REMAINDING:
            matcher_remainding(ctx, cse);
            goto continue_loop;
        case MATCHER_UNPACK_PAIR:
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_unpack_pair(ctx, cse));
            goto continue_loop;
        }

        // Unknown instruction: the switch above must be exhaustive.
        assert(false);
    continue_loop:;
    }

    return_from_matcher(
        ctx,
        // We've successfully matched everything, if there's only one stack element left and we're in stop state
        cse->matcher_state_stack_len == 1 && cse->matcher_state_stack[0].mode == MATCHER_MODE_STOP
    );
}
|
|
|
|
struct matcher_stack
|
|
matcher_stack_new(void)
|
|
{
|
|
return (struct matcher_stack) {
|
|
.items = NULL,
|
|
.len = 0,
|
|
.cap = 0,
|
|
};
|
|
}
|
|
|
|
// Accept the currently selected subfunction: the function-dispatch call
// stack entry is replaced in place by a function call stack entry that will
// execute the chosen subfunction's body in the dispatch entry's scopes.
static void
dispatch_accept(struct call_stack_entry *cse)
{
    assert(cse->type == APFL_CSE_FUNCTION_DISPATCH);
    struct func_dispatch_call_stack_entry *fd_cse = &cse->func_dispatch;

    struct function *function = fd_cse->function;

    struct subfunction *subfunction = &function->subfunctions[fd_cse->subfunc];

    // Replace the current CSE with a function CSE
    // NOTE(review): cse->func presumably overlaps cse->func_dispatch in a
    // union — the compound literal below is fully built from fd_cse before
    // the assignment stores it, so the in-place replacement is safe.
    cse->type = APFL_CSE_FUNCTION;
    cse->stack.len = 0;
    cse->func = (struct func_call_stack_entry) {
        .pc = 0,
        .instructions = subfunction->body,
        .scopes = fd_cse->scopes,
        .execution_line = subfunction->body->line,
        .matcher_stack = matcher_stack_new(),
        .returning_from_matcher = false,
        .matcher_result = false,
        .function = function,
        .subfunction_index = fd_cse->subfunc,
    };
}
|
|
|
|
// Select which subfunction of a function should handle the current call.
// Subfunction matchers are tried in order against the argument list on top of
// the stack; the first one that matches (or has no matcher at all) is
// accepted via dispatch_accept. Raises when no subfunction matches.
static void
dispatch(apfl_ctx ctx, struct call_stack_entry *cse)
{
    assert(cse->type == APFL_CSE_FUNCTION_DISPATCH);
    struct func_dispatch_call_stack_entry *fd_cse = &cse->func_dispatch;

    struct function *function = fd_cse->function;

    // A matcher started by a previous dispatch() call has finished: accept
    // its subfunction on success, otherwise try the next candidate.
    if (fd_cse->returning_from_matcher) {
        if (fd_cse->matcher_result) {
            dispatch_accept(cse);
            return;
        }

        fd_cse->subfunc++;
        fd_cse->returning_from_matcher = false;
    }

    if (fd_cse->subfunc >= function->subfunctions_len) {
        apfl_raise_const_error(ctx, apfl_messages.no_matching_subfunction);
    }

    struct matcher *matcher = function->subfunctions[fd_cse->subfunc].matcher;

    // A subfunction without a matcher accepts unconditionally.
    if (matcher == NULL) {
        dispatch_accept(cse);
        return;
    }

    // matcher_init_matching consumes the value on the top of the stack, we need
    // to copy the value for further subfunctions.
    apfl_copy(ctx, -1);
    matcher_init_matching(
        ctx,
        function->subfunctions[fd_cse->subfunc].matcher,
        fd_cse->scopes
    );
}
|
|
|
|
// Lifecycle state of an iterative runner.
// NOTE(review): "interative" is a typo for "iterative"; the tag is referenced
// elsewhere in this file, so renaming it has to be done in one sweep.
enum interative_runner_state {
    IRUNNER_OK,   // Ready to evaluate the next expression
    IRUNNER_EOF,  // Input source is exhausted
    IRUNNER_ERR,  // Stopped because of an input error from the parser
};
|
|
|
|
// State of an iterative runner: parses and evaluates one top-level
// expression per apfl_iterative_runner_next call.
struct apfl_iterative_runner_data {
    apfl_ctx ctx;                        // Context everything is evaluated in
    apfl_tokenizer_ptr tokenizer;        // Owned; destroyed with the runner
    apfl_parser_ptr parser;              // Owned; reads tokens from tokenizer
    enum apfl_result result;             // Result of the most recent next() call
    enum interative_runner_state state;  // OK / EOF / input error
    struct scope *scope;                 // Top-level scope reused for every expression; GC-managed
};
|
|
|
|
// Compile a single parsed expression to bytecode and evaluate it in the
// runner's top-level scope by pushing a function call stack entry and running
// the call stack until it returns.
static void
iterative_runner_eval_expr_inner(apfl_iterative_runner runner, struct apfl_expr expr)
{
    apfl_ctx ctx = runner->ctx;

    struct instruction_list *ilist = apfl_instructions_new(&ctx->gc, expr.position.line, NULL);
    if (ilist == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    // Keep the fresh instruction list alive across compilation; the tmproot
    // set is restored by the caller (iterative_runner_eval_expr).
    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(ilist, GC_TYPE_INSTRUCTIONS))) {
        apfl_raise_alloc_error(ctx);
    }

    struct apfl_error error;
    if (!apfl_compile(&ctx->gc, expr, &error, ilist)) {
        apfl_raise_error_object(ctx, error);
    }

    call_stack_push(ctx, (struct call_stack_entry) {
        .type = APFL_CSE_FUNCTION,
        .stack = apfl_stack_new(),
        .func = (struct func_call_stack_entry) {
            .pc = 0,
            .instructions = ilist,
            .scopes = {
                // Reuse the runner's scope so definitions persist between
                // expressions; there is no enclosing closure at top level.
                .local = runner->scope,
                .closure = NULL,
            },
            .execution_line = ilist->line,
            .matcher_stack = matcher_stack_new(),
            .returning_from_matcher = false,
        },
    });
    evaluate_until_call_stack_return(ctx);
}
|
|
|
|
static void
|
|
iterative_runner_eval_expr(apfl_iterative_runner runner, struct apfl_expr expr)
|
|
{
|
|
apfl_ctx ctx = runner->ctx;
|
|
size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
|
|
iterative_runner_eval_expr_inner(runner, expr);
|
|
apfl_gc_tmproots_restore(&ctx->gc, tmproots);
|
|
}
|
|
|
|
// Pop the value at the given stack index and write a debug representation of
// it to w. Note the value is *removed* from the stack. An invalid index
// writes a diagnostic message instead and still returns true; false is only
// returned when writing the value representation itself fails.
bool
apfl_debug_print_val(apfl_ctx ctx, apfl_stackidx index, struct apfl_io_writer w)
{
    struct apfl_value value;
    if (!apfl_stack_pop(ctx, &value, index)) {
        FMT_TRY(apfl_io_write_string(w, "apfl_debug_print_val: Invalid stack index "));
        FMT_TRY(apfl_format_put_int(w, (int)index));
        FMT_TRY(apfl_io_write_string(w, "\n"));
        return true;
    }

    return apfl_value_print(value, w);
}
|
|
|
|
// Create an iterative runner that reads source from reader. Returns NULL on
// allocation failure. The runner owns the tokenizer and parser; the top-level
// scope is GC-managed and kept alive via
// apfl_iterative_runner_visit_gc_objects once the runner is registered.
apfl_iterative_runner
apfl_iterative_runner_new(apfl_ctx ctx, struct apfl_source_reader reader)
{
    apfl_iterative_runner runner = NULL;
    apfl_tokenizer_ptr tokenizer = NULL;
    apfl_parser_ptr parser = NULL;

    runner = ALLOC_OBJ(ctx->gc.allocator, struct apfl_iterative_runner_data);
    if (runner == NULL) {
        return NULL;
    }

    tokenizer = apfl_tokenizer_new(ctx->gc.allocator, reader);
    if (tokenizer == NULL) {
        goto error;
    }

    parser = apfl_parser_new(ctx->gc.allocator, apfl_tokenizer_as_token_source(tokenizer));
    if (parser == NULL) {
        goto error;
    }

    struct scope *scope = apfl_scope_new(&ctx->gc);
    if (scope == NULL) {
        goto error;
    }

    *runner = (struct apfl_iterative_runner_data) {
        .ctx = ctx,
        .tokenizer = tokenizer,
        .parser = parser,
        .result = APFL_RESULT_OK,
        .state = IRUNNER_OK,
        .scope = scope,
    };

    if (!apfl_ctx_register_iterative_runner(ctx, runner)) {
        goto error;
    }

    return runner;

    // Cleanup for partially constructed runners. NOTE(review): this relies on
    // FREE_OBJ/apfl_tokenizer_destroy/apfl_parser_destroy being NULL-safe —
    // confirm in their respective headers. The GC-managed scope (if already
    // allocated) is simply left for collection.
error:
    FREE_OBJ(ctx->gc.allocator, runner);
    apfl_tokenizer_destroy(tokenizer);
    apfl_parser_destroy(parser);

    return NULL;
}
|
|
|
|
static void
|
|
iterative_runner_next_protected(apfl_ctx ctx, void *opaque)
|
|
{
|
|
(void)ctx;
|
|
apfl_iterative_runner runner = opaque;
|
|
|
|
switch (apfl_parser_next(runner->parser)) {
|
|
case APFL_PARSE_OK:
|
|
iterative_runner_eval_expr(runner, apfl_parser_get_expr(runner->parser));
|
|
return;
|
|
case APFL_PARSE_ERROR: {
|
|
struct apfl_error err = apfl_parser_get_error(runner->parser);
|
|
if (err.type == APFL_ERR_INPUT_ERROR) {
|
|
runner->state = IRUNNER_ERR;
|
|
}
|
|
apfl_raise_error_object(runner->ctx, err);
|
|
return;
|
|
}
|
|
case APFL_PARSE_EOF:
|
|
runner->state = IRUNNER_EOF;
|
|
return;
|
|
}
|
|
|
|
assert(false);
|
|
}
|
|
|
|
// Evaluate x; on failure (false) raise an allocation error on the context.
#define DECORATE_TRY_FMT(ctx, x) do { if (!(x)) { apfl_raise_alloc_error(ctx); } } while (0)
|
|
|
|
// Error decoration hook for apfl_do_protected: converts the error value on
// top of the stack to a string and appends a numbered backtrace of the
// current call stack, then pushes the combined string. Raises an allocation
// error when string building fails.
void
apfl_error_decorate_with_backtrace(apfl_ctx ctx, void *opaque)
{
    (void)opaque;

    struct apfl_string_builder sb = apfl_string_builder_init(ctx->gc.allocator);
    struct apfl_io_writer w = apfl_io_string_writer(&sb);

    // Make sure the error value on top of the stack is a string.
    apfl_tostring(ctx, -1);

    if (!(
        apfl_io_write_string(w, apfl_get_string(ctx, -1))
        && apfl_io_write_string(w, "\n\nBacktrace:")
    )) {
        goto fail;
    }

    // One line per call stack frame: "#<n>: <frame info>".
    size_t depth = apfl_call_stack_depth(ctx);
    for (size_t i = 0; i < depth; i++) {
        if (!(
            apfl_io_write_string(w, "\n")
            && apfl_io_write_string(w, "#")
            && apfl_format_put_int(w, (int)i+1)
            && apfl_io_write_string(w, ": ")
            && apfl_call_stack_entry_info_format(
                w,
                apfl_call_stack_inspect(ctx, i)
            )
        )) {
            goto fail;
        }
    }

    // Hand the built string over; moving empties the builder, so the
    // following deinit only releases bookkeeping.
    struct apfl_string string = apfl_string_builder_move_string(&sb);
    apfl_string_builder_deinit(&sb);

    if (!apfl_move_string_onto_stack(ctx, string)) {
        apfl_raise_alloc_error(ctx);
    }
    return;

fail:
    apfl_string_builder_deinit(&sb);
    apfl_raise_alloc_error(ctx);
}
|
|
|
|
bool
|
|
apfl_iterative_runner_next(apfl_iterative_runner runner)
|
|
{
|
|
if (runner->state != IRUNNER_OK) {
|
|
return false;
|
|
}
|
|
|
|
apfl_stack_clear(runner->ctx);
|
|
|
|
runner->result = apfl_do_protected(
|
|
runner->ctx,
|
|
iterative_runner_next_protected,
|
|
runner,
|
|
apfl_error_decorate_with_backtrace
|
|
);
|
|
|
|
return runner->state == IRUNNER_OK;
|
|
}
|
|
|
|
// Return the result of the most recent apfl_iterative_runner_next call.
enum apfl_result
apfl_iterative_runner_get_result(apfl_iterative_runner runner)
{
    return runner->result;
}
|
|
|
|
// True when the runner stopped because of an input error (as opposed to
// reaching EOF or still being runnable).
bool
apfl_iterative_runner_stopped_because_of_error(apfl_iterative_runner runner)
{
    return runner->state == IRUNNER_ERR;
}
|
|
|
|
// Drive a read-eval-print loop on the runner: evaluate expressions until EOF
// or a fatal condition, printing non-nil results to w_out and error reports
// to w_err. Returns false on input errors or allocation failure.
// NOTE(review): FMT_TRY presumably returns false from this function when a
// write fails — confirm its definition in format.h.
bool
apfl_iterative_runner_run_repl(
    apfl_iterative_runner runner,
    struct apfl_io_writer w_out,
    struct apfl_io_writer w_err
) {
    apfl_ctx ctx = runner->ctx;

    while (apfl_iterative_runner_next(runner)) {
        switch (apfl_iterative_runner_get_result(runner)) {
        case APFL_RESULT_OK :
            // Suppress nil results; print everything else.
            if (apfl_get_type(ctx, -1) == APFL_VALUE_NIL) {
                apfl_drop(ctx, -1);
            } else {
                FMT_TRY(apfl_debug_print_val(ctx, -1, w_out));
            }
            break;
        case APFL_RESULT_ERR:
            FMT_TRY(apfl_io_write_string(w_err, "Error occurred during evaluation:\n"));
            // String errors are written verbatim, other values get the debug
            // representation.
            if (apfl_get_type(ctx, -1) == APFL_VALUE_STRING) {
                FMT_TRY(apfl_io_write_string(w_err, apfl_get_string(ctx, -1)));
            } else {
                FMT_TRY(apfl_debug_print_val(ctx, -1, w_err));
            }
            FMT_TRY(apfl_io_write_byte(w_err, '\n'));
            break;
        case APFL_RESULT_ERRERR:
            FMT_TRY(apfl_io_write_string(w_err, "Error occurred during error handling.\n"));
            break;
        case APFL_RESULT_ERR_ALLOC:
            // Out of memory is fatal: stop the REPL entirely.
            FMT_TRY(apfl_io_write_string(w_err, "Fatal: Could not allocate memory.\n"));
            return false;
        }
    }

    if (apfl_iterative_runner_stopped_because_of_error(runner)) {
        return false;
    }

    return true;
}
|
|
|
|
void
|
|
apfl_iterative_runner_destroy(apfl_iterative_runner runner)
|
|
{
|
|
if (runner == NULL) {
|
|
return;
|
|
}
|
|
|
|
apfl_parser_destroy(runner->parser);
|
|
apfl_tokenizer_destroy(runner->tokenizer);
|
|
|
|
apfl_ctx_unregister_iterative_runner(runner->ctx, runner);
|
|
FREE_OBJ(runner->ctx->gc.allocator, runner);
|
|
}
|
|
|
|
|
|
void
|
|
apfl_iterative_runner_visit_gc_objects(apfl_iterative_runner runner, gc_visitor visitor, void *opaque)
|
|
{
|
|
// TODO: It's a bit awkward that this function is defined here but the
|
|
// prototype lives in context.h... Maybe we should just merge context
|
|
// and eval together? The separation is rather arbitrary anyway :/
|
|
|
|
if (runner->scope != NULL) {
|
|
visitor(opaque, GC_OBJECT_FROM(runner->scope, GC_TYPE_SCOPE));
|
|
}
|
|
}
|