#include <assert.h>

#include "apfl.h"

#include "alloc.h"
#include "bytecode.h"
#include "compile.h"
#include "context.h"
#include "format.h"
#include "hashmap.h"
#include "resizable.h"
#include "strings.h"
#include "value.h"

static void evaluate(apfl_ctx ctx, struct func_call_stack_entry *cse);
static void evaluate_matcher(apfl_ctx ctx, struct matcher_call_stack_entry *cse);
static void dispatch(apfl_ctx ctx, struct call_stack_entry *cse);
static void matcher_init_matching(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes);

static void
stack_must_drop(apfl_ctx ctx, apfl_stackidx index)
{
    assert(apfl_stack_drop(ctx, index));
}

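/*
 * The ABSTRACT_* macros below implement bounds-checked fetching of the next
 * instruction argument. They are expanded twice, since regular and matcher
 * bytecode use distinct instruction list and argument types.
 */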
#define ABSTRACT_GET_ARGUMENT(i, ilist, arg) \
    if (*i >= ilist->len) { \
        return false; \
    } \
    \
    *arg = ilist->instructions[(*i)++]; \
    return true;

#define ABSTRACT_MUST_GET_ARG(get, ctx, i, ilist, arg) \
    if (!get(i, ilist, arg)) { \
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode); \
    }

static bool
get_argument(size_t *i, struct instruction_list *ilist, union instruction_or_arg *arg)
{
    ABSTRACT_GET_ARGUMENT(i, ilist, arg)
}

static void
must_get_argument(apfl_ctx ctx, size_t *i, struct instruction_list *ilist, union instruction_or_arg *arg)
{
    ABSTRACT_MUST_GET_ARG(get_argument, ctx, i, ilist, arg)
}

static bool
get_matcher_argument(size_t *i, struct matcher_instruction_list *milist, union matcher_instruction_or_arg *arg)
{
    ABSTRACT_GET_ARGUMENT(i, milist, arg)
}

static void
must_get_matcher_argument(apfl_ctx ctx, size_t *i, struct matcher_instruction_list *milist, union matcher_instruction_or_arg *arg)
{
    ABSTRACT_MUST_GET_ARG(get_matcher_argument, ctx, i, milist, arg)
}

enum scope_type {
    SCOPE_LOCAL,
    SCOPE_CLOSUE,
    SCOPE_GLOBAL,
};

static struct scope *
get_scope(apfl_ctx ctx, struct scopes scopes, enum scope_type type)
{
    switch (type) {
    case SCOPE_LOCAL:
        return scopes.local;
    case SCOPE_CLOSUE:
        return scopes.closure;
    case SCOPE_GLOBAL:
        return ctx->globals;
    }

    assert(false);
    return NULL;
}

static struct scope *
get_or_create_local_scope(apfl_ctx ctx, struct scopes *scopes)
{
    if (scopes->local != NULL) {
        return scopes->local;
    }

    if ((scopes->local = apfl_scope_new(&ctx->gc)) == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    return scopes->local;
}

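/*
 * Variable lookup walks the scopes from the innermost outwards: the local
 * scope first, then the closure scope, and finally (if requested) the global
 * scope.
 */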
static bool
try_variable_get_for_scope_type(
    apfl_ctx ctx,
    struct scopes scopes,
    struct apfl_string *name,
    enum scope_type type
) {
    struct apfl_value value;
    struct scope *scope;

    if ((scope = get_scope(ctx, scopes, type)) != NULL) {
        if (apfl_scope_get(scope, name, &value)) {
            apfl_stack_must_push(ctx, value);
            return true;
        }
    }

    return false;
}

static void
variable_get(apfl_ctx ctx, struct scopes scopes, struct apfl_string *name, bool global)
{
    if (try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_LOCAL)) {
        return;
    }
    if (try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_CLOSUE)) {
        return;
    }
    if (global && try_variable_get_for_scope_type(ctx, scopes, name, SCOPE_GLOBAL)) {
        return;
    }

    apfl_raise_errorfmt(ctx, "Variable {string} does not exist.", apfl_string_view_from(*name));
}

static bool
try_variable_update_existing_for_scope_type(
    struct apfl_string *name,
    struct apfl_value value,
    struct scope *scope
) {
    if (scope == NULL) {
        return false;
    }

    return apfl_scope_update_existing(scope, name, value);
}

static void
variable_set_value(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool local, struct apfl_value value)
{
    bool was_set = false;

    if (!local) {
        was_set = try_variable_update_existing_for_scope_type(name, value, scopes->local)
            || try_variable_update_existing_for_scope_type(name, value, scopes->closure);
    }

    if (!was_set) {
        struct scope *scope = get_or_create_local_scope(ctx, scopes);
        assert(scope != NULL /*get_or_create_local_scope should never return NULL*/);

        if (!apfl_scope_set(&ctx->gc, scope, name, value)) {
            apfl_raise_alloc_error(ctx);
        }
    }
}

static void
variable_set(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool keep_on_stack, bool local)
{
    struct apfl_value value = apfl_stack_must_get(ctx, -1);

    variable_set_value(ctx, scopes, name, local, value);

    if (keep_on_stack) {
        // If the value should be kept on the stack, the value is now in two
        // places. We need to set the COW flag to prevent mutations of one copy
        // affecting the other one.
        value = apfl_value_set_cow_flag(value);
    } else {
        stack_must_drop(ctx, -1);
    }
}

static void
variable_new(apfl_ctx ctx, struct scopes *scopes, struct apfl_string *name, bool local)
{
    if (!local) {
        if (scopes->local != NULL && apfl_scope_has(scopes->local, name)) {
            return;
        }
        if (scopes->closure != NULL && apfl_scope_has(scopes->closure, name)) {
            return;
        }
    }

    struct scope *scope = get_or_create_local_scope(ctx, scopes);
    if (!apfl_scope_create_var(&ctx->gc, scope, name)) {
        apfl_raise_alloc_error(ctx);
    }
}

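/*
 * Matchers are built up by dedicated bytecode instructions before they run.
 * Each function call stack entry carries its own stack of matchers under
 * construction; the helpers below push, read and drop its entries.
 */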
static void
matcher_push(apfl_ctx ctx, struct func_call_stack_entry *cse, struct matcher_instruction_list *milist)
{
    struct matcher_stack *matcher_stack = &cse->matcher_stack;
    if (!apfl_resizable_ensure_cap_for_more_elements(
        ctx->gc.allocator,
        sizeof(struct matcher *),
        (void **)&matcher_stack->items,
        matcher_stack->len,
        &matcher_stack->cap,
        1
    )) {
        apfl_raise_alloc_error(ctx);
    }

    if ((matcher_stack->items[matcher_stack->len] = apfl_matcher_new(&ctx->gc, milist)) == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    matcher_stack->len++;
}

static struct matcher *
matcher_stack_top(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    struct matcher_stack *matcher_stack = &cse->matcher_stack;
    if (matcher_stack->len == 0) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    struct matcher *matcher = matcher_stack->items[matcher_stack->len-1];

    if (matcher == NULL) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    return matcher;
}

static void
matcher_stack_drop(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    struct matcher_stack *matcher_stack = &cse->matcher_stack;
    if (matcher_stack->len == 0) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    assert(
        // We're shrinking, should not fail
        apfl_resizable_resize(
            ctx->gc.allocator,
            sizeof(struct matcher *),
            (void **)&matcher_stack->items,
            &matcher_stack->len,
            &matcher_stack->cap,
            matcher_stack->len-1
        )
    );
}

static void
func_inner(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t count)
{
    struct scope *scope = apfl_closure_scope_for_func(ctx, cse->scopes);
    if (scope == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(scope, GC_TYPE_SCOPE))) {
        apfl_raise_alloc_error(ctx);
    }

    struct apfl_value *func_value = apfl_stack_push_placeholder(ctx);
    if (func_value == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    if ((func_value->func = apfl_func_new(
        &ctx->gc,
        count,
        scope,
        cse->execution_line,
        cse->instructions->filename
    )) == NULL) {
        stack_must_drop(ctx, -1);
        apfl_raise_alloc_error(ctx);
    }

    func_value->type = VALUE_FUNC;
}

static void
func(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t count)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
    func_inner(ctx, cse, count);
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

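/*
 * Several operations follow this *_inner pattern: the wrapper brackets the
 * actual work with apfl_gc_tmproots_begin/apfl_gc_tmproots_restore, so any
 * temporary GC roots registered while building values are released again
 * afterwards.
 */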
static void
func_add_subfunc(
    apfl_ctx ctx,
    struct func_call_stack_entry *cse,
    struct instruction_list *body,
    bool with_matcher
) {
    // TODO: Better error messages

    struct apfl_value value = apfl_stack_must_get(ctx, -1);
    if (value.type != VALUE_FUNC) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    if (!apfl_func_add_subfunc(
        value.func,
        body,
        with_matcher ? matcher_stack_top(ctx, cse) : NULL
    )) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    if (with_matcher) {
        matcher_stack_drop(ctx, cse);
    }
}

static void
func_set_name(apfl_ctx ctx, struct apfl_string *name)
{
    struct apfl_value value = apfl_stack_must_get(ctx, -1);
    if (value.type != VALUE_FUNC) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    apfl_func_set_name(value.func, name);
}

static bool
try_call_stack_push(apfl_ctx ctx, struct call_stack_entry cse)
{
    return apfl_resizable_append(
        ctx->gc.allocator,
        sizeof(struct call_stack_entry),
        (void**)&ctx->call_stack.items,
        &ctx->call_stack.len,
        &ctx->call_stack.cap,
        &cse,
        1
    );
}

static void
call_stack_push(apfl_ctx ctx, struct call_stack_entry cse)
{
    if (!try_call_stack_push(ctx, cse)) {
        apfl_call_stack_entry_deinit(ctx->gc.allocator, &cse);
        apfl_raise_alloc_error(ctx);
    }
}

static void
call_stack_drop(apfl_ctx ctx)
{
    assert(ctx->call_stack.len > 0);

    apfl_call_stack_entry_deinit(ctx->gc.allocator, apfl_call_stack_cur_entry(ctx));

    assert(
        // We're shrinking the memory here, should not fail
        apfl_resizable_resize(
            ctx->gc.allocator,
            sizeof(struct call_stack_entry),
            (void **)&ctx->call_stack.items,
            &ctx->call_stack.len,
            &ctx->call_stack.cap,
            ctx->call_stack.len - 1
        )
    );
}

static void
return_from_function_inner(apfl_ctx ctx)
{
    struct apfl_value value;
    if (!apfl_stack_pop(ctx, &value, -1)) {
        // No return value on the stack. Return nil instead.
        value = (struct apfl_value) { .type = VALUE_NIL };
    }

    call_stack_drop(ctx);

    apfl_stack_must_push(ctx, value);
}

static void
return_from_function(apfl_ctx ctx)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
    return_from_function_inner(ctx);
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

static void
prepare_call(apfl_ctx ctx, size_t tmproots, struct apfl_value args, struct call_stack_entry cse)
{
    call_stack_push(ctx, cse);

    // Note: This pushes args on the stack of the newly created call stack entry
    apfl_stack_must_push(ctx, args);

    // Both the function and the args are now rooted again, we can undo the tmproots early.
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

// Keep evaluating instructions until we've returned from the current call stack entry.
// Must not be called with an APFL_CSE_CFUNCTION on top of the call stack.
static void
evaluate_until_call_stack_return(apfl_ctx ctx)
{
    struct call_stack *call_stack = &ctx->call_stack;

    size_t depth_started = call_stack->len;
    assert(depth_started > 0);

    while (call_stack->len >= depth_started) {
        struct call_stack_entry *cse = apfl_call_stack_cur_entry(ctx);
        assert(cse != NULL);

        switch (cse->type) {
        case APFL_CSE_CFUNCTION:
            assert(false);
            break;
        case APFL_CSE_FUNCTION:
            evaluate(ctx, &cse->func);
            break;
        case APFL_CSE_MATCHER:
            evaluate_matcher(ctx, &cse->matcher);
            break;
        case APFL_CSE_FUNCTION_DISPATCH:
            dispatch(ctx, cse);
            break;
        }
    }
}

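/*
 * evaluate_until_call_stack_return acts as a trampoline: evaluate() and
 * evaluate_matcher() return to it whenever they have pushed a new call stack
 * entry, so apfl-level recursion does not grow the C stack.
 */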
static void
must_tmproot_add_value(apfl_ctx ctx, struct apfl_value value)
{
    if (!apfl_value_add_as_tmproot(&ctx->gc, value)) {
        apfl_raise_alloc_error(ctx);
    }
}

static void
call_inner(apfl_ctx ctx, size_t tmproots, apfl_stackidx func_index, apfl_stackidx args_index, bool call_from_apfl)
{
    struct apfl_value func = apfl_stack_must_get(ctx, func_index);
    must_tmproot_add_value(ctx, func);

    struct apfl_value args = apfl_stack_must_get(ctx, args_index);
    must_tmproot_add_value(ctx, args);

    assert(apfl_stack_drop_multi(ctx, 2, (apfl_stackidx[]){func_index, args_index}));

    if (!VALUE_IS_A(func, APFL_VALUE_FUNC)) {
        apfl_raise_errorfmt(ctx, "Can only call functions, got a {value:type} instead", func);
    }
    if (!VALUE_IS_A(args, APFL_VALUE_LIST)) {
        apfl_raise_const_error(ctx, apfl_messages.not_a_list);
    }

    switch (func.type) {
    case VALUE_FUNC: {
        struct scope *local_scope = apfl_scope_new(&ctx->gc);
        if (local_scope == NULL) {
            apfl_raise_alloc_error(ctx);
        }

        if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(local_scope, GC_TYPE_SCOPE))) {
            apfl_raise_alloc_error(ctx);
        }

        prepare_call(ctx, tmproots, args, (struct call_stack_entry) {
            .type = APFL_CSE_FUNCTION_DISPATCH,
            .stack = apfl_stack_new(),
            .func_dispatch = {
                .subfunc = 0,
                .scopes = {
                    .local = local_scope,
                    .closure = func.func->scope,
                },
                .function = func.func,
                .returning_from_matcher = false,
                .matcher_result = false,
            },
        });

        if (call_from_apfl) {
            // In this case we're already coming from evaluate_until_call_stack_return,
            // which will pick up the new stack entry. This way we can avoid doing the recursion in C.
            return;
        } else {
            evaluate_until_call_stack_return(ctx);
        }
        break;
    }
    case VALUE_CFUNC:
        prepare_call(ctx, tmproots, args, (struct call_stack_entry) {
            .type = APFL_CSE_CFUNCTION,
            .stack = apfl_stack_new(),
            .cfunc = {
                .func = func.cfunc,
            },
        });

        func.cfunc->func(ctx);
        return_from_function(ctx);
        break;
    default:
        assert(false); // Otherwise the VALUE_IS_A() check for APFL_VALUE_FUNC would have failed
    }
}

static void
call(apfl_ctx ctx, apfl_stackidx func_index, apfl_stackidx args_index, bool call_from_apfl)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
    call_inner(ctx, tmproots, func_index, args_index, call_from_apfl);
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

void
apfl_call(apfl_ctx ctx, apfl_stackidx func_index, apfl_stackidx args_index)
{
    call(ctx, func_index, args_index, false);
}

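/*
 * Public API entry point: calls the function at func_index with the argument
 * list at args_index. With call_from_apfl set to false, the call is evaluated
 * to completion before control returns to the C caller.
 */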
static void
matcher_set_val(apfl_ctx ctx, struct func_call_stack_entry *cse, size_t index)
{
    struct matcher *matcher = matcher_stack_top(ctx, cse);

    if (index >= matcher->instructions->value_count) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    matcher->values[index] = apfl_stack_must_pop(ctx, -1);
}

static void
matcher_must_match(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);

    struct matcher *matcher = matcher_stack_top(ctx, cse);
    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(matcher, GC_TYPE_MATCHER))) {
        apfl_raise_alloc_error(ctx);
    }
    matcher_stack_drop(ctx, cse);
    matcher_init_matching(ctx, matcher, cse->scopes);

    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

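/*
 * The core bytecode interpreter loop for a function call stack entry. Note
 * that it returns (instead of looping) after any instruction that may have
 * pushed a new call stack entry, so the trampoline above can pick it up.
 */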
static void
evaluate(apfl_ctx ctx, struct func_call_stack_entry *cse)
{
    if (cse->returning_from_matcher) {
        if (!cse->matcher_result) {
            apfl_raise_const_error(ctx, apfl_messages.value_doesnt_match);
        }
        cse->returning_from_matcher = false;
    }

    union instruction_or_arg arg;

    size_t *pc = &cse->pc;
    struct instruction_list *ilist = cse->instructions;

    while (*pc < cse->instructions->len) {
        switch (ilist->instructions[(*pc)++].instruction) {
        case INSN_NIL:
            apfl_push_nil(ctx);
            goto continue_loop;
        case INSN_TRUE:
            apfl_push_bool(ctx, true);
            goto continue_loop;
        case INSN_FALSE:
            apfl_push_bool(ctx, false);
            goto continue_loop;
        case INSN_NUMBER:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_push_number(ctx, arg.number);
            goto continue_loop;
        case INSN_STRING:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_stack_must_push(ctx, (struct apfl_value) {
                .type = VALUE_STRING,
                .string = arg.string,
            });
            goto continue_loop;
        case INSN_LIST:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_list_create(ctx, arg.count);
            goto continue_loop;
        case INSN_LIST_APPEND:
            apfl_list_append(ctx, -2, -1);
            goto continue_loop;
        case INSN_LIST_EXPAND_INTO:
            apfl_list_append_list(ctx, -2, -1);
            goto continue_loop;
        case INSN_DICT:
            apfl_dict_create(ctx);
            goto continue_loop;
        case INSN_DICT_APPEND_KVPAIR:
            apfl_dict_set(ctx, -3, -2, -1);
            goto continue_loop;
        case INSN_GET_MEMBER:
            apfl_get_member(ctx, -2, -1);
            goto continue_loop;
        case INSN_VAR_NEW:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_new(ctx, &cse->scopes, arg.string, false);
            goto continue_loop;
        case INSN_VAR_NEW_LOCAL:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_new(ctx, &cse->scopes, arg.string, true);
            goto continue_loop;
        case INSN_VAR_GET:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_get(ctx, cse->scopes, arg.string, true);
            goto continue_loop;
        case INSN_VAR_SET:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, true, false);
            goto continue_loop;
        case INSN_VAR_SET_LOCAL:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, true, true);
            goto continue_loop;
        case INSN_MOVE_TO_LOCAL_VAR:
            must_get_argument(ctx, pc, ilist, &arg);
            variable_set(ctx, &cse->scopes, arg.string, false, true);
            goto continue_loop;
        case INSN_NEXT_LINE:
            cse->execution_line++;
            goto continue_loop;
        case INSN_SET_LINE:
            must_get_argument(ctx, pc, ilist, &arg);
            cse->execution_line = arg.count;
            goto continue_loop;
        case INSN_GET_BY_INDEX_KEEP:
            must_get_argument(ctx, pc, ilist, &arg);
            apfl_get_list_member_by_index(ctx, -1, arg.index);
            goto continue_loop;
        case INSN_DROP:
            if (!apfl_stack_drop(ctx, -1)) {
                apfl_raise_invalid_stackidx(ctx);
            }
            goto continue_loop;
        case INSN_DUP:
            apfl_copy(ctx, -1);
            goto continue_loop;
        case INSN_CALL:
            call(ctx, -2, -1, true);

            // By returning from this function, the newly pushed call stack entry (if any) will get picked up by
            // evaluate_until_call_stack_return. In case no new CSE was pushed (when a cfunc was called), we'll
            // then simply continue with the current call stack.
            return;
        case INSN_FUNC:
            must_get_argument(ctx, pc, ilist, &arg);
            func(ctx, cse, arg.count);
            goto continue_loop;
        case INSN_FUNC_ADD_SUBFUNC:
            must_get_argument(ctx, pc, ilist, &arg);
            func_add_subfunc(ctx, cse, arg.body, true);
            goto continue_loop;
        case INSN_FUNC_ADD_SUBFUNC_ANYARGS:
            must_get_argument(ctx, pc, ilist, &arg);
            func_add_subfunc(ctx, cse, arg.body, false);
            goto continue_loop;
        case INSN_FUNC_SET_NAME:
            must_get_argument(ctx, pc, ilist, &arg);
            func_set_name(ctx, arg.string);
            goto continue_loop;
        case INSN_MATCHER_PUSH:
            must_get_argument(ctx, pc, ilist, &arg);
            matcher_push(ctx, cse, arg.matcher);
            goto continue_loop;
        case INSN_MATCHER_SET_VAL:
            must_get_argument(ctx, pc, ilist, &arg);
            matcher_set_val(ctx, cse, arg.index);
            goto continue_loop;
        case INSN_MATCHER_MUST_MATCH:
            // matcher_must_match pushes a new call stack entry for the matcher onto the stack. We return from this
            // function so this new CSE gets executed. By setting returning_from_matcher, we know that we came from
            // the matcher, once it returns.
            matcher_must_match(ctx, cse);

            return;
        }

        assert(false);

continue_loop:;
    }

    return_from_function(ctx);
}

static void
matcher_state_push(apfl_ctx ctx, struct matcher_call_stack_entry *cse, struct matcher_state entry)
{
    if (!apfl_resizable_append(
        ctx->gc.allocator,
        sizeof(struct matcher_state),
        (void **)&cse->matcher_state_stack,
        &cse->matcher_state_stack_len,
        &cse->matcher_state_stack_cap,
        &entry,
        1
    )) {
        apfl_raise_alloc_error(ctx);
    }
}

noreturn static void
raise_invalid_matcher_state(apfl_ctx ctx)
{
    apfl_raise_const_error(ctx, apfl_messages.invalid_matcher_state);
}

static void
matcher_state_drop(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    if (cse->matcher_state_stack_len == 0) {
        raise_invalid_matcher_state(ctx);
    }

    assert(
        // We're shrinking, should not fail
        apfl_resizable_resize(
            ctx->gc.allocator,
            sizeof(struct matcher_state),
            (void **)&cse->matcher_state_stack,
            &cse->matcher_state_stack_len,
            &cse->matcher_state_stack_cap,
            cse->matcher_state_stack_len-1
        )
    );
}

static void
matcher_init_matching_inner(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes)
{
    struct apfl_value value = apfl_stack_must_pop(ctx, -1);
    must_tmproot_add_value(ctx, value);

    if (matcher == NULL) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }

    size_t capture_count = matcher->instructions->capture_count;
    struct matcher_call_stack_entry matcher_cse = {
        .pc = 0,
        .from_predicate = false,
        .matcher = matcher,
        .scopes = scopes,
        .capture_index = 0,
        .capture_count = capture_count,
        .captures = NULL,
        .transfers = NULL,
        .matcher_state_stack = NULL,
        .matcher_state_stack_len = 0,
        .matcher_state_stack_cap = 0,
    };

    if (capture_count > 0) {
        if ((matcher_cse.captures = ALLOC_LIST(ctx->gc.allocator, struct apfl_value, capture_count)) == NULL) {
            goto error;
        }

        for (size_t i = 0; i < capture_count; i++) {
            matcher_cse.captures[i] = (struct apfl_value) { .type = VALUE_NIL };
        }

        if ((matcher_cse.transfers = ALLOC_LIST(
            ctx->gc.allocator,
            struct matcher_capture_transfer,
            capture_count
        )) == NULL) {
            goto error;
        }

        for (size_t i = 0; i < capture_count; i++) {
            matcher_cse.transfers[i] = (struct matcher_capture_transfer) {
                .var = NULL,
                .path_start = 0,
                .path_len = 0,
                .local = false,
            };
        }
    }

    if ((matcher_cse.matcher_state_stack = ALLOC_LIST(
        ctx->gc.allocator,
        struct matcher_state,
        1
    )) == NULL) {
        goto error;
    }

    matcher_cse.matcher_state_stack[0] = (struct matcher_state) {
        .mode = MATCHER_MODE_VALUE,
    };
    matcher_cse.matcher_state_stack_len = 1;
    matcher_cse.matcher_state_stack_cap = 1;

    if (!try_call_stack_push(ctx, (struct call_stack_entry) {
        .type = APFL_CSE_MATCHER,
        .stack = apfl_stack_new(),
        .matcher = matcher_cse,
    })) {
        goto error;
    }

    // No need for `goto error` on failure here, all dynamically allocated
    // elements are on the call stack now, so the GC can clean them up in case
    // of an error.
    apfl_stack_must_push(ctx, apfl_value_set_cow_flag(value));

    return;

error:
    apfl_matcher_call_stack_entry_deinit(ctx->gc.allocator, &matcher_cse);
    apfl_raise_alloc_error(ctx);
}

/*
 * Initialise matching. Pushes a new call stack entry and pops a value off the
 * current value stack.
 */
static void
matcher_init_matching(apfl_ctx ctx, struct matcher *matcher, struct scopes scopes)
{
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
    matcher_init_matching_inner(ctx, matcher, scopes);
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

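/*
 * The matcher walks the value being matched with an explicit stack of
 * matcher_state entries: matching a plain value, walking a list from its
 * start or its end, holding the remaining slice of a partially matched list,
 * or stopped. The helpers below query and advance that state machine.
 */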
static void
matcher_check_index(apfl_ctx ctx, size_t count, size_t index)
{
    if (index >= count) {
        apfl_raise_const_error(ctx, apfl_messages.corrupted_bytecode);
    }
}

static struct matcher_state *
matcher_cur_state(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    if (cse->matcher_state_stack_len == 0) {
        raise_invalid_matcher_state(ctx);
    }

    return &cse->matcher_state_stack[cse->matcher_state_stack_len-1];
}

static bool
matcher_current_val_in_state(apfl_ctx ctx, struct matcher_state *state, struct apfl_value *value)
{
    struct apfl_value cur;

    switch (state->mode) {
    case MATCHER_MODE_VALUE:
    case MATCHER_MODE_LIST_REMAINING:
        if (!apfl_stack_get(ctx, &cur, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        *value = cur;
        return true;
    case MATCHER_MODE_STOP:
    case MATCHER_MODE_LIST_UNDERFLOW:
        return false;
    case MATCHER_MODE_LIST_START:
        if (!apfl_stack_get(ctx, &cur, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        if (cur.type != VALUE_LIST) {
            raise_invalid_matcher_state(ctx);
        }
        if (state->lower >= cur.list->len) {
            return false;
        }
        *value = cur.list->items[state->lower];
        return true;
    case MATCHER_MODE_LIST_END:
        if (!apfl_stack_get(ctx, &cur, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        if (cur.type != VALUE_LIST) {
            raise_invalid_matcher_state(ctx);
        }
        if (state->upper == 0) {
            return false;
        }
        *value = cur.list->items[state->upper-1];
        return true;
    }

    raise_invalid_matcher_state(ctx);
}

static bool
matcher_current_val(apfl_ctx ctx, struct matcher_call_stack_entry *cse, struct apfl_value *value)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    return matcher_current_val_in_state(ctx, state, value);
}

static bool
matcher_next(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
again:;
    struct matcher_state *state = matcher_cur_state(ctx, cse);

    switch (state->mode) {
    case MATCHER_MODE_VALUE:
        state->mode = MATCHER_MODE_STOP;
        if (!apfl_stack_drop(ctx, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        return true;
    case MATCHER_MODE_STOP:
    case MATCHER_MODE_LIST_UNDERFLOW:
        raise_invalid_matcher_state(ctx);
        return false;
    case MATCHER_MODE_LIST_START:
        state->lower++;
        return true;
    case MATCHER_MODE_LIST_END:
        if (state->upper <= state->lower) {
            state->mode = MATCHER_MODE_LIST_UNDERFLOW;
            return false;
        }
        state->upper--;
        return true;
    case MATCHER_MODE_LIST_REMAINING:
        if (!apfl_stack_drop(ctx, -1)) {
            raise_invalid_matcher_state(ctx);
        }
        matcher_state_drop(ctx, cse);
        goto again; // We also need to advance the previous stack entry,
                    // like we would do when doing a MATCHER_LEAVE_LIST
    }

    raise_invalid_matcher_state(ctx);
}

static bool
matcher_enter_list(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    struct apfl_value cur;
    if (!matcher_current_val_in_state(ctx, state, &cur)) {
        return false;
    }
    if (cur.type != VALUE_LIST) {
        return false;
    }

    size_t len = cur.list->len;

    apfl_stack_must_push(ctx, cur);

    matcher_state_push(ctx, cse, (struct matcher_state) {
        .mode = MATCHER_MODE_LIST_START,
        .lower = 0,
        .upper = len,
    });
    return true;
}

static void
matcher_continue_from_end(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    if (state->mode != MATCHER_MODE_LIST_START) {
        raise_invalid_matcher_state(ctx);
    }
    state->mode = MATCHER_MODE_LIST_END;
}

static void
matcher_remainding(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    struct apfl_value cur;

    if (!apfl_stack_get(ctx, &cur, -1)) {
        raise_invalid_matcher_state(ctx);
    }

    if (
        (state->mode != MATCHER_MODE_LIST_START && state->mode != MATCHER_MODE_LIST_END)
        || cur.type != VALUE_LIST
    ) {
        raise_invalid_matcher_state(ctx);
    }

    if (state->lower > state->upper) {
        raise_invalid_matcher_state(ctx);
    }

    struct list_header *cur_list = cur.list;
    assert(cur_list->len >= state->upper);

    size_t len = state->upper - state->lower;

    apfl_list_create(ctx, len);
    struct apfl_value new_val = apfl_stack_must_get(ctx, -1);
    assert(new_val.type == VALUE_LIST);

    struct list_header *new_list = new_val.list;
    assert(new_list->cap == len);
    assert(new_list->len == 0);
    for (size_t i = state->lower; i < state->upper; i++) {
        new_list->items[new_list->len++] = cur_list->items[i];
    }
    assert(new_list->len == len);

    if (!apfl_stack_drop(ctx, -2)) { // Drop the original list
        raise_invalid_matcher_state(ctx);
    }

    state->mode = MATCHER_MODE_LIST_REMAINING;
}

static bool
matcher_leave_list(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    struct matcher_state *state = matcher_cur_state(ctx, cse);
    if (state->mode != MATCHER_MODE_LIST_START) {
        raise_invalid_matcher_state(ctx);
    }

    if (state->lower < state->upper) {
        // List was not completely matched
        return false;
    }

    if (!apfl_stack_drop(ctx, -1)) {
        raise_invalid_matcher_state(ctx);
    }
    matcher_state_drop(ctx, cse);
    return matcher_next(ctx, cse);
}

static void
matcher_transfer(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    for (size_t i = 0; i < cse->capture_count; i++) {
        struct matcher_capture_transfer transfer = cse->transfers[i];
        if (transfer.path_len == 0) {
            variable_set_value(ctx, &cse->scopes, transfer.var, transfer.local, cse->captures[i]);
        } else {
            // Set the value at a key path in a (nested) dictionary.

            variable_get(ctx, cse->scopes, transfer.var, false);
            if (apfl_get_type(ctx, -1) != APFL_VALUE_DICT) {
                apfl_raise_errorfmt(ctx, "Can not update value of type {stack:type}, expected dict", -1);
            }

            // Get or create intermediary dictionaries along the key path and leave a copy of the previous one on the
            // stack, so we can set the result in reverse order there later.
            for (size_t i = 0; i < transfer.path_len - 1; i++) {
                apfl_copy(ctx, -1);

                size_t value_index = transfer.path_start + i;
                matcher_check_index(ctx, cse->matcher->value_count, value_index);
                apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));
                if (apfl_get_member_if_exists(ctx, -2, -1)) {
                    if (apfl_get_type(ctx, -1) != APFL_VALUE_DICT) {
                        apfl_raise_errorfmt(ctx, "Can not update value of type {stack:type}, expected dict", -1);
                    }
                } else {
                    apfl_dict_create(ctx);
                }
            }

            // Set the value at the rightmost dictionary key
            size_t value_index = transfer.path_start + transfer.path_len - 1;
            matcher_check_index(ctx, cse->matcher->value_count, value_index);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));
            apfl_stack_must_push(ctx, cse->captures[i]);
            apfl_dict_set(ctx, -3, -2, -1);

            // Go through the key path (minus the rightmost key) in reverse order and set the value in the intermediary
            // dictionaries. Note that i has an offset of one here so we can use the i > 0 check in the loop (>= 0 would
            // not work as size_t is unsigned).
            for (size_t i = transfer.path_len - 1; i > 0; i--) {
                size_t value_index = transfer.path_start + i - 1;
                matcher_check_index(ctx, cse->matcher->value_count, value_index);
                apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cse->matcher->values[value_index]));

                apfl_dict_set(ctx, -3, -1, -2);
            }

            // Finally set the copied and modified dictionary to the variable again.
            variable_set(ctx, &cse->scopes, transfer.var, false, false);
        }
    }
}

static void
return_from_matcher(apfl_ctx ctx, bool result)
{
    struct call_stack_entry *cse = apfl_call_stack_cur_entry(ctx);
    assert(cse != NULL);
    assert(cse->type == APFL_CSE_MATCHER);

    if (result) {
        matcher_transfer(ctx, &cse->matcher);
    }

    call_stack_drop(ctx);

    cse = apfl_call_stack_cur_entry(ctx);
    assert(cse != NULL);

    switch (cse->type) {
    case APFL_CSE_FUNCTION:
        cse->func.returning_from_matcher = true;
        cse->func.matcher_result = result;
        break;
    case APFL_CSE_FUNCTION_DISPATCH:
        cse->func_dispatch.returning_from_matcher = true;
        cse->func_dispatch.matcher_result = result;
        break;
    default:
        assert(false /* Invalid stack entry below matcher stack */);
    }
}

|
|
|
|
|
|
|
|
|
|
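// These helpers have to be macros: they contain a bare return, so they
// return from the evaluation function that invokes them, which lets the
// interpreter loop pick up the call stack change made by
// return_from_matcher.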
#define RETURN_WITHOUT_MATCH(ctx) \
    do { \
        return_from_matcher((ctx), false); \
        return; \
    } while (0)

#define RETURN_WITHOUT_MATCH_ON_FALSE(ctx, x) \
    do { \
        if (!(x)) { \
            RETURN_WITHOUT_MATCH(ctx); \
        } \
    } while (0)

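// Evaluate one of the MATCHER_CAPTURE_TO_VAR* instructions. All variants
// read the target variable name as their first argument; the *_WITH_PATH
// variants additionally read the start index and length of a key path.
// The current value is only recorded in the capture/transfer arrays here;
// matcher_transfer assigns it once the whole match has succeeded.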
static bool
matcher_evaluate_capturing_instruction(
    apfl_ctx ctx,
    struct matcher_call_stack_entry *cse,
    bool local,
    bool with_path
) {
    size_t *pc = &cse->pc;
    struct matcher *matcher = cse->matcher;
    struct matcher_instruction_list *milist = matcher->instructions;

    struct matcher_capture_transfer transfer = {
        .var = NULL,
        .path_start = 0,
        .path_len = 0,
        .local = local,
    };

    union matcher_instruction_or_arg arg;

    must_get_matcher_argument(ctx, pc, milist, &arg);
    transfer.var = arg.string;

    if (with_path) {
        must_get_matcher_argument(ctx, pc, milist, &arg);
        transfer.path_start = arg.index;
        must_get_matcher_argument(ctx, pc, milist, &arg);
        transfer.path_len = arg.index;
    }

    struct apfl_value cur;

    if (!matcher_current_val(ctx, cse, &cur)) {
        return false;
    }

    size_t capture = cse->capture_index++;
    matcher_check_index(ctx, milist->capture_count, capture);

    cse->captures[capture] = apfl_value_set_cow_flag(cur);
    cse->transfers[capture] = transfer;

    return matcher_next(ctx, cse);
}

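// Run the matcher's instruction list from the current program counter.
// MATCHER_CHECK_PRED suspends this function: it pushes a call to the
// predicate and returns, and the from_predicate flag makes the next
// invocation consume the predicate's result from the value stack.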
static void
evaluate_matcher(apfl_ctx ctx, struct matcher_call_stack_entry *cse)
{
    union matcher_instruction_or_arg arg;

    if (cse->from_predicate) {
        cse->from_predicate = false;
        RETURN_WITHOUT_MATCH_ON_FALSE(ctx, apfl_is_truthy(ctx, -1));
    }

    size_t *pc = &cse->pc;
    struct matcher *matcher = cse->matcher;
    struct matcher_instruction_list *milist = matcher->instructions;

    while (*pc < milist->len) {
        struct apfl_value cur;

        switch (milist->instructions[(*pc)++].instruction) {
        case MATCHER_CAPTURE_TO_VAR:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, false, false)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_LOCAL:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, true, false)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_WITH_PATH:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, false, true)
            );
            goto continue_loop;
        case MATCHER_CAPTURE_TO_VAR_LOCAL_WITH_PATH:
            RETURN_WITHOUT_MATCH_ON_FALSE(
                ctx,
                matcher_evaluate_capturing_instruction(ctx, cse, true, true)
            );
            goto continue_loop;
        case MATCHER_IGNORE:
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_next(ctx, cse));
            goto continue_loop;
        case MATCHER_CHECK_CONST:
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            must_get_matcher_argument(ctx, pc, milist, &arg);
            matcher_check_index(ctx, milist->value_count, arg.index);
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, apfl_value_eq(matcher->values[arg.index], cur));
            goto continue_loop;
        case MATCHER_CHECK_PRED:
            if (!matcher_current_val(ctx, cse, &cur)) {
                RETURN_WITHOUT_MATCH(ctx);
            }
            must_get_matcher_argument(ctx, pc, milist, &arg);
            matcher_check_index(ctx, milist->value_count, arg.index);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(matcher->values[arg.index]));
            apfl_list_create(ctx, 1);
            apfl_stack_must_push(ctx, apfl_value_set_cow_flag(cur));
            apfl_list_append(ctx, -2, -1);

            cse->from_predicate = true;
            call(ctx, -2, -1, true);

            // By returning from this function, the newly pushed call stack entry (if any) will get
            // picked up by evaluate_until_call_stack_return. In case no new CSE was pushed (when a
            // cfunc was called), we'll simply continue with the current call stack.
            return;
        case MATCHER_ENTER_LIST:
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_enter_list(ctx, cse));
            goto continue_loop;
        case MATCHER_LEAVE_LIST:
            RETURN_WITHOUT_MATCH_ON_FALSE(ctx, matcher_leave_list(ctx, cse));
            goto continue_loop;
        case MATCHER_CONTINUE_FROM_END:
            matcher_continue_from_end(ctx, cse);
            goto continue_loop;
        case MATCHER_REMAINDING:
            matcher_remainding(ctx, cse);
            goto continue_loop;
        }

        assert(false);

continue_loop:;
    }

    return_from_matcher(
        ctx,
        // We've successfully matched everything if there's only one state stack element left and
        // we're in the stop state.
        cse->matcher_state_stack_len == 1 && cse->matcher_state_stack[0].mode == MATCHER_MODE_STOP
    );
}

struct matcher_stack
matcher_stack_new(void)
{
    return (struct matcher_stack) {
        .items = NULL,
        .len = 0,
        .cap = 0,
    };
}

static void
dispatch_accept(struct call_stack_entry *cse)
{
    assert(cse->type == APFL_CSE_FUNCTION_DISPATCH);
    struct func_dispatch_call_stack_entry *fd_cse = &cse->func_dispatch;

    struct function *function = fd_cse->function;
    struct subfunction *subfunction = &function->subfunctions[fd_cse->subfunc];

    // Replace the current CSE with a function CSE.
    cse->type = APFL_CSE_FUNCTION;
    cse->stack.len = 0;
    cse->func = (struct func_call_stack_entry) {
        .pc = 0,
        .instructions = subfunction->body,
        .scopes = fd_cse->scopes,
        .execution_line = subfunction->body->line,
        .matcher_stack = matcher_stack_new(),
        .returning_from_matcher = false,
        .matcher_result = false,
        .function = function,
        .subfunction_index = fd_cse->subfunc,
    };
}

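// Select the subfunction whose matcher accepts the arguments on the stack.
// dispatch runs once per subfunction attempt: each matcher run returns
// control through the call stack, and the returning_from_matcher /
// matcher_result / subfunc fields in the dispatch entry record where to
// resume, since local variables don't survive the round trip.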
static void
dispatch(apfl_ctx ctx, struct call_stack_entry *cse)
{
    assert(cse->type == APFL_CSE_FUNCTION_DISPATCH);
    struct func_dispatch_call_stack_entry *fd_cse = &cse->func_dispatch;

    struct function *function = fd_cse->function;

    if (fd_cse->returning_from_matcher) {
        if (fd_cse->matcher_result) {
            dispatch_accept(cse);
            return;
        }

        fd_cse->subfunc++;
        fd_cse->returning_from_matcher = false;
    }

    if (fd_cse->subfunc >= function->subfunctions_len) {
        apfl_raise_const_error(ctx, apfl_messages.no_matching_subfunction);
    }

    struct matcher *matcher = function->subfunctions[fd_cse->subfunc].matcher;

    if (matcher == NULL) {
        dispatch_accept(cse);
        return;
    }

    // matcher_init_matching consumes the value on top of the stack, so we
    // need to keep a copy around for the remaining subfunctions.
    apfl_copy(ctx, -1);
    matcher_init_matching(
        ctx,
        function->subfunctions[fd_cse->subfunc].matcher,
        fd_cse->scopes
    );
}

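// The iterative runner is a small state machine: IRUNNER_OK while more
// input may follow, IRUNNER_EOF once the parser reached the end of the
// input, and IRUNNER_ERR after an input error made further parsing
// pointless.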
enum iterative_runner_state {
    IRUNNER_OK,
    IRUNNER_EOF,
    IRUNNER_ERR,
};

struct apfl_iterative_runner_data {
    apfl_ctx ctx;
    apfl_tokenizer_ptr tokenizer;
    apfl_parser_ptr parser;
    enum apfl_result result;
    enum iterative_runner_state state;
    struct scope *scope;
};

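// Compile a single parsed expression into a fresh instruction list, push
// it as a function call stack entry running in the runner's scope and
// evaluate it to completion. The instruction list is added as a GC
// temporary root so a collection triggered during compilation or
// evaluation cannot free it.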
static void
iterative_runner_eval_expr_inner(apfl_iterative_runner runner, struct apfl_expr expr)
{
    apfl_ctx ctx = runner->ctx;

    struct instruction_list *ilist = apfl_instructions_new(&ctx->gc, expr.position.line, NULL);
    if (ilist == NULL) {
        apfl_raise_alloc_error(ctx);
    }

    if (!apfl_gc_tmproot_add(&ctx->gc, GC_OBJECT_FROM(ilist, GC_TYPE_INSTRUCTIONS))) {
        apfl_raise_alloc_error(ctx);
    }

    struct apfl_error error;
    if (!apfl_compile(&ctx->gc, expr, &error, ilist)) {
        apfl_raise_error_object(ctx, error);
    }

    call_stack_push(ctx, (struct call_stack_entry) {
        .type = APFL_CSE_FUNCTION,
        .stack = apfl_stack_new(),
        .func = (struct func_call_stack_entry) {
            .pc = 0,
            .instructions = ilist,
            .scopes = {
                .local = runner->scope,
                .closure = NULL,
            },
            .execution_line = ilist->line,
            .matcher_stack = matcher_stack_new(),
            .returning_from_matcher = false,
        },
    });
    evaluate_until_call_stack_return(ctx);
}

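// Wrapper that brackets the actual work in a GC temporary root frame:
// roots added while compiling and evaluating the expression are dropped
// again afterwards.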
static void
iterative_runner_eval_expr(apfl_iterative_runner runner, struct apfl_expr expr)
{
    apfl_ctx ctx = runner->ctx;
    size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
    iterative_runner_eval_expr_inner(runner, expr);
    apfl_gc_tmproots_restore(&ctx->gc, tmproots);
}

bool
apfl_debug_print_val(apfl_ctx ctx, apfl_stackidx index, struct apfl_io_writer w)
{
    struct apfl_value value;
    if (!apfl_stack_pop(ctx, &value, index)) {
        FMT_TRY(apfl_io_write_string(w, "apfl_debug_print_val: Invalid stack index "));
        FMT_TRY(apfl_format_put_int(w, (int)index));
        FMT_TRY(apfl_io_write_string(w, "\n"));
        return true;
    }

    return apfl_value_print(value, w);
}

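// Create a runner that parses and evaluates one top-level expression per
// step. A minimal usage sketch (reader construction and error reporting
// elided):
//
//     apfl_iterative_runner runner = apfl_iterative_runner_new(ctx, reader);
//     if (runner != NULL) {
//         while (apfl_iterative_runner_next(runner)) {
//             // inspect apfl_iterative_runner_get_result(runner)
//         }
//         apfl_iterative_runner_destroy(runner);
//     }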
apfl_iterative_runner
apfl_iterative_runner_new(apfl_ctx ctx, struct apfl_source_reader reader)
{
    apfl_iterative_runner runner = NULL;
    apfl_tokenizer_ptr tokenizer = NULL;
    apfl_parser_ptr parser = NULL;

    runner = ALLOC_OBJ(ctx->gc.allocator, struct apfl_iterative_runner_data);
    if (runner == NULL) {
        return NULL;
    }

    tokenizer = apfl_tokenizer_new(ctx->gc.allocator, reader);
    if (tokenizer == NULL) {
        goto error;
    }

    parser = apfl_parser_new(ctx->gc.allocator, apfl_tokenizer_as_token_source(tokenizer));
    if (parser == NULL) {
        goto error;
    }

    struct scope *scope = apfl_scope_new(&ctx->gc);
    if (scope == NULL) {
        goto error;
    }

    *runner = (struct apfl_iterative_runner_data) {
        .ctx = ctx,
        .tokenizer = tokenizer,
        .parser = parser,
        .result = APFL_RESULT_OK,
        .state = IRUNNER_OK,
        .scope = scope,
    };

    if (!apfl_ctx_register_iterative_runner(ctx, runner)) {
        goto error;
    }

    return runner;

error:
    FREE_OBJ(ctx->gc.allocator, runner);
    apfl_tokenizer_destroy(tokenizer);
    apfl_parser_destroy(parser);

    return NULL;
}

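// Parse and evaluate the next expression. This function runs under
// apfl_do_protected, so errors raised here unwind back to
// apfl_iterative_runner_next.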
static void
iterative_runner_next_protected(apfl_ctx ctx, void *opaque)
{
    (void)ctx;
    apfl_iterative_runner runner = opaque;

    switch (apfl_parser_next(runner->parser)) {
    case APFL_PARSE_OK:
        iterative_runner_eval_expr(runner, apfl_parser_get_expr(runner->parser));
        return;
    case APFL_PARSE_ERROR: {
        struct apfl_error err = apfl_parser_get_error(runner->parser);
        if (err.type == APFL_ERR_INPUT_ERROR) {
            runner->state = IRUNNER_ERR;
        }
        apfl_raise_error_object(runner->ctx, err);
        return;
    }
    case APFL_PARSE_EOF:
        runner->state = IRUNNER_EOF;
        return;
    }

    assert(false);
}

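// Error decoration: when a step fails, replace the error value on top of
// the stack with a string containing its textual representation plus a
// backtrace of the call stack at the time of the error.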
#define DECORATE_TRY_FMT(ctx, x) do { if (!(x)) { apfl_raise_alloc_error(ctx); } } while (0)

static void
iterative_runner_decorate_error(apfl_ctx ctx, void *opaque)
{
    (void)opaque;

    struct apfl_string_builder sb = apfl_string_builder_init(ctx->gc.allocator);
    struct apfl_io_writer w = apfl_io_string_writer(&sb);

    apfl_tostring(ctx, -1);

    if (!(
        apfl_io_write_string(w, apfl_get_string(ctx, -1))
        && apfl_io_write_string(w, "\n\nBacktrace:")
    )) {
        goto fail;
    }

    size_t depth = apfl_call_stack_depth(ctx);
    for (size_t i = 0; i < depth; i++) {
        if (!(
            apfl_io_write_string(w, "\n")
            && apfl_io_write_string(w, "#")
            && apfl_format_put_int(w, (int)i + 1)
            && apfl_io_write_string(w, ": ")
            && apfl_call_stack_entry_info_format(
                w,
                apfl_call_stack_inspect(ctx, i)
            )
        )) {
            goto fail;
        }
    }

    struct apfl_string string = apfl_string_builder_move_string(&sb);
    apfl_string_builder_deinit(&sb);

    if (!apfl_move_string_onto_stack(ctx, string)) {
        apfl_raise_alloc_error(ctx);
    }
    return;

fail:
    apfl_string_builder_deinit(&sb);
    apfl_raise_alloc_error(ctx);
}

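// Advance the runner by one expression. The return value only says whether
// the runner can be advanced again; the outcome of this step has to be
// read via apfl_iterative_runner_get_result.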
bool
apfl_iterative_runner_next(apfl_iterative_runner runner)
{
    if (runner->state != IRUNNER_OK) {
        return false;
    }

    apfl_stack_clear(runner->ctx);

    runner->result = apfl_do_protected(
        runner->ctx,
        iterative_runner_next_protected,
        runner,
        iterative_runner_decorate_error
    );

    return runner->state == IRUNNER_OK;
}

enum apfl_result
apfl_iterative_runner_get_result(apfl_iterative_runner runner)
{
    return runner->result;
}

bool
apfl_iterative_runner_stopped_because_of_error(apfl_iterative_runner runner)
{
    return runner->state == IRUNNER_ERR;
}

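// Drive the runner as a REPL: print every non-nil result to w_out, print
// evaluation errors to w_err and keep going, and give up only when the
// input failed or memory ran out.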
bool
apfl_iterative_runner_run_repl(
    apfl_iterative_runner runner,
    struct apfl_io_writer w_out,
    struct apfl_io_writer w_err
) {
    apfl_ctx ctx = runner->ctx;

    while (apfl_iterative_runner_next(runner)) {
        switch (apfl_iterative_runner_get_result(runner)) {
        case APFL_RESULT_OK:
            if (apfl_get_type(ctx, -1) == APFL_VALUE_NIL) {
                apfl_drop(ctx, -1);
            } else {
                FMT_TRY(apfl_debug_print_val(ctx, -1, w_out));
            }
            break;
        case APFL_RESULT_ERR:
            FMT_TRY(apfl_io_write_string(w_err, "Error occurred during evaluation:\n"));
            if (apfl_get_type(ctx, -1) == APFL_VALUE_STRING) {
                FMT_TRY(apfl_io_write_string(w_err, apfl_get_string(ctx, -1)));
            } else {
                FMT_TRY(apfl_debug_print_val(ctx, -1, w_err));
            }
            FMT_TRY(apfl_io_write_byte(w_err, '\n'));
            break;
        case APFL_RESULT_ERRERR:
            FMT_TRY(apfl_io_write_string(w_err, "Error occurred during error handling.\n"));
            break;
        case APFL_RESULT_ERR_ALLOC:
            FMT_TRY(apfl_io_write_string(w_err, "Fatal: Could not allocate memory.\n"));
            return false;
        }
    }

    if (apfl_iterative_runner_stopped_because_of_error(runner)) {
        return false;
    }

    return true;
}

void
apfl_iterative_runner_destroy(apfl_iterative_runner runner)
{
    if (runner == NULL) {
        return;
    }

    apfl_parser_destroy(runner->parser);
    apfl_tokenizer_destroy(runner->tokenizer);

    apfl_ctx_unregister_iterative_runner(runner->ctx, runner);
    FREE_OBJ(runner->ctx->gc.allocator, runner);
}

void
apfl_iterative_runner_visit_gc_objects(apfl_iterative_runner runner, gc_visitor visitor, void *opaque)
{
    // TODO: It's a bit awkward that this function is defined here but the
    // prototype lives in context.h... Maybe we should just merge context
    // and eval together? The separation is rather arbitrary anyway :/

    if (runner->scope != NULL) {
        visitor(opaque, GC_OBJECT_FROM(runner->scope, GC_TYPE_SCOPE));
    }
}