gc+context: Make stack no longer a gc object
The new roots callback mechanism makes it possible to traverse the gc objects on the stack without the stack itself needing to be a gc object. This also uncovered a nasty bug in apfl_stack_pop, where we passed the wrong mem pointer into apfl_resizable_splice. We should probably find a way to make the apfl_resizable_* functions a bit safer; the need for casting things to void** easily hides errors.
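
For illustration, a minimal standalone sketch of how a void ** parameter lets the wrong pointer through without any diagnostic. This is not apfl code: resizable_remove_at is a made-up stand-in for apfl_resizable_splice, with a simplified signature.

    /* A wrong-pointer bug of the apfl_stack_pop kind, reduced to a toy. */
    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    struct stack {
        int *items;
        size_t len;
        size_t cap;
    };

    /* Remove one element at `index`. Because `items` is void **, any
     * pointer-to-something converts silently, so the compiler cannot tell a
     * correct call from a wrong one. */
    static void resizable_remove_at(size_t elem_size, void **items, size_t *len, size_t index)
    {
        char *base = *items;
        memmove(base + index * elem_size,
                base + (index + 1) * elem_size,
                (*len - index - 1) * elem_size);
        *len -= 1;
    }

    int main(void)
    {
        struct stack s = { .items = malloc(4 * sizeof(int)), .len = 0, .cap = 4 };
        for (int i = 0; i < 4; i++)
            s.items[s.len++] = i * 10;

        /* Correct: pass the address of the items pointer. */
        resizable_remove_at(sizeof(int), (void **)&s.items, &s.len, 1);
        assert(s.len == 3 && s.items[1] == 20);

        /* The buggy pattern compiles just as cleanly: with a heap-allocated
         * stack it was (void **)stack instead of (void **)&stack->items. */

        free(s.items);
        return 0;
    }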
parent 1a23c63ec6
commit eb7cb0baf7
5 changed files with 49 additions and 72 deletions
src/context.c (100 changed lines)
@@ -32,7 +32,7 @@ apfl_protected(apfl_ctx ctx, apfl_protected_callback cb, void *opaque, bool *wit
 {
     struct error_handler *prev_handler = ctx->error_handler;
 
-    size_t stack_len = ctx->stack->len;
+    size_t stack_len = ctx->stack.len;
     size_t tmproots = apfl_gc_tmproots_begin(&ctx->gc);
 
     struct error_handler handler;
@@ -55,8 +55,8 @@ apfl_protected(apfl_ctx ctx, apfl_protected_callback cb, void *opaque, bool *wit
     }
 
     apfl_gc_tmproots_restore(&ctx->gc, tmproots);
-    assert(stack_len <= ctx->stack->cap);
-    ctx->stack->len = stack_len;
+    assert(stack_len <= ctx->stack.cap);
+    ctx->stack.len = stack_len;
 
     if (*with_error_on_stack) {
         if (!apfl_stack_push(ctx, err)) {
@@ -145,27 +145,14 @@ apfl_ctx_set_panic_callback(apfl_ctx ctx, apfl_panic_callback cb, void *opaque)
     ctx->panic_callback_data = opaque;
 }
 
-static struct stack *
-stack_new(struct gc *gc)
+static struct stack
+stack_new()
 {
-    struct stack *stack = apfl_gc_new_stack(gc);
-    if (stack == NULL) {
-        return NULL;
-    }
-
-    *stack = (struct stack) {
+    return (struct stack) {
         .items = NULL,
         .len = 0,
         .cap = 0,
     };
-
-    return stack;
-}
-
-void
-apfl_stack_deinit(struct apfl_allocator allocator, struct stack *stack)
-{
-    FREE_LIST(allocator, stack->items, stack->cap);
 }
 
 void
@@ -182,9 +169,9 @@ apfl_stack_push(apfl_ctx ctx, struct apfl_value value)
     return apfl_resizable_append(
         ctx->gc.allocator,
         sizeof(struct apfl_value),
-        (void **)&ctx->stack->items,
-        &ctx->stack->len,
-        &ctx->stack->cap,
+        (void **)&ctx->stack.items,
+        &ctx->stack.len,
+        &ctx->stack.cap,
         &value,
         1
     );
@@ -194,15 +181,15 @@ bool
 apfl_stack_check_index(apfl_ctx ctx, apfl_stackidx *index)
 {
     if (*index < 0) {
-        if ((size_t)-*index > ctx->stack->len) {
+        if ((size_t)-*index > ctx->stack.len) {
             return false;
         }
-        *index = ctx->stack->len + *index;
-    } else if ((size_t)*index >= ctx->stack->len) {
+        *index = ctx->stack.len + *index;
+    } else if ((size_t)*index >= ctx->stack.len) {
         return false;
     }
 
-    assert(0 <= *index && (size_t)*index < ctx->stack->len);
+    assert(0 <= *index && (size_t)*index < ctx->stack.len);
 
     return true;
 }
@@ -229,8 +216,8 @@ bool apfl_stack_drop_multi(apfl_ctx ctx, size_t count, apfl_stackidx *indices)
         // Will not fail, as we've already checked the indices
         assert(apfl_resizable_cut_without_resize(
             sizeof(struct apfl_value),
-            (void **)&ctx->stack->items,
-            &ctx->stack->len,
+            (void **)&ctx->stack.items,
+            &ctx->stack.len,
             indices[i],
             1
         ));
@@ -248,14 +235,14 @@ apfl_stack_pop(apfl_ctx ctx, struct apfl_value *value, apfl_stackidx index)
         return false;
     }
 
-    *value = ctx->stack->items[index];
+    *value = ctx->stack.items[index];
 
     assert(apfl_resizable_splice(
         ctx->gc.allocator,
         sizeof(struct apfl_value),
-        (void **)ctx->stack,
-        &ctx->stack->len,
-        &ctx->stack->cap,
+        (void **)&ctx->stack.items,
+        &ctx->stack.len,
+        &ctx->stack.cap,
         index,
         1,
         NULL,
@@ -272,7 +259,7 @@ stack_get_pointer(apfl_ctx ctx, apfl_stackidx index)
         return NULL;
     }
 
-    return &ctx->stack->items[index];
+    return &ctx->stack.items[index];
 }
 
 static bool
@@ -282,7 +269,7 @@ stack_get_and_adjust_index(apfl_ctx ctx, struct apfl_value *value, apfl_stackidx
         return false;
     }
 
-    *value = ctx->stack->items[*index];
+    *value = ctx->stack.items[*index];
 
     return true;
 }
@@ -338,14 +325,14 @@ apfl_stack_move_to_top(apfl_ctx ctx, apfl_stackidx index)
     size_t absindex = (size_t)index;
 
     // If we're here, index is an absolute address and is guaranteed to be < len
-    assert(ctx->stack->len >= absindex+1);
+    assert(ctx->stack.len >= absindex+1);
 
     memmove(
-        &ctx->stack->items[absindex + 1],
-        &ctx->stack->items[absindex],
-        ctx->stack->len - absindex - 1
+        &ctx->stack.items[absindex + 1],
+        &ctx->stack.items[absindex],
+        ctx->stack.len - absindex - 1
     );
-    ctx->stack->items[ctx->stack->len-1] = val;
+    ctx->stack.items[ctx->stack.len-1] = val;
 
     return true;
 }
@@ -353,7 +340,18 @@ apfl_stack_move_to_top(apfl_ctx ctx, apfl_stackidx index)
 void
 apfl_stack_clear(apfl_ctx ctx)
 {
-    ctx->stack->len = 0;
+    ctx->stack.len = 0;
 }
 
+static void
+stack_traverse(struct stack *stack, gc_visitor visitor, void *opaque)
+{
+    for (size_t i = 0; i < stack->len; i++) {
+        struct gc_object *object = apfl_value_get_gc_object(stack->items[i]);
+        if (object != NULL) {
+            visitor(opaque, object);
+        }
+    }
+}
+
 static void
@@ -365,9 +363,7 @@ get_roots(void *own_opaque, gc_visitor visitor, void *visitor_opaque)
         visitor(visitor_opaque, GC_OBJECT_FROM(ctx->scope, GC_TYPE_SCOPE));
     }
 
-    if (ctx->stack != NULL) {
-        visitor(visitor_opaque, GC_OBJECT_FROM(ctx->stack, GC_TYPE_STACK));
-    }
+    stack_traverse(&ctx->stack, visitor, visitor_opaque);
 }
 
 apfl_ctx
@@ -384,9 +380,7 @@ apfl_ctx_new(struct apfl_allocator base_allocator)
         goto error;
     }
 
-    if ((ctx->stack = stack_new(&ctx->gc)) == NULL) {
-        goto error;
-    }
+    ctx->stack = stack_new();
 
     ctx->error_handler = NULL;
     ctx->panic_callback = NULL;
@@ -411,6 +405,9 @@ apfl_ctx_destroy(apfl_ctx ctx)
     apfl_gc_full(&ctx->gc);
     apfl_gc_deinit(&ctx->gc);
 
+    FREE_LIST(ctx->gc.allocator, ctx->stack.items, ctx->stack.cap);
+    ctx->stack = stack_new();
+
     FREE_OBJ(base_allocator, ctx);
 }
 
@@ -662,14 +659,3 @@ apfl_get_member(
         break;
     }
 }
-
-void
-apfl_gc_stack_traverse(struct stack *stack, gc_visitor visitor, void *opaque)
-{
-    for (size_t i = 0; i < stack->len; i++) {
-        struct gc_object *object = apfl_value_get_gc_object(stack->items[i]);
-        if (object != NULL) {
-            visitor(opaque, object);
-        }
-    }
-}
@@ -31,7 +31,7 @@ struct apfl_ctx_data {
     apfl_panic_callback panic_callback;
     void *panic_callback_data;
     struct scope *scope;
-    struct stack *stack;
+    struct stack stack;
     struct error_handler *error_handler;
 
     int execution_line;
@@ -43,9 +43,6 @@ APFL_NORETURN void apfl_raise_alloc_error(apfl_ctx);
 APFL_NORETURN void apfl_raise_invalid_stackidx(apfl_ctx);
 APFL_NORETURN void apfl_raise_error_object(apfl_ctx, struct apfl_error);
 
-void apfl_stack_deinit(struct apfl_allocator, struct stack *);
-void apfl_gc_stack_traverse(struct stack *, gc_visitor, void *);
-
 bool apfl_stack_push(apfl_ctx, struct apfl_value);
 void apfl_stack_must_push(apfl_ctx ctx, struct apfl_value value);
 bool apfl_stack_check_index(apfl_ctx, apfl_stackidx *);
src/gc.c (9 changed lines)
@@ -157,7 +157,6 @@ IMPL_NEW(struct apfl_value, apfl_gc_new_var, GC_T
 IMPL_NEW(struct apfl_string, apfl_gc_new_string, GC_TYPE_STRING, string )
 IMPL_NEW(struct instruction_list, apfl_gc_new_instructions, GC_TYPE_INSTRUCTIONS, instructions )
 IMPL_NEW(struct scope, apfl_gc_new_scope, GC_TYPE_SCOPE, scope )
-IMPL_NEW(struct stack, apfl_gc_new_stack, GC_TYPE_STACK, stack )
 
 size_t
 apfl_gc_tmproots_begin(struct gc *gc)
@@ -251,9 +250,6 @@ visit_children(struct gc_object *object, gc_visitor cb, void *opaque)
     case GC_TYPE_INSTRUCTIONS:
         apfl_gc_instructions_traverse(&object->instructions, cb, opaque);
         return;
-    case GC_TYPE_STACK:
-        apfl_gc_stack_traverse(&object->stack, cb, opaque);
-        return;
     }
 
     assert(false);
@@ -316,9 +312,6 @@ deinit_object(struct gc *gc, struct gc_object *object)
     case GC_TYPE_SCOPE:
         apfl_scope_deinit(gc->allocator, &object->scope);
         return;
-    case GC_TYPE_STACK:
-        apfl_stack_deinit(gc->allocator, &object->stack);
-        return;
     }
 
     assert(false);
@@ -412,8 +405,6 @@ type_to_string(enum gc_type type)
         return "instructions";
     case GC_TYPE_SCOPE:
         return "scope";
-    case GC_TYPE_STACK:
-        return "stack";
     }
 
     assert(false);
src/gc.h (2 changed lines)
@@ -27,7 +27,6 @@ enum gc_type {
     GC_TYPE_STRING,
     GC_TYPE_INSTRUCTIONS,
     GC_TYPE_SCOPE,
-    GC_TYPE_STACK,
 };
 
 struct gc_tmproots {
@@ -77,7 +76,6 @@ struct apfl_value* apfl_gc_new_var(struct gc *);
 struct apfl_string* apfl_gc_new_string(struct gc *);
 struct instruction_list* apfl_gc_new_instructions(struct gc *);
 struct scope* apfl_gc_new_scope(struct gc *);
-struct stack* apfl_gc_new_stack(struct gc *);
 
 #ifdef __cplusplus
 }
@@ -6,6 +6,10 @@
 
 #include "apfl.h"
 
+// TODO: Find a way to make these functions safer. These functions require
+// casting things to void** which hides useful compiler warnings. Also needing
+// to pass in the correct elem_size is error prone.
+
 #define APFL_RESIZABLE_TRAIT(T, N) \
     T* N; \
     size_t len; \
@@ -31,6 +35,7 @@ bool apfl_resizable_ensure_cap(
     size_t *cap,
     size_t want_cap
 );
+
 bool apfl_resizable_ensure_cap_for_more_elements(
     struct apfl_allocator,
     size_t elem_size,
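
One possible direction for the TODO above, sketched here outside of this commit: wrap the generic call in a macro that derives elem_size and the void ** cast from the declared field, so a caller cannot pass a mismatched pointer or element size by hand. All names in this sketch (resizable_append, RESIZABLE_APPEND, struct int_list) are invented, not part of the apfl API.

    #include <assert.h>
    #include <stdlib.h>
    #include <string.h>

    /* Generic grow-and-append, analogous in spirit to apfl_resizable_append. */
    static int resizable_append(size_t elem_size, void **items, size_t *len, size_t *cap,
                                const void *elems, size_t n)
    {
        if (*len + n > *cap) {
            size_t new_cap = (*cap ? *cap * 2 : 8);
            if (new_cap < *len + n)
                new_cap = *len + n;
            void *grown = realloc(*items, new_cap * elem_size);
            if (grown == NULL)
                return 0;
            *items = grown;
            *cap = new_cap;
        }
        memcpy((char *)*items + *len * elem_size, elems, n * elem_size);
        *len += n;
        return 1;
    }

    /* The macro does the cast in exactly one place and type-checks `elems`
     * against the element type via the conditional operator. */
    #define RESIZABLE_APPEND(r, elems, n)       \
        resizable_append(                       \
            sizeof(*(r)->items),                \
            (void **)&(r)->items,               \
            &(r)->len,                          \
            &(r)->cap,                          \
            (1 ? (elems) : (r)->items),         \
            (n))

    struct int_list {
        int *items;
        size_t len;
        size_t cap;
    };

    int main(void)
    {
        struct int_list list = { NULL, 0, 0 };
        int values[3] = { 1, 2, 3 };

        assert(RESIZABLE_APPEND(&list, values, 3));
        assert(list.len == 3 && list.items[2] == 3);

        /* RESIZABLE_APPEND(&list, "oops", 4); -- would now be diagnosed,
         * because char * is not compatible with int *. */

        free(list.items);
        return 0;
    }

The cast to void ** still exists, but it is written once next to the field access instead of at every call site, and the element size can no longer drift out of sync with the pointer being passed.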