Instead of the previous refcount-based garbage collection, we're now using a basic tri-color mark-and-sweep collector. This is done to support cyclical value relationships in the future (functions can form cycles; all values implemented up to this point cannot). The collector maintains a set of roots and a set of objects (grouped into blocks). GC-enabled objects are no longer allocated manually, but by the GC itself. The GC also wraps an allocator; this way the GC knows when we've run out of memory and will try to get out of that situation by performing a full collection cycle.

The tri-color abstraction was chosen for two reasons:

- We don't have to maintain a separate list of objects that still need to be marked; we can simply grab the next grey one.
- It should allow us to later implement incremental collection (right now we only do a stop-the-world collection).

(A rough, purely illustrative sketch of such a marking loop follows below.)

This also switches to bytecode-based evaluation of the code: we no longer evaluate the AST directly, but first compile it into a series of instructions that are evaluated in a separate step. This was done in preparation for implementing functions: we only need to turn a function body into instructions once, instead of evaluating the node again with each call of the function. Also, since an instruction list is implemented as a GC object, this removes the manual memory management of the function body and its child nodes.

Since the GC and the bytecode go hand in hand, this was done in one (giant) commit. As a downside, we've now lost the ability to do list matching on assignments. I've already started to work on implementing this in the new architecture, but left it out of this commit, as it's already quite large :)
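To make the tri-color idea above concrete, here is a minimal, self-contained sketch of a stop-the-world mark phase. All names in it (struct obj, struct gc, make_grey, mark_phase) are hypothetical and chosen for illustration only; they are not the types or functions introduced in this commit.

#include <stddef.h>
#include <stdio.h>

// Hypothetical sketch only; not the actual GC types from this commit.
enum color { WHITE, GREY, BLACK };

struct obj {
    enum color color;
    struct obj *grey_next;   // intrusive worklist of grey objects
    struct obj *children[2]; // outgoing references, may be NULL
};

struct gc {
    struct obj *grey_head;   // "grab the next grey one" comes from here
};

// Turn a white object grey and put it on the worklist; grey and black
// objects are left alone, so every object is scanned at most once.
static void make_grey(struct gc *gc, struct obj *o)
{
    if (o == NULL || o->color != WHITE) {
        return;
    }
    o->color = GREY;
    o->grey_next = gc->grey_head;
    gc->grey_head = o;
}

// Stop-the-world mark phase: grey the roots, then repeatedly take a grey
// object, grey its children and blacken it, until no grey objects remain.
static void mark_phase(struct gc *gc, struct obj **roots, size_t nroots)
{
    for (size_t i = 0; i < nroots; i++) {
        make_grey(gc, roots[i]);
    }

    while (gc->grey_head != NULL) {
        struct obj *o = gc->grey_head;
        gc->grey_head = o->grey_next;

        make_grey(gc, o->children[0]);
        make_grey(gc, o->children[1]);
        o->color = BLACK;
    }
    // A sweep would now free every object that is still WHITE.
}

int main(void)
{
    // Two objects referencing each other (a cycle) plus one unreachable one.
    struct obj a = { WHITE, NULL, { NULL, NULL } };
    struct obj b = { WHITE, NULL, { &a, NULL } };
    struct obj unreachable = { WHITE, NULL, { NULL, NULL } };
    a.children[0] = &b;

    struct gc gc = { NULL };
    struct obj *roots[] = { &a };
    mark_phase(&gc, roots, 1);

    // Prints "2 2 0": a and b end up BLACK, the unreachable object stays WHITE.
    printf("%d %d %d\n", a.color, b.color, unreachable.color);
    return 0;
}

The sweep step (freeing everything still white) is omitted, but the example shows why cycles are no problem: both objects in the cycle are reachable from a root and end up black, while the unreferenced object stays white.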
181 lines
3.8 KiB
C
#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>

#include "alloc.h"
#include "resizable.h"

// Start with an empty buffer; memory is only allocated once elements are added.
void
apfl_resizable_init(void **mem, size_t *len, size_t *cap)
{
    *mem = NULL;
    *len = 0;
    *cap = 0;
}

// Set the length of the buffer to newlen, growing the capacity if necessary.
bool
apfl_resizable_resize(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t *cap,
    size_t newlen
) {
    // TODO: We're wasteful here by never actually shrinking the memory.
    if (newlen <= *len || newlen < *cap) {
        *len = newlen;
        return true;
    }

    assert(newlen >= *cap);

    if (!apfl_resizable_ensure_cap(allocator, elem_size, mem, cap, newlen)) {
        return false;
    }

    *len = newlen;
    return true;
}

// Grow the allocation so that it can hold at least want_cap elements.
bool
apfl_resizable_ensure_cap(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *cap,
    size_t want_cap
) {
    if (want_cap <= *cap) {
        return true;
    }

    // TODO: We currently simply grow the memory to have space for exactly
    // want_cap elements. It would probably be smarter to grow the memory
    // a bit larger to reduce calls to realloc.
    void *newmem = REALLOC_BYTES(allocator, *mem, *cap * elem_size, want_cap * elem_size);
    if (newmem == NULL) {
        return false;
    }

    *mem = newmem;
    *cap = want_cap;
    return true;
}

bool
apfl_resizable_ensure_cap_for_more_elements(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t len,
    size_t *cap,
    size_t more_elements
) {
    return apfl_resizable_ensure_cap(allocator, elem_size, mem, cap, len + more_elements); // TODO: What if len + more_elements overflows?
}

// A cut is valid if the range [cut_start, cut_start + cut_len) lies within the buffer.
bool
apfl_resizable_check_cut_args(size_t len, size_t cut_start, size_t cut_len)
{
    return !(cut_start > len || cut_start + cut_len > len);
}

// Move the elements behind the cut so that other_len elements fit into the gap
// left by the cut_len removed elements.
static void
move_elems_for_cut(
    size_t elem_size,
    void **mem,
    size_t len,
    size_t cut_start,
    size_t cut_len,
    size_t other_len
) {
    size_t src_off = cut_start + cut_len;
    size_t dst_off = cut_start + other_len;

    memmove(
        ((char *)(*mem)) + (dst_off * elem_size),
        ((char *)(*mem)) + (src_off * elem_size),
        (len - cut_start - cut_len) * elem_size
    );
}

// Remove cut_len elements starting at cut_start without shrinking the allocation.
bool
apfl_resizable_cut_without_resize(
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t cut_start,
    size_t cut_len
) {
    if (!apfl_resizable_check_cut_args(*len, cut_start, cut_len)) {
        return false;
    }

    move_elems_for_cut(elem_size, mem, *len, cut_start, cut_len, 0);
    *len -= cut_len;

    return true;
}

// Replace the cut_len elements starting at cut_start with the other_len elements
// from other_mem, growing the buffer if the replacement is larger than the cut.
bool
apfl_resizable_splice(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t *cap,
    size_t cut_start,
    size_t cut_len,
    const void *other_mem,
    size_t other_len
) {
    if (!apfl_resizable_check_cut_args(*len, cut_start, cut_len)) {
        return false;
    }

    if (other_len > cut_len) {
        if (!apfl_resizable_ensure_cap_for_more_elements(
            allocator,
            elem_size,
            mem,
            *len,
            cap,
            other_len - cut_len
        )) {
            return false;
        }
    }

    move_elems_for_cut(elem_size, mem, *len, cut_start, cut_len, other_len);

    if (other_len > 0 && other_mem != NULL) {
        memcpy(
            ((char *)(*mem)) + cut_start * elem_size,
            other_mem,
            other_len * elem_size
        );
    }

    *len += other_len - cut_len;

    return true;
}

// Appending is just splicing an empty range at the end of the buffer.
bool
apfl_resizable_append(struct apfl_allocator allocator, size_t elem_size, void **mem, size_t *len, size_t *cap, const void *other_mem, size_t other_len)
{
    return apfl_resizable_splice(
        allocator,
        elem_size,
        mem,
        len,
        cap,
        *len,
        0,
        other_mem,
        other_len
    );
}