#include <assert.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdbool.h>
#include <string.h>

#include "alloc.h"
#include "resizable.h"
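
// Initialize an empty resizable buffer: no backing memory, zero length,
// zero capacity.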
void
apfl_resizable_init(void **mem, size_t *len, size_t *cap)
{
    *mem = NULL;
    *len = 0;
    *cap = 0;
}
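
// Set the logical length to newlen, growing the backing memory when needed.
// Shrinking only lowers the length; memory is never released (see the TODO
// in the body). Returns false if growing fails, in which case the buffer is
// left unchanged.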
bool
apfl_resizable_resize(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t *cap,
    size_t newlen
) {
    // TODO: We're wasteful here by never actually shrinking the memory.
    if (newlen <= *len || newlen < *cap) {
        *len = newlen;
        return true;
    }

    assert(newlen >= *cap);

    if (!apfl_resizable_ensure_cap(allocator, elem_size, mem, cap, newlen)) {
        return false;
    }

    *len = newlen;
    return true;
}
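
// Make sure there is capacity for at least want_cap elements, reallocating
// the buffer if necessary. On failure the old buffer stays valid and
// *mem/*cap are untouched.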
bool
apfl_resizable_ensure_cap(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *cap,
    size_t want_cap
) {
    if (want_cap <= *cap) {
        return true;
    }

    // TODO: We currently simply grow the memory to have space for exactly
    // want_cap elements. It would probably be smarter to grow the memory
    // a bit larger to reduce calls to realloc.
    void *newmem = REALLOC_BYTES(allocator, *mem, *cap * elem_size, want_cap * elem_size);
    if (newmem == NULL) {
        return false;
    }

    *mem = newmem;
    *cap = want_cap;
    return true;
}
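
// Convenience wrapper: ensure capacity for len + more_elements elements.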
bool
apfl_resizable_ensure_cap_for_more_elements(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t len,
    size_t *cap,
    size_t more_elements
) {
    return apfl_resizable_ensure_cap(allocator, elem_size, mem, cap, len + more_elements); // TODO: What if len + more_elements overflows?
}
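
// Check that the range [cut_start, cut_start + cut_len) lies entirely
// within a buffer of len elements.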
bool
apfl_resizable_check_cut_args(size_t len, size_t cut_start, size_t cut_len)
{
    // Phrased so that cut_start + cut_len cannot overflow size_t.
    return cut_start <= len && cut_len <= len - cut_start;
}
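
// Shift the elements following the cut region so that other_len replacement
// elements fit where the cut region used to be. Source and destination may
// overlap (memmove); when other_len > cut_len the caller must have grown
// the buffer beforehand.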
static void
move_elems_for_cut(
    size_t elem_size,
    void **mem,
    size_t len,
    size_t cut_start,
    size_t cut_len,
    size_t other_len
) {
    size_t src_off = cut_start + cut_len;
    size_t dst_off = cut_start + other_len;

    memmove(
        ((char *)(*mem)) + (dst_off * elem_size),
        ((char *)(*mem)) + (src_off * elem_size),
        (len - cut_start - cut_len) * elem_size
    );
}
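
// Remove cut_len elements starting at cut_start and close the gap. The
// capacity is left unchanged.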
bool
apfl_resizable_cut_without_resize(
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t cut_start,
    size_t cut_len
) {
    if (!apfl_resizable_check_cut_args(*len, cut_start, cut_len)) {
        return false;
    }

    move_elems_for_cut(elem_size, mem, *len, cut_start, cut_len, 0);
    *len -= cut_len;

    return true;
}
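
// Replace the cut_len elements starting at cut_start with the other_len
// elements from other_mem. other_mem may be NULL, in which case the new
// slots are reserved but left uninitialized. The length changes by
// other_len - cut_len; the update relies on well-defined unsigned
// wraparound when the buffer shrinks.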
bool
apfl_resizable_splice(
    struct apfl_allocator allocator,
    size_t elem_size,
    void **mem,
    size_t *len,
    size_t *cap,
    size_t cut_start,
    size_t cut_len,
    const void *other_mem,
    size_t other_len
) {
    if (!apfl_resizable_check_cut_args(*len, cut_start, cut_len)) {
        return false;
    }

    if (other_len > cut_len) {
        if (!apfl_resizable_ensure_cap_for_more_elements(
            allocator,
            elem_size,
            mem,
            *len,
            cap,
            other_len - cut_len
        )) {
            return false;
        }
    }

    move_elems_for_cut(elem_size, mem, *len, cut_start, cut_len, other_len);

    if (other_len > 0 && other_mem != NULL) {
        memcpy(
            ((char *)(*mem)) + cut_start * elem_size,
            other_mem,
            other_len * elem_size
        );
    }

    *len += other_len - cut_len;

    return true;
}
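
// Append other_len elements at the end of the buffer; implemented as a
// splice of an empty range at position *len.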
bool
apfl_resizable_append(struct apfl_allocator allocator, size_t elem_size, void **mem, size_t *len, size_t *cap, const void *other_mem, size_t other_len)
{
    return apfl_resizable_splice(
        allocator,
        elem_size,
        mem,
        len,
        cap,
        *len,
        0,
        other_mem,
        other_len
    );
}
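
// Example usage (an illustrative sketch, not part of the library): a
// growable int vector built on these helpers. The struct and function names
// below are hypothetical, and the allocator value is assumed to come from
// alloc.h; how one is obtained is not shown in this file. Compiled out via
// #if 0.
#if 0
struct intvec {
    int *items;
    size_t len;
    size_t cap;
};

static bool
intvec_push(struct apfl_allocator allocator, struct intvec *v, int value)
{
    // The helpers operate on a void *, so go through a temporary rather
    // than casting &v->items to void **.
    void *mem = v->items;
    if (!apfl_resizable_ensure_cap_for_more_elements(
        allocator, sizeof *v->items, &mem, v->len, &v->cap, 1
    )) {
        return false;
    }
    v->items = mem;
    v->items[v->len++] = value;
    return true;
}
#endif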