From 77c73df87c62e3b1d55a106ce5b93af468f8ad19 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 5 Feb 2021 14:20:56 -0500 Subject: [PATCH 1/7] Tie lifetime of uJIT blocks to iseqs Blocks weren't being freed when iseqs are collected. --- iseq.c | 4 ++ ujit.h | 3 + ujit_core.c | 21 ++++++- ujit_core.h | 9 +++ ujit_iface.c | 174 ++++++++++++++++++++++++++++++++++++++------------- ujit_iface.h | 1 + vm_core.h | 2 + 7 files changed, 166 insertions(+), 48 deletions(-) diff --git a/iseq.c b/iseq.c index 6c96d6ea087235..e63e77431d3b58 100644 --- a/iseq.c +++ b/iseq.c @@ -109,6 +109,7 @@ rb_iseq_free(const rb_iseq_t *iseq) if (iseq && iseq->body) { struct rb_iseq_constant_body *const body = iseq->body; mjit_free_iseq(iseq); /* Notify MJIT */ + rb_ujit_iseq_free(body); ruby_xfree((void *)body->iseq_encoded); ruby_xfree((void *)body->insns_info.body); if (body->insns_info.positions) ruby_xfree((void *)body->insns_info.positions); @@ -321,6 +322,7 @@ rb_iseq_update_references(rb_iseq_t *iseq) #if USE_MJIT mjit_update_references(iseq); #endif + rb_ujit_iseq_update_references(body); } } @@ -401,6 +403,7 @@ rb_iseq_mark(const rb_iseq_t *iseq) #if USE_MJIT mjit_mark_cc_entries(body); #endif + rb_ujit_iseq_mark(body); } if (FL_TEST_RAW((VALUE)iseq, ISEQ_NOT_LOADED_YET)) { @@ -490,6 +493,7 @@ rb_iseq_constant_body_alloc(void) { struct rb_iseq_constant_body *iseq_body; iseq_body = ZALLOC(struct rb_iseq_constant_body); + list_head_init(&iseq_body->ujit_blocks); return iseq_body; } diff --git a/ujit.h b/ujit.h index a931ce0d3d4ca0..c2e03e5e26e9af 100644 --- a/ujit.h +++ b/ujit.h @@ -54,5 +54,8 @@ void rb_ujit_compile_iseq(const rb_iseq_t *iseq); void rb_ujit_init(struct rb_ujit_options *options); void rb_ujit_bop_redefined(VALUE klass, const rb_method_entry_t *me, enum ruby_basic_operators bop); void rb_ujit_constant_state_changed(void); +void rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body); +void rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body); +void rb_ujit_iseq_free(const struct rb_iseq_constant_body *body); #endif // #ifndef UJIT_H diff --git a/ujit_core.c b/ujit_core.c index c1f9b941a1aed0..5134b7a9127227 100644 --- a/ujit_core.c +++ b/ujit_core.c @@ -147,8 +147,9 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst) return diff; } -// Add a block version to the map -static void add_block_version(blockid_t blockid, block_t* block) +// Add a block version to the map. 
Block should be fully constructed +static void +add_block_version(blockid_t blockid, block_t* block) { // Function entry blocks must have stack size 0 RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0)); @@ -166,6 +167,17 @@ static void add_block_version(blockid_t blockid, block_t* block) // Add the block version to the map st_insert(version_tbl, (st_data_t)&block->blockid, (st_data_t)block); RUBY_ASSERT(find_block_version(blockid, &block->ctx) != NULL); + + { + // Store the block on the iseq + list_add_tail(&block->blockid.iseq->body->ujit_blocks, &block->iseq_block_node); + + const rb_iseq_t *iseq = block->blockid.iseq; + // Run write barriers for block dependencies + RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.iseq); + RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cc); + RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cme); + } } // Add an incoming branch for a given block version @@ -633,9 +645,9 @@ invalidate_block_version(block_t* block) } } - // If the block is an entry point, it needs to be unmapped from its iseq const rb_iseq_t* iseq = block->blockid.iseq; uint32_t idx = block->blockid.idx; + // If the block is an entry point, it needs to be unmapped from its iseq VALUE* entry_pc = &iseq->body->iseq_encoded[idx]; int entry_opcode = opcode_at_pc(iseq, entry_pc); @@ -652,6 +664,9 @@ invalidate_block_version(block_t* block) // FIXME: // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub. + // Remove block from iseq + list_del_from(&iseq->body->ujit_blocks, &block->iseq_block_node); + ujit_unlink_method_lookup_dependency(block); // Free the old block version object free(block->incoming); free(block); diff --git a/ujit_core.h b/ujit_core.h index f63dcef0525b67..c4729d4e2ac4f0 100644 --- a/ujit_core.h +++ b/ujit_core.h @@ -125,6 +125,15 @@ typedef struct BlockVersion // Next block version for this blockid (singly-linked list) struct BlockVersion* next; + // List node for all block versions in an iseq + struct list_node iseq_block_node; + + // GC managed objects that this block depend on + struct { + VALUE cc; + VALUE cme; + VALUE iseq; + } dependencies; } block_t; // Context object methods diff --git a/ujit_iface.c b/ujit_iface.c index bf36985ff6ec59..587b626230ec35 100644 --- a/ujit_iface.c +++ b/ujit_iface.c @@ -136,8 +136,6 @@ struct compiled_region_array { int32_t capa; struct compiled_region { block_t *block; - const struct rb_callcache *cc; - const rb_callable_method_entry_t *cme; } data[]; }; @@ -159,7 +157,7 @@ add_compiled_region(struct compiled_region_array *array, struct compiled_region } // Check if the region is already present for (int32_t i = 0; i < array->size; i++) { - if (array->data[i].block == region->block && array->data[i].cc == region->cc && array->data[i].cme == region->cme) { + if (array->data[i].block == region->block) { return array; } } @@ -195,35 +193,78 @@ add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int ex if (!regions) { rb_bug("ujit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead } + *value = (st_data_t)regions; return ST_CONTINUE; } // Remember that the currently compiling region is only valid while cme and cc are valid void -assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t* block) +assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t *block) { RUBY_ASSERT(block != NULL); - struct compiled_region 
region = { .block = block, .cc = cc, .cme = cme }; + RUBY_ASSERT(block->dependencies.cc == 0 && block->dependencies.cme == 0); + struct compiled_region region = { .block = block }; st_update(method_lookup_dependency, (st_data_t)cme, add_lookup_dependency_i, (st_data_t)®ion); + block->dependencies.cme = (VALUE)cme; st_update(method_lookup_dependency, (st_data_t)cc, add_lookup_dependency_i, (st_data_t)®ion); - // FIXME: This is a leak! When either the cme or the cc become invalid, the other also needs to go + block->dependencies.cc = (VALUE)cc; } static int ujit_root_mark_i(st_data_t k, st_data_t v, st_data_t ignore) { - // FIXME: This leaks everything that end up in the dependency table! - // One way to deal with this is with weak references... - rb_gc_mark((VALUE)k); - struct compiled_region_array *regions = (void *)v; - for (int32_t i = 0; i < regions->size; i++) { - rb_gc_mark((VALUE)regions->data[i].block->blockid.iseq); - } + // Lifetime notes: cc and cme get added in pairs into the table. One of + // them should become invalid before dying. When one of them invalidate we + // remove the pair from the table. Blocks remove themself from the table + // when they die. + rb_gc_mark_movable((VALUE)k); return ST_CONTINUE; } +static int +method_lookup_dep_table_update_keys(st_data_t *key, st_data_t *value, st_data_t argp, int existing) +{ + *key = rb_gc_location(rb_gc_location((VALUE)*key)); + + return ST_CONTINUE; +} + +static int +replace_all(st_data_t key, st_data_t value, st_data_t argp, int error) +{ + return ST_REPLACE; +} + +static int +update_blockid_iseq(st_data_t *key, st_data_t *value, st_data_t argp, int existing) +{ + blockid_t *blockid = (blockid_t *)*key; + blockid->iseq = (const rb_iseq_t *)rb_gc_location((VALUE)blockid->iseq); + + return ST_CONTINUE; +} + +// GC callback during compaction +static void +ujit_root_update_references(void *ptr) +{ + if (method_lookup_dependency) { + if (st_foreach_with_replace(method_lookup_dependency, replace_all, method_lookup_dep_table_update_keys, 0)) { + RUBY_ASSERT(false); + } + } + + if (version_tbl) { + // If any of the iseqs in the version table moves, we need to rehash their corresponding blockid. + // TODO: this seems very inefficient. This can be more targeted if each store their own version_tbl. 
+ if (st_foreach_with_replace(version_tbl, replace_all, update_blockid_iseq, 0)) { + RUBY_ASSERT(false); + } + } +} + // GC callback during mark phase static void ujit_root_mark(void *ptr) @@ -251,7 +292,7 @@ ujit_root_memsize(const void *ptr) // TODO: make this write barrier protected static const rb_data_type_t ujit_root_type = { "ujit_root", - {ujit_root_mark, ujit_root_free, ujit_root_memsize, }, + {ujit_root_mark, ujit_root_free, ujit_root_memsize, ujit_root_update_references}, 0, 0, RUBY_TYPED_FREE_IMMEDIATELY }; @@ -266,46 +307,54 @@ rb_ujit_method_lookup_change(VALUE cme_or_cc) RUBY_ASSERT(IMEMO_TYPE_P(cme_or_cc, imemo_ment) || IMEMO_TYPE_P(cme_or_cc, imemo_callcache)); - st_data_t image, other_image; - if (st_lookup(method_lookup_dependency, (st_data_t)cme_or_cc, &image)) { + // Invalidate all regions that depend on the cme or cc + st_data_t key = (st_data_t)cme_or_cc, image; + if (st_delete(method_lookup_dependency, &key, &image)) { struct compiled_region_array *array = (void *)image; - // Invalidate all regions that depend on the cme or cc for (int32_t i = 0; i < array->size; i++) { - struct compiled_region *region = &array->data[i]; - - VALUE other_key; - if (IMEMO_TYPE_P(cme_or_cc, imemo_ment)) { - other_key = (VALUE)region->cc; - } - else { - other_key = (VALUE)region->cme; - } + invalidate_block_version(array->data[i].block); + } - if (!st_lookup(method_lookup_dependency, (st_data_t)other_key, &other_image)) { - // See assume_method_lookup_stable() for why this should always hit. - rb_bug("method lookup dependency bookkeeping bug"); - } - struct compiled_region_array *other_region_array = (void *)other_image; - const int32_t other_size = other_region_array->size; - // Find the block we are invalidating in the other region array - for (int32_t i = 0; i < other_size; i++) { - if (other_region_array->data[i].block == region->block) { - // Do a shuffle remove. Order in the region array doesn't matter. - other_region_array->data[i] = other_region_array->data[other_size - 1]; - other_region_array->size--; - break; - } - } - RUBY_ASSERT(other_region_array->size < other_size); + free(array); + } - invalidate_block_version(region->block); + RB_VM_LOCK_LEAVE(); +} + +// Remove a block from the method lookup dependency table +static void +remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block) +{ + st_data_t key = (st_data_t)cc_or_cme, image; + if (st_lookup(method_lookup_dependency, key, &image)) { + struct compiled_region_array *array = (void *)image; + + const int32_t size = array->size; + // Find the block we are removing + for (int32_t i = 0; i < size; i++) { + if (array->data[i].block == block) { + // Do a shuffle remove. Order in the region array doesn't matter. 
+ array->data[i] = array->data[size - 1]; + array->size--; + break; + } } + RUBY_ASSERT(array->size < size); - array->size = 0; + if (array->size == 0) { + st_delete(method_lookup_dependency, &key, NULL); + free(array); + } } - RB_VM_LOCK_LEAVE(); +} + +void +ujit_unlink_method_lookup_dependency(block_t *block) +{ + if (block->dependencies.cc) remove_method_lookup_dependency(block->dependencies.cc, block); + if (block->dependencies.cme) remove_method_lookup_dependency(block->dependencies.cme, block); } void @@ -571,6 +620,41 @@ print_ujit_stats(void) } #endif // if RUBY_DEBUG +void +rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body) +{ + block_t *block = NULL; + list_for_each(&body->ujit_blocks, block, iseq_block_node) { + rb_gc_mark_movable((VALUE)block->blockid.iseq); + + rb_gc_mark_movable(block->dependencies.cc); + rb_gc_mark_movable(block->dependencies.cme); + rb_gc_mark_movable(block->dependencies.iseq); + } +} + +void +rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body) +{ + block_t *block = NULL; + list_for_each(&body->ujit_blocks, block, iseq_block_node) { + block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq); + + block->dependencies.cc = rb_gc_location(block->dependencies.cc); + block->dependencies.cme = rb_gc_location(block->dependencies.cme); + block->dependencies.iseq = rb_gc_location(block->dependencies.iseq); + } +} + +void +rb_ujit_iseq_free(const struct rb_iseq_constant_body *body) +{ + block_t *block = NULL; + list_for_each(&body->ujit_blocks, block, iseq_block_node) { + invalidate_block_version(block); + } +} + void rb_ujit_init(struct rb_ujit_options *options) { diff --git a/ujit_iface.h b/ujit_iface.h index 73b121de612e26..bab86ec7e722d0 100644 --- a/ujit_iface.h +++ b/ujit_iface.h @@ -34,5 +34,6 @@ bool cfunc_needs_frame(const rb_method_cfunc_t *cfunc); void assume_method_lookup_stable(const struct rb_callcache *cc, const rb_callable_method_entry_t *cme, block_t* block); // this function *must* return passed exit_pc const VALUE *rb_ujit_count_side_exit_op(const VALUE *exit_pc); +void ujit_unlink_method_lookup_dependency(block_t *block); #endif // #ifndef UJIT_IFACE_H diff --git a/vm_core.h b/vm_core.h index 03b8b50f0fefdf..5ab723298070ab 100644 --- a/vm_core.h +++ b/vm_core.h @@ -437,6 +437,8 @@ struct rb_iseq_constant_body { long unsigned total_calls; /* number of total calls with `mjit_exec()` */ struct rb_mjit_unit *jit_unit; #endif + + struct list_head ujit_blocks; }; /* T_IMEMO/iseq */ From 4d952087e7a8b305a6c3961ddb098cdbc7438e18 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Thu, 11 Feb 2021 16:30:25 -0500 Subject: [PATCH 2/7] Add rb_dary. Use it for method dependency table --- dary.h | 141 +++++++++++++++++++++++++++++++++++++++++++++++++++ ujit_iface.c | 87 ++++++++----------------------- 2 files changed, 163 insertions(+), 65 deletions(-) create mode 100644 dary.h diff --git a/dary.h b/dary.h new file mode 100644 index 00000000000000..33eaacd3ce9219 --- /dev/null +++ b/dary.h @@ -0,0 +1,141 @@ +#ifndef RUBY_DARY_H +#define RUBY_DARY_H + +#include +#include +#include + +// Type for a dynamic array. Use to declare a dynamic array. +// It is a pointer so it fits in st_table nicely. Designed +// to be fairly type-safe. +// +// NULL is a valid empty dynamic array. 
+// +// Example: +// rb_dary(char) char_array = NULL; +// if (!rb_dary_append(&char_array, 'e')) abort(); +// printf("pushed %c\n", *rb_dary_ref(char_array, 0)); +// rb_dary_free(char_array); +// +#define rb_dary(T) struct { rb_dary_meta_t meta; T data[]; } * + +// Copy an element out of the array. Warning: not bounds checked. +// +// T rb_dary_get(rb_dary(T) ary, int32_t idx); +// +#define rb_dary_get(ary, idx) ((ary)->data[(idx)]) + +// Assign to an element. Warning: not bounds checked. +// +// void rb_dary_set(rb_dary(T) ary, int32_t idx, T element); +// +#define rb_dary_set(ary, idx, element) ((ary)->data[(idx)] = (element)) + +// Get a pointer to an element. Warning: not bounds checked. +// +// T *rb_dary_ref(rb_dary(T) ary, int32_t idx); +// +#define rb_dary_ref(ary, idx) (&((ary)->data[(idx)])) + +// Copy a new element into the array. Return 1 on success and 0 on failure. +// ptr_to_ary is evaluated multiple times. +// +// bool rb_dary_append(rb_dary(T) *ptr_to_ary, T element); +// +#define rb_dary_append(ptr_to_ary, element) ( \ + rb_dary_ensure_space((ptr_to_ary)) ? ( \ + rb_dary_set(*(ptr_to_ary), \ + (*(ptr_to_ary))->meta.size, \ + (element)), \ + ++((*(ptr_to_ary))->meta.size), \ + 1 \ + ) : 0) + +// Iterate over items of the array in a for loop +// +#define rb_dary_foreach(ary, idx_name, elem_ptr_var) \ + for (int idx_name = 0; idx_name < rb_dary_size(ary) && ((elem_ptr_var) = rb_dary_ref(ary, idx_name)); ++idx_name) + +typedef struct rb_dary_meta { + int32_t size; + int32_t capa; +} rb_dary_meta_t; + +// Get the size of the dynamic array. +// +static inline int32_t +rb_dary_size(const void *ary) +{ + const rb_dary_meta_t *meta = ary; + return meta ? meta->size : 0; +} + +// Get the capacity of the dynamic array. +// +static inline int32_t +rb_dary_capa(const void *ary) +{ + const rb_dary_meta_t *meta = ary; + return meta ? meta->capa : 0; +} + +// Free the dynamic array. +// +static inline void +rb_dary_free(void *ary) +{ + free(ary); +} + +// Remove the last element of the array. +// +#define rb_dary_pop_back(ary) ((ary)->meta.size--) + +// Internal macro +// Ensure there is space for one more element. Return 1 on success and 0 on failure. +// `ptr_to_ary` is evaluated multiple times. +#define rb_dary_ensure_space(ptr_to_ary) ( \ + (rb_dary_capa(*(ptr_to_ary)) > rb_dary_size(*(ptr_to_ary))) ? \ + 1 : \ + rb_dary_double(ptr_to_ary, sizeof((*(ptr_to_ary))->data[0]))) + +// Internal function +static inline int +rb_dary_double(void *ptr_to_ary, size_t element_size) +{ + rb_dary_meta_t **ptr_to_ptr_to_meta = ptr_to_ary; + const rb_dary_meta_t *meta = *ptr_to_ptr_to_meta; + int32_t current_capa = rb_dary_capa(meta); + + int32_t new_capa; + // Calculate new capacity + if (current_capa == 0) { + new_capa = 1; + } + else { + int64_t doubled = 2 * (int64_t)current_capa; + new_capa = (int32_t)doubled; + if (new_capa != doubled) return 0; + } + + // Calculate new buffer size + size_t current_buffer_size = element_size * (size_t)current_capa + (meta ? sizeof(*meta) : 0); + size_t new_buffer_size = element_size * (size_t)new_capa + sizeof(*meta); + if (new_buffer_size <= current_buffer_size) return 0; + + rb_dary_meta_t *doubled_ary = realloc(*ptr_to_ptr_to_meta, new_buffer_size); + if (!doubled_ary) return 0; + + if (meta == NULL) { + // First allocation. Initialize size. On subsequence allocations + // realloc takes care of carrying over the size. 
+ doubled_ary->size = 0; + } + + doubled_ary->capa = new_capa; + + *ptr_to_ptr_to_meta = doubled_ary; + return 1; +} + +#endif /* RUBY_DARY_H */ diff --git a/ujit_iface.c b/ujit_iface.c index 587b626230ec35..a425aecde051b7 100644 --- a/ujit_iface.c +++ b/ujit_iface.c @@ -14,6 +14,7 @@ #include "ujit_core.h" #include "ujit_hooks.inc" #include "ujit.rbinc" +#include "dary.h" #if HAVE_LIBCAPSTONE #include @@ -131,66 +132,22 @@ struct ujit_root_struct {}; // is only valid when cme_or_cc is valid static st_table *method_lookup_dependency; -struct compiled_region_array { - int32_t size; - int32_t capa; - struct compiled_region { - block_t *block; - } data[]; +struct compiled_region { + block_t *block; }; -// Add an element to a region array, or allocate a new region array. -static struct compiled_region_array * -add_compiled_region(struct compiled_region_array *array, struct compiled_region *region) -{ - if (!array) { - // Allocate a brand new array with space for one - array = malloc(sizeof(*array) + sizeof(array->data[0])); - if (!array) { - return NULL; - } - array->size = 0; - array->capa = 1; - } - if (array->size == INT32_MAX) { - return NULL; - } - // Check if the region is already present - for (int32_t i = 0; i < array->size; i++) { - if (array->data[i].block == region->block) { - return array; - } - } - if (array->size + 1 > array->capa) { - // Double the array's capacity. - int64_t double_capa = ((int64_t)array->capa) * 2; - int32_t new_capa = (int32_t)double_capa; - if (new_capa != double_capa) { - return NULL; - } - array = realloc(array, sizeof(*array) + new_capa * sizeof(array->data[0])); - if (array == NULL) { - return NULL; - } - array->capa = new_capa; - } - - array->data[array->size] = *region; - array->size++; - return array; -} +typedef rb_dary(struct compiled_region) block_array_t; static int add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int existing) { struct compiled_region *region = (struct compiled_region *)data; - struct compiled_region_array *regions = NULL; + block_array_t regions = NULL; if (existing) { - regions = (struct compiled_region_array *)*value; + regions = (block_array_t )*value; } - regions = add_compiled_region(regions, region); - if (!regions) { + if (!rb_dary_append(®ions, *region)) { rb_bug("ujit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead } @@ -310,13 +267,14 @@ rb_ujit_method_lookup_change(VALUE cme_or_cc) // Invalidate all regions that depend on the cme or cc st_data_t key = (st_data_t)cme_or_cc, image; if (st_delete(method_lookup_dependency, &key, &image)) { - struct compiled_region_array *array = (void *)image; + block_array_t array = (void *)image; + struct compiled_region *elem; - for (int32_t i = 0; i < array->size; i++) { - invalidate_block_version(array->data[i].block); + rb_dary_foreach(array, i, elem) { + invalidate_block_version(elem->block); } - free(array); + rb_dary_free(array); } RB_VM_LOCK_LEAVE(); @@ -328,26 +286,25 @@ remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block) { st_data_t key = (st_data_t)cc_or_cme, image; if (st_lookup(method_lookup_dependency, key, &image)) { - struct compiled_region_array *array = (void *)image; + block_array_t array = (void *)image; + struct compiled_region *elem; - const int32_t size = array->size; // Find the block we are removing - for (int32_t i = 0; i < size; i++) { - if (array->data[i].block == block) { - // Do a shuffle remove. Order in the region array doesn't matter. 
- array->data[i] = array->data[size - 1]; - array->size--; + rb_dary_foreach(array, i, elem) { + if (elem->block == block) { + // Remove the current element by moving the last element here. + // Order in the region array doesn't matter. + *elem = *rb_dary_ref(array, rb_dary_size(array) - 1); + rb_dary_pop_back(array); break; } } - RUBY_ASSERT(array->size < size); - if (array->size == 0) { + if (rb_dary_size(array) == 0) { st_delete(method_lookup_dependency, &key, NULL); - free(array); + rb_dary_free(array); } } - } void From e0a5e1ce011eb14739adf80200b6d30dd0b7ab76 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 12 Feb 2021 13:06:05 -0500 Subject: [PATCH 3/7] Keep track of blocks per iseq Remove global version_tbl --- iseq.c | 1 - ujit_core.c | 105 ++++++++++++++++++++++++++------------------------- ujit_core.h | 7 ++-- ujit_iface.c | 88 +++++++++++++++++------------------------- vm_core.h | 5 ++- 5 files changed, 97 insertions(+), 109 deletions(-) diff --git a/iseq.c b/iseq.c index e63e77431d3b58..27eb03352bd32b 100644 --- a/iseq.c +++ b/iseq.c @@ -493,7 +493,6 @@ rb_iseq_constant_body_alloc(void) { struct rb_iseq_constant_body *iseq_body; iseq_body = ZALLOC(struct rb_iseq_constant_body); - list_head_init(&iseq_body->ujit_blocks); return iseq_body; } diff --git a/ujit_core.c b/ujit_core.c index 5134b7a9127227..3c71d9ed7bb230 100644 --- a/ujit_core.c +++ b/ujit_core.c @@ -16,9 +16,6 @@ // Maximum number of branch instructions we can track #define MAX_BRANCHES 32768 -// Table of block versions indexed by (iseq, index) tuples -st_table *version_tbl; - // Registered branch entries branch_t branch_entries[MAX_BRANCHES]; uint32_t num_branches = 0; @@ -147,33 +144,50 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst) return diff; } +static block_t * +get_first_version(const rb_iseq_t *iseq, unsigned idx) +{ + struct rb_iseq_constant_body *body = iseq->body; + if (rb_dary_size(body->ujit_blocks) == 0) { + return NULL; + } + return rb_dary_get(body->ujit_blocks, idx); +} + // Add a block version to the map. 
Block should be fully constructed static void add_block_version(blockid_t blockid, block_t* block) { // Function entry blocks must have stack size 0 RUBY_ASSERT(!(block->blockid.idx == 0 && block->ctx.stack_size > 0)); + const rb_iseq_t *iseq = block->blockid.iseq; + struct rb_iseq_constant_body *body = iseq->body; + + // Ensure ujit_blocks is initialized + if (rb_dary_size(body->ujit_blocks) == 0) { + // Initialize ujit_blocks to be as wide as body->iseq_encoded + // TODO: add resize API for dary + while ((unsigned)rb_dary_size(body->ujit_blocks) < body->iseq_size) { + (void)rb_dary_append(&body->ujit_blocks, NULL); + } + } - // If there exists a version for this block id - block_t* first_version = NULL; - st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version); + block_t *first_version = get_first_version(iseq, blockid.idx); - // Link to the next version in a linked list + // If there exists a version for this block id if (first_version != NULL) { + // Link to the next version in a linked list RUBY_ASSERT(block->next == NULL); block->next = first_version; } - // Add the block version to the map - st_insert(version_tbl, (st_data_t)&block->blockid, (st_data_t)block); + // Make new block the first version + rb_dary_set(body->ujit_blocks, blockid.idx, block); RUBY_ASSERT(find_block_version(blockid, &block->ctx) != NULL); { - // Store the block on the iseq - list_add_tail(&block->blockid.iseq->body->ujit_blocks, &block->iseq_block_node); - - const rb_iseq_t *iseq = block->blockid.iseq; - // Run write barriers for block dependencies + // By writing the new block to the iseq, the iseq now + // contains new references to Ruby objects. Run write barriers. RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.iseq); RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cc); RB_OBJ_WRITTEN(iseq, Qundef, block->dependencies.cme); @@ -194,15 +208,11 @@ static void add_incoming(block_t* p_block, uint32_t branch_idx) // Count the number of block versions matching a given blockid static size_t count_block_versions(blockid_t blockid) { - // If there exists a version for this block id - block_t* first_version; - if (!rb_st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version)) - return 0; - size_t count = 0; + block_t *first_version = get_first_version(blockid.iseq, blockid.idx); // For each version matching the blockid - for (block_t* version = first_version; version != NULL; version = version->next) + for (block_t *version = first_version; version != NULL; version = version->next) { count += 1; } @@ -213,10 +223,10 @@ static size_t count_block_versions(blockid_t blockid) // Retrieve a basic block version for an (iseq, idx) tuple block_t* find_block_version(blockid_t blockid, const ctx_t* ctx) { + block_t *first_version = get_first_version(blockid.iseq, blockid.idx); + // If there exists a version for this block id - block_t* first_version; - if (!rb_st_lookup(version_tbl, (st_data_t)&blockid, (st_data_t*)&first_version)) - return NULL; + if (!first_version) return NULL; // Best match found block_t* best_version = NULL; @@ -568,32 +578,36 @@ void gen_direct_jump( branch_entries[branch_idx] = branch_entry; } +// Remove all references to a block then free it. 
+void +ujit_free_block(block_t *block) +{ + ujit_unlink_method_lookup_dependency(block); + + free(block->incoming); + free(block); +} + // Invalidate one specific block version void invalidate_block_version(block_t* block) { + const rb_iseq_t *iseq = block->blockid.iseq; + fprintf(stderr, "invalidating block (%p, %d)\n", block->blockid.iseq, block->blockid.idx); fprintf(stderr, "block=%p\n", block); - // Find the first version for this blockid - block_t* first_block = NULL; - rb_st_lookup(version_tbl, (st_data_t)&block->blockid, (st_data_t*)&first_block); + block_t *first_block = get_first_version(iseq, block->blockid.idx); RUBY_ASSERT(first_block != NULL); // Remove the version object from the map so we can re-generate stubs - if (first_block == block) - { - st_data_t key = (st_data_t)&block->blockid; - int success = st_delete(version_tbl, &key, NULL); - RUBY_ASSERT(success); + if (first_block == block) { + rb_dary_set(iseq->body->ujit_blocks, block->blockid.idx, NULL); } - else - { + else { bool deleted = false; - for (block_t* cur = first_block; cur != NULL; cur = cur->next) - { - if (cur->next == block) - { + for (block_t* cur = first_block; cur != NULL; cur = cur->next) { + if (cur->next == block) { cur->next = cur->next->next; deleted = true; break; @@ -645,8 +659,8 @@ invalidate_block_version(block_t* block) } } - const rb_iseq_t* iseq = block->blockid.iseq; uint32_t idx = block->blockid.idx; + // FIXME: the following says "if", but it's unconditional. // If the block is an entry point, it needs to be unmapped from its iseq VALUE* entry_pc = &iseq->body->iseq_encoded[idx]; int entry_opcode = opcode_at_pc(iseq, entry_pc); @@ -664,12 +678,7 @@ invalidate_block_version(block_t* block) // FIXME: // Call continuation addresses on the stack can also be atomically replaced by jumps going to the stub. 
- // Remove block from iseq - list_del_from(&iseq->body->ujit_blocks, &block->iseq_block_node); - ujit_unlink_method_lookup_dependency(block); - // Free the old block version object - free(block->incoming); - free(block); + ujit_free_block(block); fprintf(stderr, "invalidation done\n"); } @@ -691,14 +700,8 @@ st_index_t blockid_hash(st_data_t arg) return hash0 ^ hash1; } -static const struct st_hash_type hashtype_blockid = { - blockid_cmp, - blockid_hash, -}; - void ujit_init_core(void) { - // Initialize the version hash table - version_tbl = st_init_table(&hashtype_blockid); + // Nothing yet } diff --git a/ujit_core.h b/ujit_core.h index c4729d4e2ac4f0..76b8325cbb8054 100644 --- a/ujit_core.h +++ b/ujit_core.h @@ -103,7 +103,7 @@ Basic block version Represents a portion of an iseq compiled with a given context Note: care must be taken to minimize the size of block_t objects */ -typedef struct BlockVersion +typedef struct ujit_block_version { // Bytecode sequence (iseq, idx) this is a version of blockid_t blockid; @@ -119,11 +119,11 @@ typedef struct BlockVersion uint32_t end_pos; // List of incoming branches indices - uint32_t* incoming; + uint32_t *incoming; uint32_t num_incoming; // Next block version for this blockid (singly-linked list) - struct BlockVersion* next; + struct ujit_block_version *next; // List node for all block versions in an iseq struct list_node iseq_block_node; @@ -147,6 +147,7 @@ int ctx_diff(const ctx_t* src, const ctx_t* dst); block_t* find_block_version(blockid_t blockid, const ctx_t* ctx); block_t* gen_block_version(blockid_t blockid, const ctx_t* ctx); uint8_t* gen_entry_point(const rb_iseq_t *iseq, uint32_t insn_idx); +void ujit_free_block(block_t *block); void gen_branch( const ctx_t* src_ctx, diff --git a/ujit_iface.c b/ujit_iface.c index a425aecde051b7..519836cc61e761 100644 --- a/ujit_iface.c +++ b/ujit_iface.c @@ -31,7 +31,6 @@ int64_t rb_ujit_exec_insns_count = 0; static int64_t exit_op_count[VM_INSTRUCTION_SIZE] = { 0 }; static int64_t compiled_iseq_count = 0; -extern st_table * version_tbl; extern codeblock_t *cb; extern codeblock_t *ocb; // Hash table of encoded instructions @@ -194,15 +193,6 @@ replace_all(st_data_t key, st_data_t value, st_data_t argp, int error) return ST_REPLACE; } -static int -update_blockid_iseq(st_data_t *key, st_data_t *value, st_data_t argp, int existing) -{ - blockid_t *blockid = (blockid_t *)*key; - blockid->iseq = (const rb_iseq_t *)rb_gc_location((VALUE)blockid->iseq); - - return ST_CONTINUE; -} - // GC callback during compaction static void ujit_root_update_references(void *ptr) @@ -212,14 +202,6 @@ ujit_root_update_references(void *ptr) RUBY_ASSERT(false); } } - - if (version_tbl) { - // If any of the iseqs in the version table moves, we need to rehash their corresponding blockid. - // TODO: this seems very inefficient. This can be more targeted if each store their own version_tbl. - if (st_foreach_with_replace(version_tbl, replace_all, update_blockid_iseq, 0)) { - RUBY_ASSERT(false); - } - } } // GC callback during mark phase @@ -294,7 +276,7 @@ remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block) if (elem->block == block) { // Remove the current element by moving the last element here. // Order in the region array doesn't matter. 
- *elem = *rb_dary_ref(array, rb_dary_size(array) - 1); + *elem = rb_dary_get(array, rb_dary_size(array) - 1); rb_dary_pop_back(array); break; } @@ -342,19 +324,6 @@ struct ujit_block_itr { VALUE list; }; -static int -iseqw_ujit_collect_blocks(st_data_t key, st_data_t value, st_data_t argp) -{ - block_t * block = (block_t *)value; - struct ujit_block_itr * itr = (struct ujit_block_itr *)argp; - - if (block->blockid.iseq == itr->iseq) { - VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, block); - rb_ary_push(itr->list, rb_block); - } - return ST_CONTINUE; -} - /* Get a list of the UJIT blocks associated with `rb_iseq` */ static VALUE ujit_blocks_for(VALUE mod, VALUE rb_iseq) @@ -362,15 +331,19 @@ ujit_blocks_for(VALUE mod, VALUE rb_iseq) if (CLASS_OF(rb_iseq) != rb_cISeq) { return rb_ary_new(); } + const rb_iseq_t *iseq = rb_iseqw_to_iseq(rb_iseq); - st_table * vt = (st_table *)version_tbl; - struct ujit_block_itr itr; - itr.iseq = iseq; - itr.list = rb_ary_new(); + block_t **element; + VALUE all_versions = rb_ary_new(); - rb_st_foreach(vt, iseqw_ujit_collect_blocks, (st_data_t)&itr); + rb_dary_foreach(iseq->body->ujit_blocks, idx, element) { + for (block_t *version = *element; version; version = version->next) { + VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, version); + rb_ary_push(all_versions, rb_block); + } + } - return itr.list; + return all_versions; } static VALUE @@ -580,35 +553,44 @@ print_ujit_stats(void) void rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body) { - block_t *block = NULL; - list_for_each(&body->ujit_blocks, block, iseq_block_node) { - rb_gc_mark_movable((VALUE)block->blockid.iseq); + block_t **element; + rb_dary_foreach(body->ujit_blocks, idx, element) { + for (block_t *block = *element; block; block = block->next) { + rb_gc_mark_movable((VALUE)block->blockid.iseq); - rb_gc_mark_movable(block->dependencies.cc); - rb_gc_mark_movable(block->dependencies.cme); - rb_gc_mark_movable(block->dependencies.iseq); + rb_gc_mark_movable(block->dependencies.cc); + rb_gc_mark_movable(block->dependencies.cme); + rb_gc_mark_movable(block->dependencies.iseq); + } } } void rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body) { - block_t *block = NULL; - list_for_each(&body->ujit_blocks, block, iseq_block_node) { - block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq); + block_t **element; + rb_dary_foreach(body->ujit_blocks, idx, element) { + for (block_t *block = *element; block; block = block->next) { + block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq); - block->dependencies.cc = rb_gc_location(block->dependencies.cc); - block->dependencies.cme = rb_gc_location(block->dependencies.cme); - block->dependencies.iseq = rb_gc_location(block->dependencies.iseq); + block->dependencies.cc = rb_gc_location(block->dependencies.cc); + block->dependencies.cme = rb_gc_location(block->dependencies.cme); + block->dependencies.iseq = rb_gc_location(block->dependencies.iseq); + } } } void rb_ujit_iseq_free(const struct rb_iseq_constant_body *body) { - block_t *block = NULL; - list_for_each(&body->ujit_blocks, block, iseq_block_node) { - invalidate_block_version(block); + block_t **element; + rb_dary_foreach(body->ujit_blocks, idx, element) { + block_t *block = *element; + while (block) { + block_t *next = block->next; + ujit_free_block(block); + block = next; + } } } diff --git a/vm_core.h b/vm_core.h index 5ab723298070ab..390280a07e996f 100644 --- a/vm_core.h +++ 
b/vm_core.h @@ -77,6 +77,7 @@ #include "ruby/st.h" #include "ruby_atomic.h" #include "vm_opts.h" +#include "dary.h" #include "ruby/thread_native.h" #if defined(_WIN32) @@ -302,6 +303,8 @@ pathobj_realpath(VALUE pathobj) /* Forward declarations */ struct rb_mjit_unit; +typedef rb_dary(struct ujit_block_version *) rb_ujit_block_array_t; + struct rb_iseq_constant_body { enum iseq_type { ISEQ_TYPE_TOP, @@ -438,7 +441,7 @@ struct rb_iseq_constant_body { struct rb_mjit_unit *jit_unit; #endif - struct list_head ujit_blocks; + rb_ujit_block_array_t ujit_blocks; }; /* T_IMEMO/iseq */ From 38ac48fe6714457e0222332836bd8d10666e74e3 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 12 Feb 2021 14:12:54 -0500 Subject: [PATCH 4/7] Block version bookkeeping fix --- ujit_core.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/ujit_core.c b/ujit_core.c index 3c71d9ed7bb230..bff69678ce015d 100644 --- a/ujit_core.c +++ b/ujit_core.c @@ -600,9 +600,10 @@ invalidate_block_version(block_t* block) block_t *first_block = get_first_version(iseq, block->blockid.idx); RUBY_ASSERT(first_block != NULL); - // Remove the version object from the map so we can re-generate stubs + // Remove references to this block if (first_block == block) { - rb_dary_set(iseq->body->ujit_blocks, block->blockid.idx, NULL); + // Make the next block the new first version + rb_dary_set(iseq->body->ujit_blocks, block->blockid.idx, block->next); } else { bool deleted = false; From 73754e5873cf61ad14a6870730c2a34ce94f7da6 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 12 Feb 2021 14:58:43 -0500 Subject: [PATCH 5/7] dary -> darray --- common.mk | 34 +++++++++++++++++++++ dary.h => darray.h | 72 ++++++++++++++++++++++----------------------- ext/coverage/depend | 1 + ext/objspace/depend | 1 + ujit_core.c | 14 ++++----- ujit_iface.c | 28 +++++++++--------- vm_core.h | 4 +-- 7 files changed, 95 insertions(+), 59 deletions(-) rename dary.h => darray.h (55%) diff --git a/common.mk b/common.mk index a6f85b52037b44..642cd47b3c809a 100644 --- a/common.mk +++ b/common.mk @@ -1902,6 +1902,7 @@ ast.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h ast.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ast.$(OBJEXT): {$(VPATH)}builtin.h ast.$(OBJEXT): {$(VPATH)}config.h +ast.$(OBJEXT): {$(VPATH)}darray.h ast.$(OBJEXT): {$(VPATH)}defines.h ast.$(OBJEXT): {$(VPATH)}encoding.h ast.$(OBJEXT): {$(VPATH)}id.h @@ -2273,6 +2274,7 @@ builtin.$(OBJEXT): {$(VPATH)}builtin.c builtin.$(OBJEXT): {$(VPATH)}builtin.h builtin.$(OBJEXT): {$(VPATH)}builtin_binary.inc builtin.$(OBJEXT): {$(VPATH)}config.h +builtin.$(OBJEXT): {$(VPATH)}darray.h builtin.$(OBJEXT): {$(VPATH)}defines.h builtin.$(OBJEXT): {$(VPATH)}id.h builtin.$(OBJEXT): {$(VPATH)}intern.h @@ -2464,6 +2466,7 @@ class.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h class.$(OBJEXT): {$(VPATH)}class.c class.$(OBJEXT): {$(VPATH)}config.h class.$(OBJEXT): {$(VPATH)}constant.h +class.$(OBJEXT): {$(VPATH)}darray.h class.$(OBJEXT): {$(VPATH)}defines.h class.$(OBJEXT): {$(VPATH)}encoding.h class.$(OBJEXT): {$(VPATH)}id.h @@ -2839,6 +2842,7 @@ compile.$(OBJEXT): {$(VPATH)}builtin.h compile.$(OBJEXT): {$(VPATH)}compile.c compile.$(OBJEXT): {$(VPATH)}config.h compile.$(OBJEXT): {$(VPATH)}constant.h +compile.$(OBJEXT): {$(VPATH)}darray.h compile.$(OBJEXT): {$(VPATH)}debug_counter.h compile.$(OBJEXT): {$(VPATH)}defines.h compile.$(OBJEXT): {$(VPATH)}encindex.h @@ -3230,6 +3234,7 @@ cont.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h cont.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h cont.$(OBJEXT): {$(VPATH)}config.h cont.$(OBJEXT): 
{$(VPATH)}cont.c +cont.$(OBJEXT): {$(VPATH)}darray.h cont.$(OBJEXT): {$(VPATH)}debug_counter.h cont.$(OBJEXT): {$(VPATH)}defines.h cont.$(OBJEXT): {$(VPATH)}eval_intern.h @@ -3422,6 +3427,7 @@ debug.$(OBJEXT): {$(VPATH)}backward/2/long_long.h debug.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h debug.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h debug.$(OBJEXT): {$(VPATH)}config.h +debug.$(OBJEXT): {$(VPATH)}darray.h debug.$(OBJEXT): {$(VPATH)}debug.c debug.$(OBJEXT): {$(VPATH)}debug_counter.h debug.$(OBJEXT): {$(VPATH)}defines.h @@ -5055,6 +5061,7 @@ error.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h error.$(OBJEXT): {$(VPATH)}builtin.h error.$(OBJEXT): {$(VPATH)}config.h error.$(OBJEXT): {$(VPATH)}constant.h +error.$(OBJEXT): {$(VPATH)}darray.h error.$(OBJEXT): {$(VPATH)}defines.h error.$(OBJEXT): {$(VPATH)}encoding.h error.$(OBJEXT): {$(VPATH)}error.c @@ -5257,6 +5264,7 @@ eval.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h eval.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h eval.$(OBJEXT): {$(VPATH)}config.h eval.$(OBJEXT): {$(VPATH)}constant.h +eval.$(OBJEXT): {$(VPATH)}darray.h eval.$(OBJEXT): {$(VPATH)}debug_counter.h eval.$(OBJEXT): {$(VPATH)}defines.h eval.$(OBJEXT): {$(VPATH)}encoding.h @@ -5682,6 +5690,7 @@ gc.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h gc.$(OBJEXT): {$(VPATH)}builtin.h gc.$(OBJEXT): {$(VPATH)}config.h gc.$(OBJEXT): {$(VPATH)}constant.h +gc.$(OBJEXT): {$(VPATH)}darray.h gc.$(OBJEXT): {$(VPATH)}debug.h gc.$(OBJEXT): {$(VPATH)}debug_counter.h gc.$(OBJEXT): {$(VPATH)}defines.h @@ -5892,6 +5901,7 @@ golf_prelude.$(OBJEXT): {$(VPATH)}backward/2/long_long.h golf_prelude.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h golf_prelude.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h golf_prelude.$(OBJEXT): {$(VPATH)}config.h +golf_prelude.$(OBJEXT): {$(VPATH)}darray.h golf_prelude.$(OBJEXT): {$(VPATH)}defines.h golf_prelude.$(OBJEXT): {$(VPATH)}golf_prelude.c golf_prelude.$(OBJEXT): {$(VPATH)}golf_prelude.rb @@ -6819,6 +6829,7 @@ iseq.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h iseq.$(OBJEXT): {$(VPATH)}builtin.h iseq.$(OBJEXT): {$(VPATH)}config.h iseq.$(OBJEXT): {$(VPATH)}constant.h +iseq.$(OBJEXT): {$(VPATH)}darray.h iseq.$(OBJEXT): {$(VPATH)}debug_counter.h iseq.$(OBJEXT): {$(VPATH)}defines.h iseq.$(OBJEXT): {$(VPATH)}encoding.h @@ -7027,6 +7038,7 @@ load.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h load.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h load.$(OBJEXT): {$(VPATH)}config.h load.$(OBJEXT): {$(VPATH)}constant.h +load.$(OBJEXT): {$(VPATH)}darray.h load.$(OBJEXT): {$(VPATH)}defines.h load.$(OBJEXT): {$(VPATH)}dln.h load.$(OBJEXT): {$(VPATH)}encoding.h @@ -8233,6 +8245,7 @@ miniinit.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h miniinit.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h miniinit.$(OBJEXT): {$(VPATH)}builtin.h miniinit.$(OBJEXT): {$(VPATH)}config.h +miniinit.$(OBJEXT): {$(VPATH)}darray.h miniinit.$(OBJEXT): {$(VPATH)}defines.h miniinit.$(OBJEXT): {$(VPATH)}dir.rb miniinit.$(OBJEXT): {$(VPATH)}encoding.h @@ -8472,6 +8485,7 @@ mjit.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h mjit.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h mjit.$(OBJEXT): {$(VPATH)}config.h mjit.$(OBJEXT): {$(VPATH)}constant.h +mjit.$(OBJEXT): {$(VPATH)}darray.h mjit.$(OBJEXT): {$(VPATH)}debug.h mjit.$(OBJEXT): {$(VPATH)}debug_counter.h mjit.$(OBJEXT): {$(VPATH)}defines.h @@ -8688,6 +8702,7 @@ mjit_compile.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h mjit_compile.$(OBJEXT): {$(VPATH)}builtin.h mjit_compile.$(OBJEXT): {$(VPATH)}config.h mjit_compile.$(OBJEXT): {$(VPATH)}constant.h +mjit_compile.$(OBJEXT): {$(VPATH)}darray.h mjit_compile.$(OBJEXT): 
{$(VPATH)}debug_counter.h mjit_compile.$(OBJEXT): {$(VPATH)}defines.h mjit_compile.$(OBJEXT): {$(VPATH)}id.h @@ -8885,6 +8900,7 @@ node.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h node.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h node.$(OBJEXT): {$(VPATH)}config.h node.$(OBJEXT): {$(VPATH)}constant.h +node.$(OBJEXT): {$(VPATH)}darray.h node.$(OBJEXT): {$(VPATH)}defines.h node.$(OBJEXT): {$(VPATH)}id.h node.$(OBJEXT): {$(VPATH)}id_table.h @@ -9865,6 +9881,7 @@ proc.$(OBJEXT): {$(VPATH)}backward/2/long_long.h proc.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h proc.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h proc.$(OBJEXT): {$(VPATH)}config.h +proc.$(OBJEXT): {$(VPATH)}darray.h proc.$(OBJEXT): {$(VPATH)}defines.h proc.$(OBJEXT): {$(VPATH)}encoding.h proc.$(OBJEXT): {$(VPATH)}eval_intern.h @@ -10068,6 +10085,7 @@ process.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h process.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h process.$(OBJEXT): {$(VPATH)}config.h process.$(OBJEXT): {$(VPATH)}constant.h +process.$(OBJEXT): {$(VPATH)}darray.h process.$(OBJEXT): {$(VPATH)}debug_counter.h process.$(OBJEXT): {$(VPATH)}defines.h process.$(OBJEXT): {$(VPATH)}dln.h @@ -10275,6 +10293,7 @@ ractor.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ractor.$(OBJEXT): {$(VPATH)}builtin.h ractor.$(OBJEXT): {$(VPATH)}config.h ractor.$(OBJEXT): {$(VPATH)}constant.h +ractor.$(OBJEXT): {$(VPATH)}darray.h ractor.$(OBJEXT): {$(VPATH)}debug.h ractor.$(OBJEXT): {$(VPATH)}debug_counter.h ractor.$(OBJEXT): {$(VPATH)}defines.h @@ -12203,6 +12222,7 @@ ruby.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h ruby.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ruby.$(OBJEXT): {$(VPATH)}config.h ruby.$(OBJEXT): {$(VPATH)}constant.h +ruby.$(OBJEXT): {$(VPATH)}darray.h ruby.$(OBJEXT): {$(VPATH)}debug_counter.h ruby.$(OBJEXT): {$(VPATH)}defines.h ruby.$(OBJEXT): {$(VPATH)}dln.h @@ -12397,6 +12417,7 @@ scheduler.$(OBJEXT): {$(VPATH)}backward/2/long_long.h scheduler.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h scheduler.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h scheduler.$(OBJEXT): {$(VPATH)}config.h +scheduler.$(OBJEXT): {$(VPATH)}darray.h scheduler.$(OBJEXT): {$(VPATH)}defines.h scheduler.$(OBJEXT): {$(VPATH)}encoding.h scheduler.$(OBJEXT): {$(VPATH)}id.h @@ -12749,6 +12770,7 @@ signal.$(OBJEXT): {$(VPATH)}backward/2/long_long.h signal.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h signal.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h signal.$(OBJEXT): {$(VPATH)}config.h +signal.$(OBJEXT): {$(VPATH)}darray.h signal.$(OBJEXT): {$(VPATH)}debug_counter.h signal.$(OBJEXT): {$(VPATH)}defines.h signal.$(OBJEXT): {$(VPATH)}encoding.h @@ -13698,6 +13720,7 @@ struct.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h struct.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h struct.$(OBJEXT): {$(VPATH)}builtin.h struct.$(OBJEXT): {$(VPATH)}config.h +struct.$(OBJEXT): {$(VPATH)}darray.h struct.$(OBJEXT): {$(VPATH)}defines.h struct.$(OBJEXT): {$(VPATH)}encoding.h struct.$(OBJEXT): {$(VPATH)}id.h @@ -14087,6 +14110,7 @@ thread.$(OBJEXT): {$(VPATH)}backward/2/long_long.h thread.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h thread.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h thread.$(OBJEXT): {$(VPATH)}config.h +thread.$(OBJEXT): {$(VPATH)}darray.h thread.$(OBJEXT): {$(VPATH)}debug.h thread.$(OBJEXT): {$(VPATH)}debug_counter.h thread.$(OBJEXT): {$(VPATH)}defines.h @@ -14851,6 +14875,7 @@ ujit_codegen.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h ujit_codegen.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ujit_codegen.$(OBJEXT): {$(VPATH)}builtin.h ujit_codegen.$(OBJEXT): {$(VPATH)}config.h +ujit_codegen.$(OBJEXT): {$(VPATH)}darray.h 
ujit_codegen.$(OBJEXT): {$(VPATH)}debug_counter.h ujit_codegen.$(OBJEXT): {$(VPATH)}defines.h ujit_codegen.$(OBJEXT): {$(VPATH)}id.h @@ -15242,6 +15267,7 @@ ujit_core.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h ujit_core.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ujit_core.$(OBJEXT): {$(VPATH)}builtin.h ujit_core.$(OBJEXT): {$(VPATH)}config.h +ujit_core.$(OBJEXT): {$(VPATH)}darray.h ujit_core.$(OBJEXT): {$(VPATH)}debug_counter.h ujit_core.$(OBJEXT): {$(VPATH)}defines.h ujit_core.$(OBJEXT): {$(VPATH)}id.h @@ -15439,6 +15465,7 @@ ujit_iface.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h ujit_iface.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h ujit_iface.$(OBJEXT): {$(VPATH)}builtin.h ujit_iface.$(OBJEXT): {$(VPATH)}config.h +ujit_iface.$(OBJEXT): {$(VPATH)}darray.h ujit_iface.$(OBJEXT): {$(VPATH)}debug_counter.h ujit_iface.$(OBJEXT): {$(VPATH)}defines.h ujit_iface.$(OBJEXT): {$(VPATH)}id.h @@ -15819,6 +15846,7 @@ variable.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h variable.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h variable.$(OBJEXT): {$(VPATH)}config.h variable.$(OBJEXT): {$(VPATH)}constant.h +variable.$(OBJEXT): {$(VPATH)}darray.h variable.$(OBJEXT): {$(VPATH)}debug_counter.h variable.$(OBJEXT): {$(VPATH)}defines.h variable.$(OBJEXT): {$(VPATH)}encoding.h @@ -16017,6 +16045,7 @@ version.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h version.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h version.$(OBJEXT): {$(VPATH)}builtin.h version.$(OBJEXT): {$(VPATH)}config.h +version.$(OBJEXT): {$(VPATH)}darray.h version.$(OBJEXT): {$(VPATH)}debug_counter.h version.$(OBJEXT): {$(VPATH)}defines.h version.$(OBJEXT): {$(VPATH)}id.h @@ -16229,6 +16258,7 @@ vm.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h vm.$(OBJEXT): {$(VPATH)}builtin.h vm.$(OBJEXT): {$(VPATH)}config.h vm.$(OBJEXT): {$(VPATH)}constant.h +vm.$(OBJEXT): {$(VPATH)}darray.h vm.$(OBJEXT): {$(VPATH)}debug_counter.h vm.$(OBJEXT): {$(VPATH)}defines.h vm.$(OBJEXT): {$(VPATH)}defs/opt_operand.def @@ -16445,6 +16475,7 @@ vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/long_long.h vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h vm_backtrace.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h vm_backtrace.$(OBJEXT): {$(VPATH)}config.h +vm_backtrace.$(OBJEXT): {$(VPATH)}darray.h vm_backtrace.$(OBJEXT): {$(VPATH)}debug.h vm_backtrace.$(OBJEXT): {$(VPATH)}defines.h vm_backtrace.$(OBJEXT): {$(VPATH)}encoding.h @@ -16636,6 +16667,7 @@ vm_dump.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h vm_dump.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h vm_dump.$(OBJEXT): {$(VPATH)}config.h vm_dump.$(OBJEXT): {$(VPATH)}constant.h +vm_dump.$(OBJEXT): {$(VPATH)}darray.h vm_dump.$(OBJEXT): {$(VPATH)}defines.h vm_dump.$(OBJEXT): {$(VPATH)}gc.h vm_dump.$(OBJEXT): {$(VPATH)}id.h @@ -16827,6 +16859,7 @@ vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h vm_sync.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h vm_sync.$(OBJEXT): {$(VPATH)}config.h vm_sync.$(OBJEXT): {$(VPATH)}constant.h +vm_sync.$(OBJEXT): {$(VPATH)}darray.h vm_sync.$(OBJEXT): {$(VPATH)}debug_counter.h vm_sync.$(OBJEXT): {$(VPATH)}defines.h vm_sync.$(OBJEXT): {$(VPATH)}gc.h @@ -17027,6 +17060,7 @@ vm_trace.$(OBJEXT): {$(VPATH)}backward/2/stdalign.h vm_trace.$(OBJEXT): {$(VPATH)}backward/2/stdarg.h vm_trace.$(OBJEXT): {$(VPATH)}builtin.h vm_trace.$(OBJEXT): {$(VPATH)}config.h +vm_trace.$(OBJEXT): {$(VPATH)}darray.h vm_trace.$(OBJEXT): {$(VPATH)}debug.h vm_trace.$(OBJEXT): {$(VPATH)}debug_counter.h vm_trace.$(OBJEXT): {$(VPATH)}defines.h diff --git a/dary.h b/darray.h similarity index 55% rename from dary.h rename to darray.h index 33eaacd3ce9219..c7e352cded975f 
100644 --- a/dary.h +++ b/darray.h @@ -1,5 +1,5 @@ -#ifndef RUBY_DARY_H -#define RUBY_DARY_H +#ifndef RUBY_DARRAY_H +#define RUBY_DARRAY_H #include #include @@ -12,39 +12,39 @@ // NULL is a valid empty dynamic array. // // Example: -// rb_dary(char) char_array = NULL; -// if (!rb_dary_append(&char_array, 'e')) abort(); -// printf("pushed %c\n", *rb_dary_ref(char_array, 0)); -// rb_dary_free(char_array); +// rb_darray(char) char_array = NULL; +// if (!rb_darray_append(&char_array, 'e')) abort(); +// printf("pushed %c\n", *rb_darray_ref(char_array, 0)); +// rb_darray_free(char_array); // -#define rb_dary(T) struct { rb_dary_meta_t meta; T data[]; } * +#define rb_darray(T) struct { rb_darray_meta_t meta; T data[]; } * // Copy an element out of the array. Warning: not bounds checked. // -// T rb_dary_get(rb_dary(T) ary, int32_t idx); +// T rb_darray_get(rb_darray(T) ary, int32_t idx); // -#define rb_dary_get(ary, idx) ((ary)->data[(idx)]) +#define rb_darray_get(ary, idx) ((ary)->data[(idx)]) // Assign to an element. Warning: not bounds checked. // -// void rb_dary_set(rb_dary(T) ary, int32_t idx, T element); +// void rb_darray_set(rb_darray(T) ary, int32_t idx, T element); // -#define rb_dary_set(ary, idx, element) ((ary)->data[(idx)] = (element)) +#define rb_darray_set(ary, idx, element) ((ary)->data[(idx)] = (element)) // Get a pointer to an element. Warning: not bounds checked. // -// T *rb_dary_ref(rb_dary(T) ary, int32_t idx); +// T *rb_darray_ref(rb_darray(T) ary, int32_t idx); // -#define rb_dary_ref(ary, idx) (&((ary)->data[(idx)])) +#define rb_darray_ref(ary, idx) (&((ary)->data[(idx)])) // Copy a new element into the array. Return 1 on success and 0 on failure. // ptr_to_ary is evaluated multiple times. // -// bool rb_dary_append(rb_dary(T) *ptr_to_ary, T element); +// bool rb_darray_append(rb_darray(T) *ptr_to_ary, T element); // -#define rb_dary_append(ptr_to_ary, element) ( \ - rb_dary_ensure_space((ptr_to_ary)) ? ( \ - rb_dary_set(*(ptr_to_ary), \ +#define rb_darray_append(ptr_to_ary, element) ( \ + rb_darray_ensure_space((ptr_to_ary)) ? ( \ + rb_darray_set(*(ptr_to_ary), \ (*(ptr_to_ary))->meta.size, \ (element)), \ ++((*(ptr_to_ary))->meta.size), \ @@ -53,59 +53,59 @@ // Iterate over items of the array in a for loop // -#define rb_dary_foreach(ary, idx_name, elem_ptr_var) \ - for (int idx_name = 0; idx_name < rb_dary_size(ary) && ((elem_ptr_var) = rb_dary_ref(ary, idx_name)); ++idx_name) +#define rb_darray_foreach(ary, idx_name, elem_ptr_var) \ + for (int idx_name = 0; idx_name < rb_darray_size(ary) && ((elem_ptr_var) = rb_darray_ref(ary, idx_name)); ++idx_name) -typedef struct rb_dary_meta { +typedef struct rb_darray_meta { int32_t size; int32_t capa; -} rb_dary_meta_t; +} rb_darray_meta_t; // Get the size of the dynamic array. // static inline int32_t -rb_dary_size(const void *ary) +rb_darray_size(const void *ary) { - const rb_dary_meta_t *meta = ary; + const rb_darray_meta_t *meta = ary; return meta ? meta->size : 0; } // Get the capacity of the dynamic array. // static inline int32_t -rb_dary_capa(const void *ary) +rb_darray_capa(const void *ary) { - const rb_dary_meta_t *meta = ary; + const rb_darray_meta_t *meta = ary; return meta ? meta->capa : 0; } // Free the dynamic array. // static inline void -rb_dary_free(void *ary) +rb_darray_free(void *ary) { free(ary); } // Remove the last element of the array. 
// -#define rb_dary_pop_back(ary) ((ary)->meta.size--) +#define rb_darray_pop_back(ary) ((ary)->meta.size--) // Internal macro // Ensure there is space for one more element. Return 1 on success and 0 on failure. // `ptr_to_ary` is evaluated multiple times. -#define rb_dary_ensure_space(ptr_to_ary) ( \ - (rb_dary_capa(*(ptr_to_ary)) > rb_dary_size(*(ptr_to_ary))) ? \ +#define rb_darray_ensure_space(ptr_to_ary) ( \ + (rb_darray_capa(*(ptr_to_ary)) > rb_darray_size(*(ptr_to_ary))) ? \ 1 : \ - rb_dary_double(ptr_to_ary, sizeof((*(ptr_to_ary))->data[0]))) + rb_darray_double(ptr_to_ary, sizeof((*(ptr_to_ary))->data[0]))) // Internal function static inline int -rb_dary_double(void *ptr_to_ary, size_t element_size) +rb_darray_double(void *ptr_to_ary, size_t element_size) { - rb_dary_meta_t **ptr_to_ptr_to_meta = ptr_to_ary; - const rb_dary_meta_t *meta = *ptr_to_ptr_to_meta; - int32_t current_capa = rb_dary_capa(meta); + rb_darray_meta_t **ptr_to_ptr_to_meta = ptr_to_ary; + const rb_darray_meta_t *meta = *ptr_to_ptr_to_meta; + int32_t current_capa = rb_darray_capa(meta); int32_t new_capa; // Calculate new capacity @@ -123,7 +123,7 @@ rb_dary_double(void *ptr_to_ary, size_t element_size) size_t new_buffer_size = element_size * (size_t)new_capa + sizeof(*meta); if (new_buffer_size <= current_buffer_size) return 0; - rb_dary_meta_t *doubled_ary = realloc(*ptr_to_ptr_to_meta, new_buffer_size); + rb_darray_meta_t *doubled_ary = realloc(*ptr_to_ptr_to_meta, new_buffer_size); if (!doubled_ary) return 0; if (meta == NULL) { @@ -138,4 +138,4 @@ rb_dary_double(void *ptr_to_ary, size_t element_size) return 1; } -#endif /* RUBY_DARY_H */ +#endif /* RUBY_DARRAY_H */ diff --git a/ext/coverage/depend b/ext/coverage/depend index 650b480b9b42c2..d12b449f5491f8 100644 --- a/ext/coverage/depend +++ b/ext/coverage/depend @@ -165,6 +165,7 @@ coverage.o: $(top_srcdir)/ccan/check_type/check_type.h coverage.o: $(top_srcdir)/ccan/container_of/container_of.h coverage.o: $(top_srcdir)/ccan/list/list.h coverage.o: $(top_srcdir)/ccan/str/str.h +coverage.o: $(top_srcdir)/darray.h coverage.o: $(top_srcdir)/gc.h coverage.o: $(top_srcdir)/internal.h coverage.o: $(top_srcdir)/internal/array.h diff --git a/ext/objspace/depend b/ext/objspace/depend index d3a702007e1168..cf372703da4ec7 100644 --- a/ext/objspace/depend +++ b/ext/objspace/depend @@ -525,6 +525,7 @@ objspace_dump.o: $(top_srcdir)/ccan/check_type/check_type.h objspace_dump.o: $(top_srcdir)/ccan/container_of/container_of.h objspace_dump.o: $(top_srcdir)/ccan/list/list.h objspace_dump.o: $(top_srcdir)/ccan/str/str.h +objspace_dump.o: $(top_srcdir)/darray.h objspace_dump.o: $(top_srcdir)/gc.h objspace_dump.o: $(top_srcdir)/internal.h objspace_dump.o: $(top_srcdir)/internal/array.h diff --git a/ujit_core.c b/ujit_core.c index bff69678ce015d..eb5b1e10f6fae2 100644 --- a/ujit_core.c +++ b/ujit_core.c @@ -148,10 +148,10 @@ static block_t * get_first_version(const rb_iseq_t *iseq, unsigned idx) { struct rb_iseq_constant_body *body = iseq->body; - if (rb_dary_size(body->ujit_blocks) == 0) { + if (rb_darray_size(body->ujit_blocks) == 0) { return NULL; } - return rb_dary_get(body->ujit_blocks, idx); + return rb_darray_get(body->ujit_blocks, idx); } // Add a block version to the map. 
Block should be fully constructed @@ -164,11 +164,11 @@ add_block_version(blockid_t blockid, block_t* block) struct rb_iseq_constant_body *body = iseq->body; // Ensure ujit_blocks is initialized - if (rb_dary_size(body->ujit_blocks) == 0) { + if (rb_darray_size(body->ujit_blocks) == 0) { // Initialize ujit_blocks to be as wide as body->iseq_encoded // TODO: add resize API for dary - while ((unsigned)rb_dary_size(body->ujit_blocks) < body->iseq_size) { - (void)rb_dary_append(&body->ujit_blocks, NULL); + while ((unsigned)rb_darray_size(body->ujit_blocks) < body->iseq_size) { + (void)rb_darray_append(&body->ujit_blocks, NULL); } } @@ -182,7 +182,7 @@ add_block_version(blockid_t blockid, block_t* block) } // Make new block the first version - rb_dary_set(body->ujit_blocks, blockid.idx, block); + rb_darray_set(body->ujit_blocks, blockid.idx, block); RUBY_ASSERT(find_block_version(blockid, &block->ctx) != NULL); { @@ -603,7 +603,7 @@ invalidate_block_version(block_t* block) // Remove references to this block if (first_block == block) { // Make the next block the new first version - rb_dary_set(iseq->body->ujit_blocks, block->blockid.idx, block->next); + rb_darray_set(iseq->body->ujit_blocks, block->blockid.idx, block->next); } else { bool deleted = false; diff --git a/ujit_iface.c b/ujit_iface.c index 519836cc61e761..031969efc5c2fc 100644 --- a/ujit_iface.c +++ b/ujit_iface.c @@ -14,7 +14,7 @@ #include "ujit_core.h" #include "ujit_hooks.inc" #include "ujit.rbinc" -#include "dary.h" +#include "darray.h" #if HAVE_LIBCAPSTONE #include @@ -135,7 +135,7 @@ struct compiled_region { block_t *block; }; -typedef rb_dary(struct compiled_region) block_array_t; +typedef rb_darray(struct compiled_region) block_array_t; static int add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int existing) @@ -146,7 +146,7 @@ add_lookup_dependency_i(st_data_t *key, st_data_t *value, st_data_t data, int ex if (existing) { regions = (block_array_t )*value; } - if (!rb_dary_append(®ions, *region)) { + if (!rb_darray_append(®ions, *region)) { rb_bug("ujit: failed to add method lookup dependency"); // TODO: we could bail out of compiling instead } @@ -252,11 +252,11 @@ rb_ujit_method_lookup_change(VALUE cme_or_cc) block_array_t array = (void *)image; struct compiled_region *elem; - rb_dary_foreach(array, i, elem) { + rb_darray_foreach(array, i, elem) { invalidate_block_version(elem->block); } - rb_dary_free(array); + rb_darray_free(array); } RB_VM_LOCK_LEAVE(); @@ -272,19 +272,19 @@ remove_method_lookup_dependency(VALUE cc_or_cme, block_t *block) struct compiled_region *elem; // Find the block we are removing - rb_dary_foreach(array, i, elem) { + rb_darray_foreach(array, i, elem) { if (elem->block == block) { // Remove the current element by moving the last element here. // Order in the region array doesn't matter. 
- *elem = rb_dary_get(array, rb_dary_size(array) - 1); - rb_dary_pop_back(array); + *elem = rb_darray_get(array, rb_darray_size(array) - 1); + rb_darray_pop_back(array); break; } } - if (rb_dary_size(array) == 0) { + if (rb_darray_size(array) == 0) { st_delete(method_lookup_dependency, &key, NULL); - rb_dary_free(array); + rb_darray_free(array); } } } @@ -336,7 +336,7 @@ ujit_blocks_for(VALUE mod, VALUE rb_iseq) block_t **element; VALUE all_versions = rb_ary_new(); - rb_dary_foreach(iseq->body->ujit_blocks, idx, element) { + rb_darray_foreach(iseq->body->ujit_blocks, idx, element) { for (block_t *version = *element; version; version = version->next) { VALUE rb_block = TypedData_Wrap_Struct(cUjitBlock, &ujit_block_type, version); rb_ary_push(all_versions, rb_block); @@ -554,7 +554,7 @@ void rb_ujit_iseq_mark(const struct rb_iseq_constant_body *body) { block_t **element; - rb_dary_foreach(body->ujit_blocks, idx, element) { + rb_darray_foreach(body->ujit_blocks, idx, element) { for (block_t *block = *element; block; block = block->next) { rb_gc_mark_movable((VALUE)block->blockid.iseq); @@ -569,7 +569,7 @@ void rb_ujit_iseq_update_references(const struct rb_iseq_constant_body *body) { block_t **element; - rb_dary_foreach(body->ujit_blocks, idx, element) { + rb_darray_foreach(body->ujit_blocks, idx, element) { for (block_t *block = *element; block; block = block->next) { block->blockid.iseq = (const rb_iseq_t *)rb_gc_location((VALUE)block->blockid.iseq); @@ -584,7 +584,7 @@ void rb_ujit_iseq_free(const struct rb_iseq_constant_body *body) { block_t **element; - rb_dary_foreach(body->ujit_blocks, idx, element) { + rb_darray_foreach(body->ujit_blocks, idx, element) { block_t *block = *element; while (block) { block_t *next = block->next; diff --git a/vm_core.h b/vm_core.h index 390280a07e996f..c6b4024e8e0925 100644 --- a/vm_core.h +++ b/vm_core.h @@ -77,7 +77,7 @@ #include "ruby/st.h" #include "ruby_atomic.h" #include "vm_opts.h" -#include "dary.h" +#include "darray.h" #include "ruby/thread_native.h" #if defined(_WIN32) @@ -303,7 +303,7 @@ pathobj_realpath(VALUE pathobj) /* Forward declarations */ struct rb_mjit_unit; -typedef rb_dary(struct ujit_block_version *) rb_ujit_block_array_t; +typedef rb_darray(struct ujit_block_version *) rb_ujit_block_array_t; struct rb_iseq_constant_body { enum iseq_type { From d677fe0b118f37f36e077de53dced99ff0c35c63 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 12 Feb 2021 15:03:26 -0500 Subject: [PATCH 6/7] free ujit_blocks --- ujit_iface.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/ujit_iface.c b/ujit_iface.c index 031969efc5c2fc..e25d515d174f43 100644 --- a/ujit_iface.c +++ b/ujit_iface.c @@ -592,6 +592,8 @@ rb_ujit_iseq_free(const struct rb_iseq_constant_body *body) block = next; } } + + rb_darray_free(body->ujit_blocks); } void From c3cd250d49dfeee8bc78d81af9d5642d03384138 Mon Sep 17 00:00:00 2001 From: Alan Wu Date: Fri, 12 Feb 2021 15:48:56 -0500 Subject: [PATCH 7/7] comment about size of ujit_blocks --- ujit_core.c | 1 + vm_core.h | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/ujit_core.c b/ujit_core.c index eb5b1e10f6fae2..0d743c3d89f758 100644 --- a/ujit_core.c +++ b/ujit_core.c @@ -151,6 +151,7 @@ get_first_version(const rb_iseq_t *iseq, unsigned idx) if (rb_darray_size(body->ujit_blocks) == 0) { return NULL; } + RUBY_ASSERT((unsigned)rb_darray_size(body->ujit_blocks) == body->iseq_size); return rb_darray_get(body->ujit_blocks, idx); } diff --git a/vm_core.h b/vm_core.h index c6b4024e8e0925..116070abbc46a4 100644 --- 
a/vm_core.h +++ b/vm_core.h @@ -441,7 +441,7 @@ struct rb_iseq_constant_body { struct rb_mjit_unit *jit_unit; #endif - rb_ujit_block_array_t ujit_blocks; + rb_ujit_block_array_t ujit_blocks; // empty, or has a size equal to iseq_size }; /* T_IMEMO/iseq */
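
A minimal stand-alone usage sketch (not part of the patch series) of the rb_darray API that darray.h introduces above. It shows the NULL-as-empty convention plus the append/foreach/pop_back/free pattern that ujit_blocks and the method lookup dependency table rely on. The system includes are an assumption here, since the header's own #include lines are garbled in this rendering; the macro names and behavior follow darray.h as written.

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include "darray.h"

int main(void)
{
    // NULL is a valid empty dynamic array; no separate init call is needed.
    rb_darray(int) ints = NULL;

    // rb_darray_append returns 1 on success and 0 on allocation failure.
    for (int i = 0; i < 5; ++i) {
        if (!rb_darray_append(&ints, i * i)) abort();
    }

    // rb_darray_foreach walks the array with an index and a pointer to each element.
    int *elem;
    rb_darray_foreach(ints, idx, elem) {
        printf("ints[%d] = %d\n", idx, *elem);
    }

    // Drop the last element, then release the whole array.
    rb_darray_pop_back(ints);
    printf("size = %d, capa = %d\n", (int)rb_darray_size(ints), (int)rb_darray_capa(ints));

    rb_darray_free(ints);
    return 0;
}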