From 54dafedc2a52c46974ec98c57d85e758662ae6e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 17:11:55 +0000 Subject: [PATCH 01/14] Let rb_call_cache also track method arity and the flags set on rb_call_info to group those concerns into a single cache line as well --- internal.h | 7 ++--- mjit_compile.c | 2 +- vm_core.h | 4 +-- vm_eval.c | 2 +- vm_insnhelper.c | 68 +++++++++++++++++++++++++++++-------------------- vm_insnhelper.h | 13 +++++----- 6 files changed, 55 insertions(+), 41 deletions(-) diff --git a/internal.h b/internal.h index 77101d0195a8de..f91f5afcd38b60 100644 --- a/internal.h +++ b/internal.h @@ -2359,7 +2359,8 @@ struct rb_call_cache { struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd); - + short int orig_argc; + short int flag; union { unsigned int index; /* used by ivar */ enum method_missing_reason method_missing_reason; /* used by method_missing */ @@ -2369,8 +2370,8 @@ STATIC_ASSERT(cachelined, sizeof(struct rb_call_cache) <= CACHELINE); struct rb_call_info { /* fixed at compile time */ ID mid; - unsigned int flag; - int orig_argc; + unsigned short int flag; + short int orig_argc; }; struct rb_call_data { struct rb_call_cache cc; diff --git a/mjit_compile.c b/mjit_compile.c index f379a896a8782c..b985149552a272 100644 --- a/mjit_compile.c +++ b/mjit_compile.c @@ -100,7 +100,7 @@ fastpath_applied_iseq_p(const CALL_INFO ci, const CALL_CACHE cc, const rb_iseq_t return iseq != NULL && !(ci->flag & VM_CALL_KW_SPLAT) && rb_simple_iseq_p(iseq) // Top of vm_callee_setup_arg. In this case, opt_pc is 0. && ci->orig_argc == iseq->body->param.lead_num // exclude argument_arity_error (assumption: `calling->argc == ci->orig_argc` in send insns) - && vm_call_iseq_optimizable_p(ci, cc); // CC_SET_FASTPATH condition + && vm_call_iseq_optimizable_p(cc); // CC_SET_FASTPATH condition } static int diff --git a/vm_core.h b/vm_core.h index 4b1b9e43d02604..9e6a352c033709 100644 --- a/vm_core.h +++ b/vm_core.h @@ -250,8 +250,8 @@ struct rb_call_info_with_kwarg { struct rb_calling_info { VALUE block_handler; VALUE recv; - int argc; - int kw_splat; + short int argc; + short int kw_splat; }; struct rb_kwarg_call_data { diff --git a/vm_eval.c b/vm_eval.c index b0179423d88728..d9180149ef3a1f 100644 --- a/vm_eval.c +++ b/vm_eval.c @@ -47,7 +47,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE { struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, }; struct rb_call_info ci = { id, (kw_splat ? VM_CALL_KW_SPLAT : 0), argc, }; - struct rb_call_cache cc = { 0, { 0, }, me, me->def, vm_call_general, { 0, }, }; + struct rb_call_cache cc = { 0, { 0, }, me, me->def, vm_call_general, argc, ci.flag, { 0, }, }; struct rb_call_data cd = { cc, ci, }; return vm_call0_body(ec, &calling, &cd, argv); } diff --git a/vm_insnhelper.c b/vm_insnhelper.c index c8ea3f9b1bb6fd..22978e8499ca1a 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1452,7 +1452,7 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me) * formerly-private method now publicised is an absolutely safe thing. * Calling a private method without specifying a receiver is also safe. */ else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) && - !(ci->flag & VM_CALL_FCALL)) { + !(cc->flag & VM_CALL_FCALL)) { RB_DEBUG_COUNTER_INC(mc_miss_by_visi); return vm_call_general; } @@ -1476,6 +1476,9 @@ rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass) me, me ? 
me->def : NULL, call, + // Cached on rb_call_cache to fit into the same cache line + ci->orig_argc, + ci->flag }; if (call != vm_call_general) { for (int i = 0; i < numberof(cc->class_serial) - 1; i++) { @@ -1498,6 +1501,8 @@ rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass) * rb_callable_method_entry_t *me; * rb_method_definition_struct *def; * vm_call_handler call; + * short int orig_argc; + * short int flag; * union { ... snip ... } aux; * }; * ``` @@ -2112,7 +2117,7 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t const struct rb_call_info *MAYBE_UNUSED(ci) = &cd->ci; const struct rb_call_cache *cc = &cd->cc; - VM_ASSERT((ci->flag & VM_CALL_KWARG) == 0); + VM_ASSERT((cc->flag & VM_CALL_KWARG) == 0); RB_DEBUG_COUNTER_INC(ccf_iseq_kw2); const rb_iseq_t *iseq = def_iseq_ptr(cc->me->def); @@ -2141,7 +2146,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; - if (LIKELY(!(ci->flag & VM_CALL_KW_SPLAT))) { + if (LIKELY(!(cc->flag & VM_CALL_KW_SPLAT))) { if (LIKELY(rb_simple_iseq_p(iseq))) { rb_control_frame_t *cfp = ec->cfp; CALLER_SETUP_ARG(cfp, calling, ci); @@ -2151,7 +2156,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num); } - CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(&cd->ci, &cd->cc)); + CC_SET_FASTPATH(cc, vm_call_iseq_setup_func(ci, param_size, local_size), vm_call_iseq_optimizable_p(&cd->cc)); return 0; } else if (rb_iseq_only_optparam_p(iseq)) { @@ -2168,14 +2173,14 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, argument_arity_error(ec, iseq, argc, lead_num, lead_num + opt_num); } - if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) { + if (LIKELY(!(cc->flag & VM_CALL_TAILCALL))) { CC_SET_FASTPATH(cc, vm_call_iseq_setup_normal_opt_start, - !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && + !IS_ARGS_SPLAT(cc) && !IS_ARGS_KEYWORD(cc) && !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)); } else { CC_SET_FASTPATH(cc, vm_call_iseq_setup_tailcall_opt_start, - !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && + !IS_ARGS_SPLAT(cc) && !IS_ARGS_KEYWORD(cc) && !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED)); } @@ -2186,12 +2191,12 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, } return (int)iseq->body->param.opt_table[opt]; } - else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(ci)) { + else if (rb_iseq_only_kwparam_p(iseq) && !IS_ARGS_SPLAT(cc)) { const int lead_num = iseq->body->param.lead_num; const int argc = calling->argc; const struct rb_iseq_param_keyword *kw_param = iseq->body->param.keyword; - if (ci->flag & VM_CALL_KWARG) { + if (cc->flag & VM_CALL_KWARG) { const struct rb_call_info_kw_arg *kw_arg = ((struct rb_call_info_with_kwarg *)ci)->kw_arg; if (argc - kw_arg->keyword_len == lead_num) { @@ -2248,7 +2253,7 @@ vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct const struct rb_call_info *ci = &cd->ci; const struct rb_call_cache *cc = &cd->cc; - if (LIKELY(!(ci->flag & VM_CALL_TAILCALL))) { + if (LIKELY(!(cc->flag & VM_CALL_TAILCALL))) { return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size); } else { @@ -2590,12 +2595,12 @@ vm_call_bmethod(rb_execution_context_t *ec, rb_control_frame_t 
*cfp, struct rb_c } static enum method_missing_reason -ci_missing_reason(const struct rb_call_info *ci) +cc_missing_reason(const struct rb_call_cache *cc) { enum method_missing_reason stat = MISSING_NOENTRY; - if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL; - if (ci->flag & VM_CALL_FCALL) stat |= MISSING_FCALL; - if (ci->flag & VM_CALL_SUPER) stat |= MISSING_SUPER; + if (cc->flag & VM_CALL_VCALL) stat |= MISSING_VCALL; + if (cc->flag & VM_CALL_FCALL) stat |= MISSING_FCALL; + if (cc->flag & VM_CALL_SUPER) stat |= MISSING_SUPER; return stat; } @@ -2644,7 +2649,7 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct } TOPN(i) = rb_str_intern(sym); ci->mid = idMethodMissing; - ec->method_missing_reason = cc->aux.method_missing_reason = ci_missing_reason(ci); + ec->method_missing_reason = cc->aux.method_missing_reason = cc_missing_reason(cc); } else { /* shift arguments */ @@ -2656,7 +2661,7 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct } cc->me = rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), ci->mid, NULL); - ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0); + cc->flag = ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0); return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd); } @@ -2727,6 +2732,9 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, cd.cc.me = rb_callable_method_entry_without_refinements(CLASS_OF(calling->recv), idMethodMissing, NULL); + // Cached on rb_call_cache to fit into the same cache line + cd.cc.flag = cd.ci.flag; + cd.cc.orig_argc = argc; calling->argc = argc; @@ -2934,7 +2942,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st rb_check_arity(calling->argc, 1, 1); cc->aux.index = 0; - CC_SET_FASTPATH(cc, vm_call_attrset, !((ci->flag & VM_CALL_ARGS_SPLAT) || (ci->flag & VM_CALL_KWARG))); + CC_SET_FASTPATH(cc, vm_call_attrset, !((cc->flag & VM_CALL_ARGS_SPLAT) || (cc->flag & VM_CALL_KWARG))); return vm_call_attrset(ec, cfp, calling, cd); case VM_METHOD_TYPE_IVAR: @@ -2942,7 +2950,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); rb_check_arity(calling->argc, 0, 0); cc->aux.index = 0; - CC_SET_FASTPATH(cc, vm_call_ivar, !(ci->flag & VM_CALL_ARGS_SPLAT)); + CC_SET_FASTPATH(cc, vm_call_ivar, !(cc->flag & VM_CALL_ARGS_SPLAT)); return vm_call_ivar(ec, cfp, calling, cd); case VM_METHOD_TYPE_MISSING: @@ -2999,7 +3007,7 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct /* method missing */ const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; - const int stat = ci_missing_reason(ci); + const int stat = cc_missing_reason(cc); if (ci->mid == idMethodMissing) { rb_control_frame_t *reg_cfp = cfp; @@ -3027,9 +3035,9 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca return vm_call_method_each_type(ec, cfp, calling, cd); case METHOD_VISI_PRIVATE: - if (!(ci->flag & VM_CALL_FCALL)) { + if (!(cc->flag & VM_CALL_FCALL)) { enum method_missing_reason stat = MISSING_PRIVATE; - if (ci->flag & VM_CALL_VCALL) stat |= MISSING_VCALL; + if (cc->flag & VM_CALL_VCALL) stat |= MISSING_VCALL; cc->aux.method_missing_reason = stat; CC_SET_FASTPATH(cc, vm_call_method_missing, TRUE); @@ -3038,7 +3046,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca return 
vm_call_method_each_type(ec, cfp, calling, cd); case METHOD_VISI_PROTECTED: - if (!(ci->flag & VM_CALL_OPT_SEND)) { + if (!(cc->flag & VM_CALL_OPT_SEND)) { if (!rb_obj_is_kind_of(cfp->self, cc->me->defined_class)) { cc->aux.method_missing_reason = MISSING_PROTECTED; return vm_call_method_missing(ec, cfp, calling, cd); @@ -3046,7 +3054,7 @@ vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca else { /* caching method info to dummy cc */ VM_ASSERT(cc->me != NULL); - if (ci->flag & VM_CALL_KWARG) { + if (cc->flag & VM_CALL_KWARG) { struct rb_kwarg_call_data *kcd = (void *)cd; struct rb_kwarg_call_data cd_entry = *kcd; return vm_call_method_each_type(ec, cfp, calling, (void *)&cd_entry); @@ -3138,7 +3146,7 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c rb_obj_class(recv), m); } - if (me->def->type == VM_METHOD_TYPE_BMETHOD && (ci->flag & VM_CALL_ZSUPER)) { + if (me->def->type == VM_METHOD_TYPE_BMETHOD && (cc->flag & VM_CALL_ZSUPER)) { rb_raise(rb_eRuntimeError, "implicit argument passing of super from method defined" " by define_method() is not supported." @@ -4004,15 +4012,19 @@ vm_sendish( struct rb_call_data *cd, VALUE recv)) { - CALL_INFO ci = &cd->ci; - CALL_CACHE cc = &cd->cc; + const CALL_INFO ci = &cd->ci; + const CALL_CACHE cc = &cd->cc; VALUE val; - int argc = ci->orig_argc; + if (UNLIKELY(cc->me == NULL)) { + cc->orig_argc = ci->orig_argc; + cc->flag = ci->flag; + } + int argc = cc->orig_argc; VALUE recv = TOPN(argc); struct rb_calling_info calling; calling.block_handler = block_handler; - calling.kw_splat = IS_ARGS_KW_SPLAT(ci) > 0; + calling.kw_splat = IS_ARGS_KW_SPLAT(cc) > 0; calling.recv = recv; calling.argc = argc; diff --git a/vm_insnhelper.h b/vm_insnhelper.h index 99555fd4ed1d71..ecbac1f71ed8b2 100644 --- a/vm_insnhelper.h +++ b/vm_insnhelper.h @@ -238,17 +238,18 @@ THROW_DATA_CONSUMED_SET(struct vm_throw_data *obj) } } -#define IS_ARGS_SPLAT(ci) ((ci)->flag & VM_CALL_ARGS_SPLAT) -#define IS_ARGS_KEYWORD(ci) ((ci)->flag & VM_CALL_KWARG) -#define IS_ARGS_KW_SPLAT(ci) ((ci)->flag & VM_CALL_KW_SPLAT) -#define IS_ARGS_KW_OR_KW_SPLAT(ci) ((ci)->flag & (VM_CALL_KWARG | VM_CALL_KW_SPLAT)) +// Macro argument can be either rb_call_info or rb_call_cache +#define IS_ARGS_SPLAT(c) ((c)->flag & VM_CALL_ARGS_SPLAT) +#define IS_ARGS_KEYWORD(c) ((c)->flag & VM_CALL_KWARG) +#define IS_ARGS_KW_SPLAT(c) ((c)->flag & VM_CALL_KW_SPLAT) +#define IS_ARGS_KW_OR_KW_SPLAT(c) ((c)->flag & (VM_CALL_KWARG | VM_CALL_KW_SPLAT)) /* If this returns true, an optimized function returned by `vm_call_iseq_setup_func` can be used as a fastpath. 
*/ static bool -vm_call_iseq_optimizable_p(const struct rb_call_info *ci, const struct rb_call_cache *cc) +vm_call_iseq_optimizable_p( const struct rb_call_cache *cc) { - return !IS_ARGS_SPLAT(ci) && !IS_ARGS_KEYWORD(ci) && + return !IS_ARGS_SPLAT(cc) && !IS_ARGS_KEYWORD(cc) && !(METHOD_ENTRY_VISI(cc->me) == METHOD_VISI_PROTECTED); } From 6740477f68f352bf2ef8f5415ad6eed3d281cde1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 17:38:52 +0000 Subject: [PATCH 02/14] Let the inline method cache miss branch due to method visibility change be unlikely (spurious is much more common from debug counters with Rails) --- vm_insnhelper.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 22978e8499ca1a..ccb808c0da6d26 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1451,8 +1451,8 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me) * explicit receiver" is the only situation we have to check here. A * formerly-private method now publicised is an absolutely safe thing. * Calling a private method without specifying a receiver is also safe. */ - else if ((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) && - !(cc->flag & VM_CALL_FCALL)) { + else if (UNLIKELY((METHOD_ENTRY_VISI(cc->me) != METHOD_VISI_PUBLIC) && + !(cc->flag & VM_CALL_FCALL))) { RB_DEBUG_COUNTER_INC(mc_miss_by_visi); return vm_call_general; } From e1cc92265d8a76b1ed8afead6afa59da2d0740d8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 17:40:06 +0000 Subject: [PATCH 03/14] Oust rb_call_info local from calccall --- vm_insnhelper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index ccb808c0da6d26..bf2603a8849590 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1432,7 +1432,6 @@ __attribute__((__artificial__)) static inline vm_call_handler calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me) { - const struct rb_call_info *ci = &cd->ci; const struct rb_call_cache *cc = &cd->cc; if (UNLIKELY(!me)) { From 4a58a1a0c924f1062ff70cfcecd4924ea2bc0dbe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 17:43:01 +0000 Subject: [PATCH 04/14] Oust rb_call_info local from vm_call_iseq_setup_2 --- vm_insnhelper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index bf2603a8849590..1bc2eb963c05ab 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -2249,7 +2249,6 @@ static inline VALUE vm_call_iseq_setup_2(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd, int opt_pc, int param_size, int local_size) { - const struct rb_call_info *ci = &cd->ci; const struct rb_call_cache *cc = &cd->cc; if (LIKELY(!(cc->flag & VM_CALL_TAILCALL))) { return vm_call_iseq_setup_normal(ec, cfp, calling, cc->me, opt_pc, param_size, local_size); } else { From 1d9dd58ca584ae75016582b7b988f051edeb0ced Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3%A9?= Date: Wed, 11 Dec 2019 17:49:17 +0000 Subject: [PATCH 05/14] Oust rb_call_info local from vm_call_method --- vm_insnhelper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 1bc2eb963c05ab..42dd722e04d096 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -3022,7 +3022,6 @@ vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct static inline VALUE vm_call_method(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd) { - const struct 
rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; VM_ASSERT(callable_method_entry_p(cc->me)); From a6e27ce15beeb3e8f8cf8e6d32860304e3d359b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 22:43:38 +0000 Subject: [PATCH 06/14] Move call info to call cache backfill from vm_sendish and handle in compile.c instead + handle loading bytecode as well --- compile.c | 8 ++++++++ vm_insnhelper.c | 4 ---- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/compile.c b/compile.c index e46035a7e042d4..cd5ef163452ed5 100644 --- a/compile.c +++ b/compile.c @@ -2213,11 +2213,15 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor) struct rb_kwarg_call_data *cd_kw = &kw_calls[ISEQ_COMPILE_DATA(iseq)->ci_kw_index++]; cd_kw->ci_kw = *((struct rb_call_info_with_kwarg *)source_ci); cd = (struct rb_call_data *)cd_kw; + cd->cc.orig_argc = cd->ci.orig_argc; + cd->cc.flag = cd->ci.flag; assert(ISEQ_COMPILE_DATA(iseq)->ci_kw_index <= body->ci_kw_size); } else { cd = &body->call_data[ISEQ_COMPILE_DATA(iseq)->ci_index++]; cd->ci = *source_ci; + cd->cc.orig_argc = cd->ci.orig_argc; + cd->cc.flag = cd->ci.flag; assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size); } @@ -10320,6 +10324,8 @@ ibf_load_ci_entries(const struct ibf_load *load, calls[i].ci.mid = ibf_load_id(load, mid_index); calls[i].ci.flag = (unsigned int)ibf_load_small_value(load, &reading_pos); calls[i].ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos); + calls[i].cc.flag = calls[i].ci.flag; + calls[i].cc.orig_argc = calls[i].ci.orig_argc; } for (i = 0; i < ci_kw_size; i++) { @@ -10328,6 +10334,8 @@ ibf_load_ci_entries(const struct ibf_load *load, kw_calls[i].ci_kw.ci.mid = ibf_load_id(load, mid_index); kw_calls[i].ci_kw.ci.flag = (unsigned int)ibf_load_small_value(load, &reading_pos); kw_calls[i].ci_kw.ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos); + kw_calls[i].ci_kw.ci.flag = kw_calls[i].ci_kw.ci.flag; + kw_calls[i].ci_kw.ci.orig_argc = kw_calls[i].ci_kw.ci.orig_argc; int keyword_len = (int)ibf_load_small_value(load, &reading_pos); diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 42dd722e04d096..f16ba77b27ed19 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -4012,10 +4012,6 @@ vm_sendish( const CALL_INFO ci = &cd->ci; const CALL_CACHE cc = &cd->cc; VALUE val; - if (UNLIKELY(cc->me == NULL)) { - cc->orig_argc = ci->orig_argc; - cc->flag = ci->flag; - } int argc = cc->orig_argc; VALUE recv = TOPN(argc); struct rb_calling_info calling; From 5771478e1c318829aa708bb4d5abf85f0edfd4d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Wed, 11 Dec 2019 23:09:17 +0000 Subject: [PATCH 07/14] Remove the call info local from vm_sendish --- internal.h | 1 + vm_insnhelper.c | 1 - 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/internal.h b/internal.h index f91f5afcd38b60..5a412784c76271 100644 --- a/internal.h +++ b/internal.h @@ -2359,6 +2359,7 @@ struct rb_call_cache { struct rb_control_frame_struct *cfp, struct rb_calling_info *calling, struct rb_call_data *cd); + /* cache call info argc and flags in the 4 byte cache line padding */ short int orig_argc; short int flag; union { diff --git a/vm_insnhelper.c b/vm_insnhelper.c index f16ba77b27ed19..07f3055999a7e7 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -4009,7 +4009,6 @@ vm_sendish( struct rb_call_data *cd, VALUE recv)) { - const CALL_INFO ci = &cd->ci; const CALL_CACHE cc = &cd->cc; VALUE val; int argc = cc->orig_argc; From 
f0cbcbf6a457be84dc5b3412e530735b84fa653e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Thu, 12 Dec 2019 00:03:52 +0000 Subject: [PATCH 08/14] Remove another call info local from vm_call_iseq_setup_kwparm_nokwarg --- vm_insnhelper.c | 1 - 1 file changed, 1 deletion(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 07f3055999a7e7..f6b02441d3920d 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -2113,7 +2113,6 @@ vm_call_iseq_setup_kwparm_nokwarg(rb_execution_context_t *ec, rb_control_frame_t struct rb_calling_info *calling, struct rb_call_data *cd) { - const struct rb_call_info *MAYBE_UNUSED(ci) = &cd->ci; const struct rb_call_cache *cc = &cd->cc; VM_ASSERT((cc->flag & VM_CALL_KWARG) == 0); From 754d542220174c12bfc6a34578de1de29187c4af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Thu, 12 Dec 2019 10:08:29 +0000 Subject: [PATCH 09/14] Prefer unsigned short int for method info / cache flag and orig_argc members --- b/a | 0 internal.h | 6 +++--- 2 files changed, 3 insertions(+), 3 deletions(-) create mode 100644 b/a diff --git a/b/a b/b/a new file mode 100644 index 00000000000000..e69de29bb2d1d6 diff --git a/internal.h b/internal.h index 5a412784c76271..177fb47be85164 100644 --- a/internal.h +++ b/internal.h @@ -2360,8 +2360,8 @@ struct rb_call_cache { struct rb_calling_info *calling, struct rb_call_data *cd); /* cache call info argc and flags in the 4 byte cache line padding */ - short int orig_argc; - short int flag; + unsigned short int orig_argc; + unsigned short int flag; union { unsigned int index; /* used by ivar */ enum method_missing_reason method_missing_reason; /* used by method_missing */ @@ -2372,7 +2372,7 @@ struct rb_call_info { /* fixed at compile time */ ID mid; unsigned short int flag; - short int orig_argc; + unsigned short int orig_argc; }; struct rb_call_data { struct rb_call_cache cc; From 62c27ee310d4d4d329493187ece7ef2b08cc8b07 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Thu, 12 Dec 2019 11:21:12 +0000 Subject: [PATCH 10/14] Replace the method definition member with method ID instead and check for refinement by new VM_CALL_REFINED instead --- internal.h | 4 ++-- vm_core.h | 2 ++ vm_eval.c | 5 +++-- vm_insnhelper.c | 9 +++++---- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/internal.h b/internal.h index 177fb47be85164..572e0d30d993ad 100644 --- a/internal.h +++ b/internal.h @@ -2341,7 +2341,7 @@ struct rb_call_cache { (CACHELINE - sizeof(rb_serial_t) /* method_state */ - sizeof(struct rb_callable_method_entry_struct *) /* me */ - - sizeof(struct rb_callable_method_definition_struct *) /* def */ + - sizeof(ID) /* mid */ - sizeof(enum method_missing_reason) /* aux */ - sizeof(VALUE (*)( /* call */ struct rb_execution_context_struct *e, @@ -2353,7 +2353,7 @@ struct rb_call_cache { /* inline cache: values */ const struct rb_callable_method_entry_struct *me; - const struct rb_method_definition_struct *def; + ID mid; VALUE (*call)(struct rb_execution_context_struct *ec, struct rb_control_frame_struct *cfp, diff --git a/vm_core.h b/vm_core.h index 9e6a352c033709..9dc2503d419942 100644 --- a/vm_core.h +++ b/vm_core.h @@ -1095,6 +1095,7 @@ enum vm_call_flag_bits { VM_CALL_SUPER_bit, /* super */ VM_CALL_ZSUPER_bit, /* zsuper */ VM_CALL_OPT_SEND_bit, /* internal flag */ + VM_CALL_REFINED_bit, /* refined */ VM_CALL__END }; @@ -1110,6 +1111,7 @@ enum vm_call_flag_bits { #define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit) #define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit) #define 
VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit) +#define VM_CALL_REFINED (0x01 << VM_CALL_REFINED_bit) enum vm_special_object_type { VM_SPECIAL_OBJECT_VMCORE = 1, diff --git a/vm_eval.c b/vm_eval.c index d9180149ef3a1f..275c8e8f750181 100644 --- a/vm_eval.c +++ b/vm_eval.c @@ -47,7 +47,7 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE { struct rb_calling_info calling = { Qundef, recv, argc, kw_splat, }; struct rb_call_info ci = { id, (kw_splat ? VM_CALL_KW_SPLAT : 0), argc, }; - struct rb_call_cache cc = { 0, { 0, }, me, me->def, vm_call_general, argc, ci.flag, { 0, }, }; + struct rb_call_cache cc = { 0, { 0, }, me, id, vm_call_general, argc, ci.flag, { 0, }, }; struct rb_call_data cd = { cc, ci, }; return vm_call0_body(ec, &calling, &cd, argv); } @@ -180,7 +180,8 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc } else if (cc->me->def->body.refined.orig_me) { cc->me = refined_method_callable_without_refinement(cc->me); - goto again; + cc->flag |= VM_CALL_REFINED; + goto again; } super_class = RCLASS_SUPER(super_class); diff --git a/vm_insnhelper.c b/vm_insnhelper.c index f6b02441d3920d..28fed35636de86 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1430,9 +1430,9 @@ __attribute__((__artificial__)) #endif #endif static inline vm_call_handler -calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me) +calccall(struct rb_call_data *cd, const rb_callable_method_entry_t *me) { - const struct rb_call_cache *cc = &cd->cc; + struct rb_call_cache *cc = &cd->cc; if (UNLIKELY(!me)) { RB_DEBUG_COUNTER_INC(mc_miss_by_nome); @@ -1442,8 +1442,9 @@ calccall(const struct rb_call_data *cd, const rb_callable_method_entry_t *me) RB_DEBUG_COUNTER_INC(mc_miss_by_distinct); return vm_call_general; /* normal cases */ } - else if (UNLIKELY(cc->def != me->def)) { + else if (UNLIKELY(cc->flag & VM_CALL_REFINED)) { RB_DEBUG_COUNTER_INC(mc_miss_by_refine); + cc->flag &= ~VM_CALL_REFINED; return vm_call_general; /* cc->me was refined elsewhere */ } /* "Calling a formerly-public method, which is now privatised, with an @@ -1473,7 +1474,7 @@ rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass) GET_GLOBAL_METHOD_STATE(), { RCLASS_SERIAL(klass) }, me, - me ? 
me->def : NULL, + ci->mid, call, // Cached on rb_call_cache to fit into the same cache line ci->orig_argc, ci->flag From 73c819f958a965702e18ed8f05b4f49acd860288 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Thu, 12 Dec 2019 14:09:55 +0000 Subject: [PATCH 11/14] Prefer an inverse VM_CALL_UNREFINED flag instead and plug the method ID from the call cache into dispatch helpers --- vm_core.h | 4 ++-- vm_eval.c | 20 +++++++++----------- vm_insnhelper.c | 27 +++++++++++++-------------- 3 files changed, 24 insertions(+), 27 deletions(-) diff --git a/vm_core.h b/vm_core.h index 9dc2503d419942..d4d95e74d045db 100644 --- a/vm_core.h +++ b/vm_core.h @@ -1095,7 +1095,7 @@ enum vm_call_flag_bits { VM_CALL_SUPER_bit, /* super */ VM_CALL_ZSUPER_bit, /* zsuper */ VM_CALL_OPT_SEND_bit, /* internal flag */ - VM_CALL_REFINED_bit, /* refined */ + VM_CALL_UNREFINED_bit, /* refined */ VM_CALL__END }; @@ -1111,7 +1111,7 @@ enum vm_call_flag_bits { #define VM_CALL_SUPER (0x01 << VM_CALL_SUPER_bit) #define VM_CALL_ZSUPER (0x01 << VM_CALL_ZSUPER_bit) #define VM_CALL_OPT_SEND (0x01 << VM_CALL_OPT_SEND_bit) -#define VM_CALL_REFINED (0x01 << VM_CALL_REFINED_bit) +#define VM_CALL_UNREFINED (0x01 << VM_CALL_UNREFINED_bit) enum vm_special_object_type { VM_SPECIAL_OBJECT_VMCORE = 1, diff --git a/vm_eval.c b/vm_eval.c index 275c8e8f750181..3543219d14bdb0 100644 --- a/vm_eval.c +++ b/vm_eval.c @@ -49,13 +49,14 @@ rb_vm_call0(rb_execution_context_t *ec, VALUE recv, ID id, int argc, const VALUE struct rb_call_info ci = { id, (kw_splat ? VM_CALL_KW_SPLAT : 0), argc, }; struct rb_call_cache cc = { 0, { 0, }, me, id, vm_call_general, argc, ci.flag, { 0, }, }; struct rb_call_data cd = { cc, ci, }; + cc.flag = ci.flag; + if (LIKELY(me->def->type != VM_METHOD_TYPE_REFINED)) cc.flag |= VM_CALL_UNREFINED; return vm_call0_body(ec, &calling, &cd, argv); } static VALUE vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv) { - const struct rb_call_info *ci = &cd->ci; const struct rb_call_cache *cc = &cd->cc; VALUE val; const rb_callable_method_entry_t *me = cc->me; @@ -63,7 +64,7 @@ vm_call0_cfunc_with_frame(rb_execution_context_t* ec, struct rb_calling_info *ca int len = cfunc->argc; VALUE recv = calling->recv; int argc = calling->argc; - ID mid = ci->mid; + ID mid = cc->mid; VALUE block_handler = calling->block_handler; int frame_flags = VM_FRAME_MAGIC_CFUNC | VM_FRAME_FLAG_CFRAME | VM_ENV_FLAG_LOCAL; @@ -109,7 +110,6 @@ vm_call0_cfunc(rb_execution_context_t *ec, struct rb_calling_info *calling, stru static VALUE vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struct rb_call_data *cd, const VALUE *argv) { - const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; VALUE ret; @@ -180,16 +180,15 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc } else if (cc->me->def->body.refined.orig_me) { cc->me = refined_method_callable_without_refinement(cc->me); - cc->flag |= VM_CALL_REFINED; goto again; } super_class = RCLASS_SUPER(super_class); - if (!super_class || !(cc->me = rb_callable_method_entry(super_class, ci->mid))) { - enum method_missing_reason ex = (type == VM_METHOD_TYPE_ZSUPER) ? MISSING_SUPER : 0; - ret = method_missing(calling->recv, ci->mid, calling->argc, argv, ex, calling->kw_splat); - goto success; + if (!super_class || !(cc->me = rb_callable_method_entry(super_class, cc->mid))) { + enum method_missing_reason ex = (type == VM_METHOD_TYPE_ZSUPER) ? 
MISSING_SUPER : 0; + ret = method_missing(calling->recv, cc->mid, calling->argc, argv, ex, calling->kw_splat); + goto success; } RUBY_VM_CHECK_INTS(ec); goto again; @@ -200,7 +199,7 @@ vm_call0_body(rb_execution_context_t *ec, struct rb_calling_info *calling, struc case VM_METHOD_TYPE_MISSING: { vm_passed_block_handler_set(ec, calling->block_handler); - return method_missing(calling->recv, ci->mid, calling->argc, + return method_missing(calling->recv, cc->mid, calling->argc, argv, MISSING_NOENTRY, calling->kw_splat); } case VM_METHOD_TYPE_OPTIMIZED: @@ -1007,10 +1006,9 @@ rb_funcallv_public_kw(VALUE recv, ID mid, int argc, const VALUE *argv, int kw_sp VALUE rb_funcallv_with_cc(struct rb_call_data *cd, VALUE recv, ID mid, int argc, const VALUE *argv) { - const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; - if (LIKELY(ci->mid == mid)) { + if (LIKELY(cc->mid == mid)) { vm_search_method(cd, recv); if (LIKELY(! UNDEFINED_METHOD_ENTRY_P(cc->me))) { diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 28fed35636de86..128b92558c2cb1 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1442,9 +1442,9 @@ calccall(struct rb_call_data *cd, const rb_callable_method_entry_t *me) RB_DEBUG_COUNTER_INC(mc_miss_by_distinct); return vm_call_general; /* normal cases */ } - else if (UNLIKELY(cc->flag & VM_CALL_REFINED)) { + else if (UNLIKELY(cc->flag & VM_CALL_UNREFINED && cc->me->def->type == VM_METHOD_TYPE_REFINED)) { RB_DEBUG_COUNTER_INC(mc_miss_by_refine); - cc->flag &= ~VM_CALL_REFINED; + cc->flag &= ~VM_CALL_UNREFINED; return vm_call_general; /* cc->me was refined elsewhere */ } /* "Calling a formerly-public method, which is now privatised, with an @@ -1476,10 +1476,10 @@ rb_vm_search_method_slowpath(struct rb_call_data *cd, VALUE klass) me, ci->mid, call, - // Cached on rb_call_cache to fit into the same cache line ci->orig_argc, ci->flag }; + if (LIKELY(me && me->def->type != VM_METHOD_TYPE_REFINED)) buf.flag |= VM_CALL_UNREFINED; if (call != vm_call_general) { for (int i = 0; i < numberof(cc->class_serial) - 1; i++) { buf.class_serial[i + 1] = cc->class_serial[i]; @@ -2483,7 +2483,6 @@ vm_method_cfunc_entry(const rb_callable_method_entry_t *me) static VALUE vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb_calling_info *calling, struct rb_call_data *cd, int empty_kw_splat) { - const struct rb_call_info *ci = &cd->ci; const struct rb_call_cache *cc = &cd->cc; VALUE val; const rb_callable_method_entry_t *me = cc->me; @@ -2504,7 +2503,7 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp } RUBY_DTRACE_CMETHOD_ENTRY_HOOK(ec, me->owner, me->def->original_id); - EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, ci->mid, me->owner, Qundef); + EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_CALL, recv, me->def->original_id, cc->mid, me->owner, Qundef); vm_push_frame(ec, NULL, frame_type, recv, block_handler, (VALUE)me, @@ -2519,7 +2518,7 @@ vm_call_cfunc_with_frame(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp rb_vm_pop_frame(ec); - EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, ci->mid, me->owner, val); + EXEC_EVENT_HOOK(ec, RUBY_EVENT_C_RETURN, recv, me->def->original_id, cc->mid, me->owner, val); RUBY_DTRACE_CMETHOD_RETURN_HOOK(ec, me->owner, me->def->original_id); return val; @@ -2660,6 +2659,7 @@ vm_call_opt_send(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct cc->me = rb_callable_method_entry_with_refinements(CLASS_OF(calling->recv), ci->mid, NULL); 
cc->flag = ci->flag = VM_CALL_FCALL | VM_CALL_OPT_SEND | (calling->kw_splat ? VM_CALL_KW_SPLAT : 0); + cc->mid = ci->mid; return vm_call_method(ec, reg_cfp, calling, (CALL_DATA)&cd); } @@ -2733,6 +2733,7 @@ vm_call_method_missing(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, // Cached on rb_call_cache to fit into the same cache line cd.cc.flag = cd.ci.flag; cd.cc.orig_argc = argc; + cd.cc.mid = cd.ci.mid; calling->argc = argc; @@ -2755,12 +2756,11 @@ vm_call_zsuper(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_ca { RB_DEBUG_COUNTER_INC(ccf_method_missing); - const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; klass = RCLASS_SUPER(klass); - cc->me = klass ? rb_callable_method_entry(klass, ci->mid) : NULL; + cc->me = klass ? rb_callable_method_entry(klass, cc->mid) : NULL; - if (!cc->me) { + if (UNLIKELY(!cc->me)) { return vm_call_method_nome(ec, cfp, calling, cd); } if (cc->me->def->type == VM_METHOD_TYPE_REFINED && @@ -2988,7 +2988,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st return vm_call_zsuper(ec, cfp, calling, cd, RCLASS_ORIGIN(cc->me->defined_class)); case VM_METHOD_TYPE_REFINED: - if (search_refined_method(ec, cfp, ci->mid, cc)) + if (search_refined_method(ec, cfp, cc->mid, cc)) return vm_call_method(ec, cfp, calling, cd); else return vm_call_method_nome(ec, cfp, calling, cd); @@ -3003,11 +3003,10 @@ static VALUE vm_call_method_nome(rb_execution_context_t *ec, rb_control_frame_t *cfp, struct rb_calling_info *calling, struct rb_call_data *cd) { /* method missing */ - const struct rb_call_info *ci = &cd->ci; struct rb_call_cache *cc = &cd->cc; const int stat = cc_missing_reason(cc); - if (ci->mid == idMethodMissing) { + if (cc->mid == idMethodMissing) { rb_control_frame_t *reg_cfp = cfp; VALUE *argv = STACK_ADDR_FROM_TOP(calling->argc); vm_raise_method_missing(ec, calling->argc, argv, calling->recv, stat); @@ -3150,7 +3149,7 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c " Specify all arguments explicitly."); } - ci->mid = me->def->original_id; + ci->mid = cc->mid = me->def->original_id; klass = vm_search_normal_superclass(me->defined_class); if (!klass) { @@ -3160,7 +3159,7 @@ vm_search_super_method(const rb_control_frame_t *reg_cfp, struct rb_call_data *c } else { /* TODO: use inline cache */ - cc->me = rb_callable_method_entry(klass, ci->mid); + cc->me = rb_callable_method_entry(klass, cc->mid); CC_SET_FASTPATH(cc, vm_call_super_method, TRUE); } } From 3edf2370df2a80893604df18acc1f49125a07c86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Fri, 13 Dec 2019 12:03:42 +0000 Subject: [PATCH 12/14] Also cover a few remaining edge cases of flag, orig_argc and mid not properly set on the call cache --- compile.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/compile.c b/compile.c index cd5ef163452ed5..072427ba0c3421 100644 --- a/compile.c +++ b/compile.c @@ -2215,6 +2215,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor) cd = (struct rb_call_data *)cd_kw; cd->cc.orig_argc = cd->ci.orig_argc; cd->cc.flag = cd->ci.flag; + cd->cc.mid = cd->ci.mid; assert(ISEQ_COMPILE_DATA(iseq)->ci_kw_index <= body->ci_kw_size); } else { @@ -2222,6 +2223,7 @@ iseq_set_sequence(rb_iseq_t *iseq, LINK_ANCHOR *const anchor) cd->ci = *source_ci; cd->cc.orig_argc = cd->ci.orig_argc; cd->cc.flag = cd->ci.flag; + cd->cc.mid = cd->ci.mid; assert(ISEQ_COMPILE_DATA(iseq)->ci_index <= body->ci_size); } @@ -10326,6 +10328,7 @@ 
ibf_load_ci_entries(const struct ibf_load *load, calls[i].ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos); calls[i].cc.flag = calls[i].ci.flag; calls[i].cc.orig_argc = calls[i].ci.orig_argc; + calls[i].cc.mid = calls[i].ci.mid; } for (i = 0; i < ci_kw_size; i++) { @@ -10334,8 +10337,9 @@ ibf_load_ci_entries(const struct ibf_load *load, kw_calls[i].ci_kw.ci.mid = ibf_load_id(load, mid_index); kw_calls[i].ci_kw.ci.flag = (unsigned int)ibf_load_small_value(load, &reading_pos); kw_calls[i].ci_kw.ci.orig_argc = (int)ibf_load_small_value(load, &reading_pos); - kw_calls[i].ci_kw.ci.flag = kw_calls[i].ci_kw.ci.flag; - kw_calls[i].ci_kw.ci.orig_argc = kw_calls[i].ci_kw.ci.orig_argc; + kw_calls[i].cc.flag = kw_calls[i].ci_kw.ci.flag; + kw_calls[i].cc.orig_argc = kw_calls[i].ci_kw.ci.orig_argc; + kw_calls[i].cc.mid = kw_calls[i].ci_kw.ci.mid; int keyword_len = (int)ibf_load_small_value(load, &reading_pos); From 2766293398f84c2124b6b567b64b3c9545cbce54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Fri, 13 Dec 2019 15:37:07 +0000 Subject: [PATCH 13/14] Also let vm_caller_setup_arg_block prefer the call cache --- insns.def | 4 ++-- tool/ruby_vm/views/_mjit_compile_send.erb | 2 +- vm_args.c | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/insns.def b/insns.def index bd1bffbe02d8d4..a4f09f877fe0bf 100644 --- a/insns.def +++ b/insns.def @@ -778,7 +778,7 @@ send // attr rb_snum_t sp_inc = sp_inc_of_sendish(&cd->ci); // attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci); { - VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->ci, blockiseq, false); + VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->cc, blockiseq, false); val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_method_wrap); if (val == Qundef) { @@ -884,7 +884,7 @@ invokesuper // attr rb_snum_t sp_inc = sp_inc_of_sendish(&cd->ci); // attr rb_snum_t comptime_sp_inc = sp_inc_of_sendish(ci); { - VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->ci, blockiseq, true); + VALUE bh = vm_caller_setup_arg_block(ec, GET_CFP(), &cd->cc, blockiseq, true); val = vm_sendish(ec, GET_CFP(), cd, bh, vm_search_super_method); if (val == Qundef) { diff --git a/tool/ruby_vm/views/_mjit_compile_send.erb b/tool/ruby_vm/views/_mjit_compile_send.erb index ec8eec5589c42a..0958af12429e3f 100644 --- a/tool/ruby_vm/views/_mjit_compile_send.erb +++ b/tool/ruby_vm/views/_mjit_compile_send.erb @@ -59,7 +59,7 @@ fprintf(f, " {\n"); fprintf(f, " struct rb_calling_info calling;\n"); % if insn.name == 'send' - fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_INFO)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)ci, (VALUE)blockiseq); + fprintf(f, " calling.block_handler = vm_caller_setup_arg_block(ec, reg_cfp, (CALL_CACHE)0x%"PRIxVALUE", (rb_iseq_t *)0x%"PRIxVALUE", FALSE);\n", (VALUE)cc_copy, (VALUE)blockiseq); % else fprintf(f, " calling.block_handler = VM_BLOCK_HANDLER_NONE;\n"); % end diff --git a/vm_args.c b/vm_args.c index c6c111865ee20d..1f74d0ed2113ba 100644 --- a/vm_args.c +++ b/vm_args.c @@ -1191,9 +1191,9 @@ refine_sym_proc_call(RB_BLOCK_CALL_FUNC_ARGLIST(yielded_arg, callback_arg)) static VALUE vm_caller_setup_arg_block(const rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, - const struct rb_call_info *ci, const rb_iseq_t *blockiseq, const int is_super) + const struct rb_call_cache *cc, const rb_iseq_t *blockiseq, const int is_super) { - if (ci->flag & VM_CALL_ARGS_BLOCKARG) { + if (cc->flag & VM_CALL_ARGS_BLOCKARG) { VALUE block_code = 
*(--reg_cfp->sp); if (NIL_P(block_code)) { From 2cddcdaff476a53211d3c02badcc641b7387f41f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Lourens=20Naud=C3=A9?= Date: Fri, 13 Dec 2019 15:47:02 +0000 Subject: [PATCH 14/14] Remove spurious rb_call_info* argument from CALLER_REMOVE_EMPTY_KW_SPLAT --- vm_insnhelper.c | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/vm_insnhelper.c b/vm_insnhelper.c index 128b92558c2cb1..410748c1b9c815 100644 --- a/vm_insnhelper.c +++ b/vm_insnhelper.c @@ -1985,8 +1985,7 @@ CALLER_SETUP_ARG(struct rb_control_frame_struct *restrict cfp, static inline void CALLER_REMOVE_EMPTY_KW_SPLAT(struct rb_control_frame_struct *restrict cfp, - struct rb_calling_info *restrict calling, - const struct rb_call_info *restrict ci) + struct rb_calling_info *restrict calling) { if (UNLIKELY(calling->kw_splat)) { /* This removes the last Hash object if it is empty. @@ -2149,7 +2148,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, if (LIKELY(rb_simple_iseq_p(iseq))) { rb_control_frame_t *cfp = ec->cfp; CALLER_SETUP_ARG(cfp, calling, ci); - CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling); if (calling->argc != iseq->body->param.lead_num) { argument_arity_error(ec, iseq, calling->argc, iseq->body->param.lead_num, iseq->body->param.lead_num); @@ -2161,7 +2160,7 @@ vm_callee_setup_arg(rb_execution_context_t *ec, struct rb_calling_info *calling, else if (rb_iseq_only_optparam_p(iseq)) { rb_control_frame_t *cfp = ec->cfp; CALLER_SETUP_ARG(cfp, calling, ci); - CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling); const int lead_num = iseq->body->param.lead_num; const int opt_num = iseq->body->param.opt_num; @@ -2533,7 +2532,7 @@ vm_call_cfunc(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, struct rb CALLER_SETUP_ARG(reg_cfp, calling, ci); empty_kw_splat = calling->kw_splat; - CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(reg_cfp, calling); if (empty_kw_splat && calling->kw_splat) { empty_kw_splat = 0; } @@ -2935,7 +2934,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st rb_warn_keyword_to_last_hash(ec, calling, ci, NULL); } else { - CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling); } rb_check_arity(calling->argc, 1, 1); @@ -2945,7 +2944,7 @@ vm_call_method_each_type(rb_execution_context_t *ec, rb_control_frame_t *cfp, st case VM_METHOD_TYPE_IVAR: CALLER_SETUP_ARG(cfp, calling, ci); - CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling); rb_check_arity(calling->argc, 0, 0); cc->aux.index = 0; CC_SET_FASTPATH(cc, vm_call_ivar, !(cc->flag & VM_CALL_ARGS_SPLAT)); @@ -3271,7 +3270,7 @@ vm_callee_setup_block_arg(rb_execution_context_t *ec, struct rb_calling_info *ca rb_warn_keyword_to_last_hash(ec, calling, ci, iseq); } else { - CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(cfp, calling); } if (arg_setup_type == arg_setup_block && @@ -3372,7 +3371,7 @@ vm_invoke_ifunc_block(rb_execution_context_t *ec, rb_control_frame_t *reg_cfp, int argc; int kw_splat = calling->kw_splat; CALLER_SETUP_ARG(ec->cfp, calling, ci); - CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling, ci); + CALLER_REMOVE_EMPTY_KW_SPLAT(ec->cfp, calling); if (kw_splat && !calling->kw_splat) { kw_splat = 2; }