@@ -202,12 +202,12 @@ fn jit_peek_at_stack(jit: &JITState, ctx: &Context, n:isize) -> VALUE
}
}

- /*
fn jit_peek_at_self(jit: &JITState, ctx: &Context) -> VALUE
{
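+ // Read the receiver (`self`) out of the current control frame (cfp).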
unsafe { cfp_get_self(ec_get_cfp(jit.ec.unwrap())) }
}

+ /*
fn jit_peek_at_local(jit: &JITState, ctx: &Context, n: i32) -> VALUE
{
assert!(jit_at_current_insn(jit));
@@ -1944,15 +1944,12 @@ pub const OPT_AREF_MAX_CHAIN_DEPTH:i32 = 2;
// up to 5 different classes
pub const SEND_MAX_DEPTH: i32 = 5;

- /*
- VALUE rb_vm_set_ivar_idx(VALUE obj, uint32_t idx, VALUE val);
-
// Codegen for setting an instance variable.
// Preconditions:
// - receiver is in REG0
// - receiver has the same class as CLASS_OF(comptime_receiver)
// - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
- fn gen_set_ivar(jitstate_t *jit, ctx_t *ctx, VALUE recv, VALUE klass, ID ivar_name)
+ fn gen_set_ivar(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, recv: VALUE, klass: VALUE, ivar_name: ID) -> CodegenStatus
{
// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
@@ -1962,13 +1959,14 @@ fn gen_set_ivar(jitstate_t *jit, ctx_t *ctx, VALUE recv, VALUE klass, ID ivar_na
let val_opnd = ctx.stack_pop(1);
let recv_opnd = ctx.stack_pop(1);

- uint32_t ivar_index = rb_obj_ensure_iv_index_mapping(recv, ivar_name);
+ let ivar_index: u32 = unsafe { rb_obj_ensure_iv_index_mapping(recv, ivar_name) };

// Call rb_vm_set_ivar_idx with the receiver, the index of the ivar, and the value
mov(cb, C_ARG_REGS[0], recv_opnd);
- mov(cb, C_ARG_REGS[1], imm_opnd(ivar_index));
+ mov(cb, C_ARG_REGS[1], imm_opnd(ivar_index.into()));
mov(cb, C_ARG_REGS[2], val_opnd);
- call_ptr(cb, REG0, (void *)rb_vm_set_ivar_idx);
+ let set_ivar_idx = CodePtr::from(rb_vm_set_ivar_idx as *mut u8);
+ call_ptr(cb, REG0, set_ivar_idx);
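+ // C: VALUE rb_vm_set_ivar_idx(VALUE obj, uint32_t idx, VALUE val);
+ // the returned VALUE lands in RAX and is pushed onto the stack below.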

let out_opnd = ctx.stack_push(Type::Unknown);
mov(cb, out_opnd, RAX);
@@ -1981,17 +1979,20 @@ fn gen_set_ivar(jitstate_t *jit, ctx_t *ctx, VALUE recv, VALUE klass, ID ivar_na
// - receiver is in REG0
// - receiver has the same class as CLASS_OF(comptime_receiver)
// - no stack push or pops to ctx since the entry to the codegen of the instruction being compiled
- fn gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE comptime_receiver, ID ivar_name, insn_opnd_t reg0_opnd, uint8_t *side_exit)
+ fn gen_get_ivar(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, ocb: &mut OutlinedCb, max_chain_depth: i32, comptime_receiver: VALUE, ivar_name: ID, reg0_opnd: InsnOpnd, side_exit: CodePtr) -> CodegenStatus
{
- VALUE comptime_val_klass = CLASS_OF(comptime_receiver);
- const ctx_t starting_context = *ctx; // make a copy for use with jit_chain_guard
+ let comptime_val_klass = comptime_receiver.class_of();
+ let starting_context = ctx.clone(); // make a copy for use with jit_chain_guard
+
+ let custom_allocator = unsafe { rb_get_alloc_func(comptime_val_klass).unwrap() as *mut u8 };
+ let allocate_instance = rb_class_allocate_instance as *mut u8;
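+ // A class using the default allocator has rb_get_alloc_func(klass) ==
+ // rb_class_allocate_instance, so the two are compared as raw fn pointers.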

// If the class uses the default allocator, instances should all be T_OBJECT
// NOTE: This assumes nobody changes the allocator of the class after allocation.
// Eventually, we can encode whether an object is T_OBJECT or not
// inside object shapes.
- if (!RB_TYPE_P(comptime_receiver, T_OBJECT) ||
- rb_get_alloc_func(comptime_val_klass) != rb_class_allocate_instance) {
+ if !unsafe { RB_TYPE_P(comptime_receiver, RUBY_T_OBJECT) } ||
+ custom_allocator != allocate_instance {
// General case. Call rb_ivar_get().
// VALUE rb_ivar_get(VALUE obj, ID id)
add_comment(cb, "call rb_ivar_get()");
@@ -2000,18 +2001,19 @@ fn gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE co
jit_prepare_routine_call(jit, ctx, cb, REG1);

mov(cb, C_ARG_REGS[0], REG0);
- mov(cb, C_ARG_REGS[1], imm_opnd((int64_t)ivar_name));
- call_ptr(cb, REG1, (void *)rb_ivar_get);
+ mov(cb, C_ARG_REGS[1], uimm_opnd(ivar_name));
+ let ivar_get = CodePtr::from(rb_ivar_get as *mut u8);
+ call_ptr(cb, REG1, ivar_get);
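+ // ID is an unsigned type, so the name is passed as an unsigned immediate
+ // rather than through a signed cast.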

- if (!reg0_opnd.is_self) {
- (void) ctx.stack_pop(1);
+ if reg0_opnd != InsnOpnd::SelfOpnd {
+ ctx.stack_pop(1);
}
// Push the ivar on the stack
let out_opnd = ctx.stack_push(Type::Unknown);
mov(cb, out_opnd, RAX);

// Jump to next instruction. This allows guard chains to share the same successor.
- jit_jump_to_next_insn(jit, ctx);
+ jit_jump_to_next_insn(jit, ctx, cb, ocb);
return EndBlock;
}
@@ -2037,31 +2039,33 @@ fn gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE co
// FIXME: Mapping the index could fail when there are too many ivar names. If we're
// compiling for a branch stub, that can cause the exception to be thrown from the
// wrong PC.
- uint32_t ivar_index = rb_obj_ensure_iv_index_mapping(comptime_receiver, ivar_name);
+ let ivar_index: usize = unsafe { rb_obj_ensure_iv_index_mapping(comptime_receiver, ivar_name) } as usize;

// Pop receiver if it's on the temp stack
- if (!reg0_opnd.is_self) {
- (void) ctx.stack_pop(1);
+ if reg0_opnd != InsnOpnd::SelfOpnd {
+ ctx.stack_pop(1);
}

// Compile time self is embedded and the ivar index lands within the object
- if (RB_FL_TEST_RAW(comptime_receiver, ROBJECT_EMBED) && ivar_index < ROBJECT_EMBED_LEN_MAX) {
+ let test_result = unsafe { FL_TEST_RAW(comptime_receiver, VALUE(ROBJECT_EMBED)) != VALUE(0) };
+ if test_result && ivar_index < ROBJECT_EMBED_LEN_MAX {
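+ // Embedded objects store up to ROBJECT_EMBED_LEN_MAX ivars inline in the
+ // RObject itself (its as.ary member) instead of a heap-allocated table.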
// See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h

// Guard that self is embedded
// TODO: BT and JC are shorter
add_comment(cb, "guard embedded getivar");
- let flags_opnd = member_opnd(REG0, struct RBasic, flags);
- test(cb, flags_opnd, imm_opnd(ROBJECT_EMBED));
+ let flags_opnd = mem_opnd(64, REG0, RUBY_OFFSET_RBASIC_FLAGS);
+ test(cb, flags_opnd, uimm_opnd(ROBJECT_EMBED as u64));
jit_chain_guard(JCC_JZ, jit, &starting_context, cb, max_chain_depth, counted_exit!(ocb, side_exit, getivar_megamorphic));
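+ // On repeated guard failures, jit_chain_guard compiles up to max_chain_depth
+ // new block versions before falling back to the counted side exit.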

// Load the variable
- let ivar_opnd = mem_opnd(64, REG0, offsetof(struct RObject, as.ary) + ivar_index * SIZEOF_VALUE);
+ let offs = RUBY_OFFSET_ROBJECT_AS_ARY + (ivar_index * SIZEOF_VALUE) as i32;
+ let ivar_opnd = mem_opnd(64, REG0, offs);
mov(cb, REG1, ivar_opnd);

// Guard that the variable is not Qundef
- cmp(cb, REG1, imm_opnd(Qundef));
- mov(cb, REG0, imm_opnd(Qnil));
+ cmp(cb, REG1, uimm_opnd(Qundef.into()));
+ mov(cb, REG0, uimm_opnd(Qnil.into()));
cmove(cb, REG1, REG0);
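+ // An unset slot reads as Qundef, which must not leak into Ruby code; the
+ // cmove swaps in Qnil branchlessly instead of taking a side exit.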

// Push the ivar on the stack
@@ -2074,29 +2078,29 @@ fn gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE co
// Guard that value is *not* embedded
// See ROBJECT_IVPTR() from include/ruby/internal/core/robject.h
add_comment(cb, "guard extended getivar");
- let flags_opnd = member_opnd(REG0, struct RBasic, flags);
- test(cb, flags_opnd, imm_opnd(ROBJECT_EMBED));
+ let flags_opnd = mem_opnd(64, REG0, RUBY_OFFSET_RBASIC_FLAGS);
+ test(cb, flags_opnd, uimm_opnd(ROBJECT_EMBED as u64));
jit_chain_guard(JCC_JNZ, jit, &starting_context, cb, max_chain_depth, counted_exit!(ocb, side_exit, getivar_megamorphic));

// Check that the extended table is big enough
- if (ivar_index >= ROBJECT_EMBED_LEN_MAX + 1) {
+ if ivar_index >= ROBJECT_EMBED_LEN_MAX + 1 {
// Check that the slot is inside the extended table (num_slots > index)
- let num_slots = mem_opnd(32, REG0, offsetof(struct RObject, as.heap.numiv));
- cmp(cb, num_slots, imm_opnd(ivar_index));
+ let num_slots = mem_opnd(32, REG0, RUBY_OFFSET_ROBJECT_AS_HEAP_NUMIV);
+ cmp(cb, num_slots, uimm_opnd(ivar_index as u64));
jle_ptr(cb, counted_exit!(ocb, side_exit, getivar_idx_out_of_range));
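+ // counted_exit! wraps side_exit so that taking this exit also bumps the
+ // getivar_idx_out_of_range stats counter.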
}

// Get a pointer to the extended table
- let tbl_opnd = mem_opnd(64, REG0, offsetof(struct RObject, as.heap.ivptr));
+ let tbl_opnd = mem_opnd(64, REG0, RUBY_OFFSET_ROBJECT_AS_HEAP_IVPTR);
mov(cb, REG0, tbl_opnd);

// Read the ivar from the extended table
- let ivar_opnd = mem_opnd(64, REG0, SIZEOF_VALUE * ivar_index);
+ let ivar_opnd = mem_opnd(64, REG0, (SIZEOF_VALUE * ivar_index) as i32);
mov(cb, REG0, ivar_opnd);

// Check that the ivar is not Qundef
- cmp(cb, REG0, imm_opnd(Qundef));
- mov(cb, REG1, imm_opnd(Qnil));
+ cmp(cb, REG0, uimm_opnd(Qundef.into()));
+ mov(cb, REG1, uimm_opnd(Qnil.into()));
cmove(cb, REG0, REG1);

// Push the ivar on the stack
@@ -2105,40 +2109,38 @@ fn gen_get_ivar(jitstate_t *jit, ctx_t *ctx, const int max_chain_depth, VALUE co
}

// Jump to next instruction. This allows guard chains to share the same successor.
- jit_jump_to_next_insn(jit, ctx);
+ jit_jump_to_next_insn(jit, ctx, cb, ocb);
EndBlock
}

fn gen_getinstancevariable(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, ocb: &mut OutlinedCb) -> CodegenStatus
{
// Defer compilation so we can specialize on a runtime `self`
- if (!jit_at_current_insn(jit)) {
+ if !jit_at_current_insn(jit) {
defer_compilation(jit, cb, ctx);
return EndBlock;
}
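+ // defer_compilation installs a stub and ends this block; codegen resumes
+ // here later, once a concrete runtime `self` is available to specialize on.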

- ID ivar_name = (ID)jit_get_arg(jit, 0);
+ let ivar_name = jit_get_arg(jit, 0).as_u64();

- VALUE comptime_val = jit_peek_at_self(jit, ctx);
- VALUE comptime_val_klass = CLASS_OF(comptime_val);
+ let comptime_val = jit_peek_at_self(jit, ctx);
+ let comptime_val_klass = comptime_val.class_of();

// Generate a side exit
- uint8_t *side_exit = get_side_exit(jit, ocb, ctx);
+ let side_exit = get_side_exit(jit, ocb, ctx);

// Guard that the receiver has the same class as the one from compile time.
- mov(cb, REG0, member_opnd(REG_CFP, rb_control_frame_t, self));
+ mov(cb, REG0, mem_opnd(64, REG_CFP, RUBY_OFFSET_CFP_SELF));

- jit_guard_known_klass(jit, ctx, cb, comptime_val_klass, OPND_SELF, comptime_val, GETIVAR_MAX_DEPTH, side_exit);
+ jit_guard_known_klass(jit, ctx, cb, comptime_val_klass, InsnOpnd::SelfOpnd, comptime_val, GET_IVAR_MAX_DEPTH, side_exit);

- return gen_get_ivar(jit, ctx, GETIVAR_MAX_DEPTH, comptime_val, ivar_name, OPND_SELF, side_exit);
+ gen_get_ivar(jit, ctx, cb, ocb, GET_IVAR_MAX_DEPTH, comptime_val, ivar_name, InsnOpnd::SelfOpnd, side_exit)
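+ // Tail expression: the CodegenStatus from gen_get_ivar is the return value.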
}

- void rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic);
-
fn gen_setinstancevariable(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, ocb: &mut OutlinedCb) -> CodegenStatus
{
- ID id = (ID)jit_get_arg(jit, 0);
- IVC ic = (IVC)jit_get_arg(jit, 1);
+ let id = jit_get_arg(jit, 0);
+ let ic = jit_get_arg(jit, 1).as_u64(); // type IVC
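+ // The IVC inline-cache pointer is carried as a plain u64 and only
+ // reinterpreted as a pointer when handed back to C.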

// Save the PC and SP because the callee may allocate
// Note that this modifies REG_SP, which is why we do it first
@@ -2148,16 +2150,17 @@ fn gen_setinstancevariable(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeB
let val_opnd = ctx.stack_pop(1);

// Call rb_vm_setinstancevariable(iseq, obj, id, val, ic);
- mov(cb, C_ARG_REGS[1], member_opnd(REG_CFP, rb_control_frame_t, self));
+ mov(cb, C_ARG_REGS[1], mem_opnd(64, REG_CFP, RUBY_OFFSET_CFP_SELF));
mov(cb, C_ARG_REGS[3], val_opnd);
- mov(cb, C_ARG_REGS[2], imm_opnd(id));
- mov(cb, C_ARG_REGS[4], const_ptr_opnd(ic));
- jit_mov_gc_ptr(jit, cb, C_ARG_REGS[0], (VALUE)jit->iseq);
- call_ptr(cb, REG0, (void *)rb_vm_setinstancevariable);
+ mov(cb, C_ARG_REGS[2], uimm_opnd(id.into()));
+ mov(cb, C_ARG_REGS[4], const_ptr_opnd(ic as *const u8));
+ let iseq = VALUE(jit.iseq as usize);
+ jit_mov_gc_ptr(jit, cb, C_ARG_REGS[0], iseq);
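+ // jit_mov_gc_ptr emits the mov and records the iseq reference so the GC
+ // can trace and update this pointer embedded in generated code.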
+ let vm_setinstancevar = CodePtr::from(rb_vm_setinstancevariable as *mut u8);
+ call_ptr(cb, REG0, vm_setinstancevar);
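+ // C: void rb_vm_setinstancevariable(const rb_iseq_t *iseq, VALUE obj, ID id, VALUE val, IVC ic);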

KeepCompiling
}
- */

fn gen_defined(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock, ocb: &mut OutlinedCb) -> CodegenStatus
{
@@ -5248,11 +5251,10 @@ fn get_gen_fn(opcode: VALUE) -> Option<CodeGenFn>
OP_DEFINED => Some(gen_defined),
OP_CHECKKEYWORD => Some(gen_checkkeyword),
OP_CONCATSTRINGS => Some(gen_concatstrings),
+ OP_GETINSTANCEVARIABLE => Some(gen_getinstancevariable),
+ OP_SETINSTANCEVARIABLE => Some(gen_setinstancevariable),

/*
- yjit_reg_op(BIN(concatstrings), gen_concatstrings);
- yjit_reg_op(BIN(getinstancevariable), gen_getinstancevariable);
- yjit_reg_op(BIN(setinstancevariable), gen_setinstancevariable);
yjit_reg_op(BIN(opt_eq), gen_opt_eq);
yjit_reg_op(BIN(opt_neq), gen_opt_neq);
yjit_reg_op(BIN(opt_aref), gen_opt_aref);