@@ -1402,6 +1402,42 @@ gen_opt_aref(jitstate_t *jit, ctx_t *ctx)
 }
 }

+VALUE rb_vm_opt_aset(VALUE recv, VALUE obj, VALUE set);
+
+static codegen_status_t
+gen_opt_aset(jitstate_t *jit, ctx_t *ctx)
+{
+    // Save the PC and SP because the callee may allocate
+    // Note that this modifies REG_SP, which is why we do it first
+    jit_save_pc(jit, REG0);
+    jit_save_sp(jit, ctx);
+
+    uint8_t *side_exit = yjit_side_exit(jit, ctx);
+
+    // Get the operands from the stack
+    x86opnd_t arg2 = ctx_stack_pop(ctx, 1);
+    x86opnd_t arg1 = ctx_stack_pop(ctx, 1);
+    x86opnd_t arg0 = ctx_stack_pop(ctx, 1);
+
+    // Call rb_vm_opt_aset(VALUE recv, VALUE obj, VALUE set)
+    yjit_save_regs(cb);
+    mov(cb, C_ARG_REGS[0], arg0);
+    mov(cb, C_ARG_REGS[1], arg1);
+    mov(cb, C_ARG_REGS[2], arg2);
+    call_ptr(cb, REG0, (void *)rb_vm_opt_aset);
+    yjit_load_regs(cb);
+
+    // If val == Qundef, bail to do a method call
+    cmp(cb, RAX, imm_opnd(Qundef));
+    je_ptr(cb, side_exit);
+
+    // Push the return value onto the stack
+    x86opnd_t stack_ret = ctx_stack_push(ctx, TYPE_UNKNOWN);
+    mov(cb, stack_ret, RAX);
+
+    return YJIT_KEEP_COMPILING;
+}
+
 static codegen_status_t
 gen_opt_and(jitstate_t *jit, ctx_t *ctx)
 {
@@ -2533,6 +2569,7 @@ yjit_init_codegen(void)
     yjit_reg_op(BIN(opt_gt), gen_opt_gt);
     yjit_reg_op(BIN(opt_eq), gen_opt_eq);
     yjit_reg_op(BIN(opt_aref), gen_opt_aref);
+    yjit_reg_op(BIN(opt_aset), gen_opt_aset);
     yjit_reg_op(BIN(opt_and), gen_opt_and);
     yjit_reg_op(BIN(opt_or), gen_opt_or);
     yjit_reg_op(BIN(opt_minus), gen_opt_minus);
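
The Qundef comparison in gen_opt_aset works because rb_vm_opt_aset follows the VM's usual fast-path contract: it either performs the built-in []= and returns the stored value, or returns Qundef to signal that the generated code should side-exit into a normal method call. The snippet below is only a minimal sketch of that contract, not the actual rb_vm_opt_aset from the CRuby sources; the name vm_opt_aset_sketch and the exact receiver checks are assumptions for illustration.

// Sketch only: approximates the contract gen_opt_aset relies on.
// Assumes CRuby-internal headers (e.g. vm_core.h) for the macros used here.
static VALUE
vm_opt_aset_sketch(VALUE recv, VALUE key, VALUE val)
{
    if (SPECIAL_CONST_P(recv)) {
        return Qundef;                              // immediates have no fast path
    }
    else if (RBASIC_CLASS(recv) == rb_cHash &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, HASH_REDEFINED_OP_FLAG)) {
        rb_hash_aset(recv, key, val);               // Hash#[]= fast path
        return val;
    }
    else if (RBASIC_CLASS(recv) == rb_cArray && FIXNUM_P(key) &&
             BASIC_OP_UNREDEFINED_P(BOP_ASET, ARRAY_REDEFINED_OP_FLAG)) {
        rb_ary_store(recv, FIX2LONG(key), val);     // Array#[]= fast path (fixnum index)
        return val;
    }
    return Qundef;                                  // caller (the JIT-ed code) side-exits
}

In Ruby terms, code such as h[:k] = 1 compiles to an opt_aset instruction, so the machine code emitted by gen_opt_aset calls the helper and only falls back to a full method dispatch via the side exit when the helper returns Qundef.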