@@ -434,7 +434,7 @@ fn gen_exit(exit_pc: *mut VALUE, ctx: &Context, cb: &mut CodeBlock) -> CodePtr
    }

    // Update CFP->PC
-    // mov(cb, RAX, const_ptr_opnd(exit_pc as *const u8));
+    mov(cb, RAX, const_ptr_opnd(exit_pc as *const u8));
    mov(cb, mem_opnd(64, REG_CFP, RUBY_OFFSET_CFP_PC), RAX);

    // Accumulate stats about interpreter exits
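Note on this hunk: with the first `mov` commented out, the store into `cfp->pc` wrote whatever happened to be in RAX, so a side exit could resume the interpreter at a stale PC. Re-enabling the load makes the pair coherent. A toy plain-Rust model of what the two emitted instructions accomplish at run time (the types here are stand-ins, not CRuby's):

```rust
// Stand-in types; the real code writes through REG_CFP + RUBY_OFFSET_CFP_PC.
type VALUE = u64;
struct ControlFrame {
    pc: *mut VALUE,
}

// Emitted sequence being modeled:
//   mov rax, exit_pc                      ; materialize the exit PC constant
//   mov [cfp + RUBY_OFFSET_CFP_PC], rax   ; store it into cfp->pc
unsafe fn record_exit_pc(cfp: *mut ControlFrame, exit_pc: *mut VALUE) {
    (*cfp).pc = exit_pc;
}

fn main() {
    let mut frame = ControlFrame { pc: std::ptr::null_mut() };
    let mut slot: VALUE = 0;
    unsafe { record_exit_pc(&mut frame, &mut slot) };
    assert!(!frame.pc.is_null()); // the frame now records where to resume
}
```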
@@ -808,7 +808,7 @@ pub fn gen_single_block(blockref: &BlockRef, ec: EcPtr, cb: &mut CodeBlock, ocb:
        // Call the code generation function
        status = gen_fn(&mut jit, &mut ctx, cb, ocb);
    }
-    dbg!(&status, opcode);
+    // dbg!(&status, opcode);

    // If we can't compile this instruction
    // exit to the interpreter and stop compiling
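Note: `dbg!(&status, opcode)` prints to stderr for every VM instruction compiled, which floods the output on any non-trivial program; the commit silences it by commenting it out. If the trace is still useful during bring-up, one alternative (a sketch only, with a made-up feature name, not something this commit adds) is to compile it away behind a Cargo feature:

```rust
// Gate the per-instruction trace behind a hypothetical "codegen-trace"
// feature so normal builds contain no trace code at all.
#[cfg(feature = "codegen-trace")]
macro_rules! codegen_trace {
    ($($arg:tt)*) => { dbg!($($arg)*); };
}
#[cfg(not(feature = "codegen-trace"))]
macro_rules! codegen_trace {
    ($($arg:tt)*) => {};
}

// At the call site shown above this would read:
// codegen_trace!(&status, opcode);
```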
@@ -1746,7 +1746,7 @@ fn gen_setlocal_generic(jit: &mut JITState, ctx: &mut Context, cb: &mut CodeBlock

    // flags & VM_ENV_FLAG_WB_REQUIRED
    let flags_opnd = mem_opnd(64, REG0, SIZEOF_VALUE as i32 * VM_ENV_DATA_INDEX_FLAGS as i32);
-    test(cb, flags_opnd, imm_opnd(VM_ENV_FLAG_WB_REQUIRED as i64));
+    test(cb, flags_opnd, uimm_opnd(VM_ENV_FLAG_WB_REQUIRED.into()));

    // Create a side-exit to fall back to the interpreter
    let side_exit = get_side_exit(jit, ocb, ctx);
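Note: for a small flag like `VM_ENV_FLAG_WB_REQUIRED` the encoded `test` is the same either way; the switch from `imm_opnd(x as i64)` to `uimm_opnd(x.into())` states that the immediate is unsigned and replaces an `as` cast with a lossless u32-to-u64 conversion. A self-contained sketch of the general `as i64` hazard that the unsigned constructor sidesteps (flag values here are placeholders):

```rust
fn main() {
    // A u32 flag widens to i64 without changing value, so this case is safe:
    let small_flag: u32 = 0x8; // placeholder value
    assert_eq!(small_flag as i64 as u64, u64::from(small_flag));

    // But `as i64` on a u64 constant with the top bit set reinterprets it
    // as negative, which is the class of surprise an unsigned immediate
    // constructor avoids by never going through a signed type:
    let big_flag: u64 = 0x8000_0000_0000_0000;
    assert!((big_flag as i64) < 0);
    println!("ok: unsigned constructors keep immediates value-faithful");
}
```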
@@ -5100,111 +5100,6 @@ fn gen_opt_invokebuiltin_delegate(jit: &mut JITState, ctx: &mut Context, cb: &mu

    KeepCompiling
}
-
-// Invalidate all generated code and patch C method return code to contain
-// logic for firing the c_return TracePoint event. Once rb_vm_barrier()
-// returns, all other ractors are pausing inside RB_VM_LOCK_ENTER(), which
-// means they are inside a C routine. If there are any generated code on-stack,
-// they are waiting for a return from a C routine. For every routine call, we
-// patch in an exit after the body of the containing VM instruction. This makes
-// it so all the invalidated code exit as soon as execution logically reaches
-// the next VM instruction. The interpreter takes care of firing the tracing
-// event if it so happens that the next VM instruction has one attached.
-//
-// The c_return event needs special handling as our codegen never outputs code
-// that contains tracing logic. If we let the normal output code run until the
-// start of the next VM instruction by relying on the patching scheme above, we
-// would fail to fire the c_return event. The interpreter doesn't fire the
-// event at an instruction boundary, so simply exiting to the interpreter isn't
-// enough. To handle it, we patch in the full logic at the return address. See
-// full_cfunc_return().
-//
-// In addition to patching, we prevent future entries into invalidated code by
-// removing all live blocks from their iseq.
-void
-rb_yjit_tracing_invalidate_all(void)
-{
-    if (!rb_yjit_enabled_p()) return;
-
-    // Stop other ractors since we are going to patch machine code.
-    RB_VM_LOCK_ENTER();
-    rb_vm_barrier();
-
-    // Make it so all live block versions are no longer valid branch targets
-    rb_objspace_each_objects(tracing_invalidate_all_i, NULL);
-
-    // Apply patches
-    const uint32_t old_pos = cb->write_pos;
-    rb_darray_for(global_inval_patches, patch_idx) {
-        struct codepage_patch patch = rb_darray_get(global_inval_patches, patch_idx);
-        cb.set_pos(patch.inline_patch_pos);
-        uint8_t *jump_target = cb_get_ptr(ocb, patch.outlined_target_pos);
-        jmp_ptr(cb, jump_target);
-    }
-    cb.set_pos(old_pos);
-
-    // Freeze invalidated part of the codepage. We only want to wait for
-    // running instances of the code to exit from now on, so we shouldn't
-    // change the code. There could be other ractors sleeping in
-    // branch_stub_hit(), for example. We could harden this by changing memory
-    // protection on the frozen range.
-    RUBY_ASSERT_ALWAYS(yjit_codepage_frozen_bytes <= old_pos && "frozen bytes should increase monotonically");
-    yjit_codepage_frozen_bytes = old_pos;
-
-    cb_mark_all_executable(ocb);
-    cb_mark_all_executable(cb);
-    RB_VM_LOCK_LEAVE();
-}
-
-static int
-tracing_invalidate_all_i(void *vstart, void *vend, size_t stride, void *data)
-{
-    VALUE v = (VALUE)vstart;
-    for (; v != (VALUE)vend; v += stride) {
-        void *ptr = asan_poisoned_object_p(v);
-        asan_unpoison_object(v, false);
-
-        if (rb_obj_is_iseq(v)) {
-            rb_iseq_t *iseq = (rb_iseq_t *)v;
-            invalidate_all_blocks_for_tracing(iseq);
-        }
-
-        asan_poison_object_if(ptr, v);
-    }
-    return 0;
-}
-
-static void
-invalidate_all_blocks_for_tracing(const rb_iseq_t *iseq)
-{
-    struct rb_iseq_constant_body *body = iseq->body;
-    if (!body) return; // iseq yet to be initialized
-
-    ASSERT_vm_locking();
-
-    // Empty all blocks on the iseq so we don't compile new blocks that jump to the
-    // invalidted region.
-    // TODO Leaking the blocks for now since we might have situations where
-    // a different ractor is waiting in branch_stub_hit(). If we free the block
-    // that ractor can wake up with a dangling block.
-    rb_darray_for(body->yjit_blocks, version_array_idx) {
-        rb_yjit_block_array_t version_array = rb_darray_get(body->yjit_blocks, version_array_idx);
-        rb_darray_for(version_array, version_idx) {
-            // Stop listening for invalidation events like basic operation redefinition.
-            block_t *block = rb_darray_get(version_array, version_idx);
-            yjit_unlink_method_lookup_dependency(block);
-            yjit_block_assumptions_free(block);
-        }
-        rb_darray_free(version_array);
-    }
-    rb_darray_free(body->yjit_blocks);
-    body->yjit_blocks = NULL;
-
-#if USE_MJIT
-    // Reset output code entry point
-    body->jit_func = NULL;
-#endif
-}

*/
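Note: the deleted lines sat inside the big block comment that carries not-yet-ported C code (the surviving `*/` context line closes it), and this chunk covered tracing invalidation: `rb_yjit_tracing_invalidate_all` and its helpers. The heart of the scheme is the patch-application loop: rewind the inline code block to each recorded patch point, write a jump to the corresponding outlined exit, then restore the write cursor. A self-contained toy model of that loop (`ToyCodeBlock` and the byte values are inventions for illustration, not the actual port):

```rust
// Toy model of the invalidation patch loop the removed C code implemented.
struct ToyCodeBlock {
    mem: Vec<u8>,
    write_pos: usize,
}

impl ToyCodeBlock {
    fn set_pos(&mut self, pos: usize) {
        self.write_pos = pos;
    }
    fn write_byte(&mut self, byte: u8) {
        if self.write_pos < self.mem.len() {
            self.mem[self.write_pos] = byte; // patching overwrites in place
        } else {
            self.mem.push(byte); // appending extends the block
        }
        self.write_pos += 1;
    }
}

struct Patch {
    inline_patch_pos: usize, // start of the inline code to stomp with a jump
}

fn apply_patches(cb: &mut ToyCodeBlock, patches: &[Patch]) {
    let old_pos = cb.write_pos; // remember the append cursor
    for patch in patches {
        cb.set_pos(patch.inline_patch_pos);
        cb.write_byte(0xE9); // first byte of an x86-64 near jmp, as a stand-in
    }
    cb.set_pos(old_pos); // restore so future codegen appends where it left off
}

fn main() {
    let mut cb = ToyCodeBlock { mem: vec![0x90; 8], write_pos: 8 };
    apply_patches(&mut cb, &[Patch { inline_patch_pos: 2 }]);
    assert_eq!(cb.mem[2], 0xE9); // patched in the middle
    assert_eq!(cb.write_pos, 8); // cursor restored
}
```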