8000 ZJIT: Implement side exits for entry frames by k0kubun · Pull Request #13469 · ruby/ruby · GitHub
[go: up one dir, main page]

Skip to content

ZJIT: Implement side exits for entry frames #13469

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 2 commits into from
Jun 4, 2025
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Next Next commit
ZJIT: Implement side exits for entry frames
  • Loading branch information
k0kubun committed May 29, 2025
commit 229170713f2046d0b433a969161d7fdc57862718
18 changes: 18 additions & 0 deletions test/ruby/test_zjit.rb
Original file line number Diff line number Diff line change
Expand Up @@ -94,6 +94,24 @@ def test(a) = 1 + a
}, call_threshold: 2
end

# Side exit from a JIT entry frame: `test` is profiled on Integer + Integer
# (first call), compiled at call_threshold: 2, then called with a Float so
# the opt_plus type guard fails — presumably falling back to the interpreter,
# which must still return 3.0. (NOTE(review): indentation flattened by the
# diff view; kept as-is.)
def test_opt_plus_type_guard_exit
assert_compiles '[3, 3.0]', %q{
def test(a) = 1 + a
test(1) # profile opt_plus
[test(2), test(2.0)]
}, call_threshold: 2
end

# Nested variant of the type-guard exit: the guard failure happens inside
# `side_exit`, which is reached through another compiled frame (`jit_frame`),
# so unwinding has to rewind caller frames. Skipped for now — the omit
# message states rewind_caller_frames is not implemented yet.
def test_opt_plus_type_guard_nested_exit
omit 'rewind_caller_frames is not implemented yet'
assert_compiles '[3, 3.0]', %q{
def side_exit(n) = 1 + n
def jit_frame(n) = 1 + side_exit(n)
def entry(n) = jit_frame(n)
[entry(2), entry(2.0)]
}, call_threshold: 2
end

# Test argument ordering
def test_opt_minus
assert_compiles '2', %q{
Expand Down
4 changes: 2 additions & 2 deletions zjit/src/asm/arm64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -644,7 +644,7 @@ pub fn mov(cb: &mut CodeBlock, rd: A64Opnd, rm: A64Opnd) {

LogicalImm::mov(rd.reg_no, bitmask_imm, rd.num_bits).into()
},
_ => panic!("Invalid operand combination to mov instruction")
_ => panic!("Invalid operand combination to mov instruction: {rd:?}, {rm:?}")
};

cb.write_bytes(&bytes);
Expand Down Expand Up @@ -940,7 +940,7 @@ pub fn stur(cb: &mut CodeBlock, rt: A64Opnd, rn: A64Opnd) {

LoadStore::stur(rt.reg_no, rn.base_reg_no, rn.disp as i16, rn.num_bits).into()
},
_ => panic!("Invalid operand combination to stur instruction.")
_ => panic!("Invalid operand combination to stur instruction: {rt:?}, {rn:?}")
};

cb.write_bytes(&bytes);
Expand Down
5 changes: 4 additions & 1 deletion zjit/src/asm/arm64/opnd.rs
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,9 @@ pub const X20_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 20 };
pub const X21_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 21 };
pub const X22_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 22 };

/// AArch64 link register (x30): holds the return address of the current
/// frame (see `return_addr_opnd` in the arm64 backend, which exposes it).
pub const X30_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 30 };

/// AArch64 zero register (xzr), encoded as register number 31.
pub const XZR_REG: A64Reg = A64Reg { num_bits: 64, reg_no: 31 };

Expand Down Expand Up @@ -153,7 +156,7 @@ pub const X26: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 26 });
pub const X27: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 27 });
pub const X28: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 28 });
pub const X29: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 29 });
pub const X30: A64Opnd = A64Opnd::Reg(A64Reg { num_bits: 64, reg_no: 30 });
pub const X30: A64Opnd = A64Opnd::Reg(X30_REG);
pub const X31: A64Opnd = A64Opnd::Reg(XZR_REG);

// 32-bit registers
Expand Down
18 changes: 12 additions & 6 deletions zjit/src/backend/arm64/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -211,6 +211,11 @@ impl Assembler
vec![X1_REG, X9_REG, X10_REG, X11_REG, X12_REG, X13_REG, X14_REG, X15_REG]
}

/// Get the address that the current frame returns to.
/// On arm64 this is the link register (x30 / `X30_REG`), so callers can
/// read or spill the return address without touching the stack.
pub fn return_addr_opnd() -> Opnd {
Opnd::Reg(X30_REG)
}

/// Split platform-specific instructions
/// The transformations done here are meant to make our lives simpler in later
/// stages of the compilation pipeline.
Expand Down Expand Up @@ -757,7 +762,7 @@ impl Assembler
/// called when lowering any of the conditional jump instructions.
fn emit_conditional_jump<const CONDITION: u8>(cb: &mut CodeBlock, target: Target) {
match target {
Target::CodePtr(dst_ptr) | Target::SideExitPtr(dst_ptr) => {
Target::CodePtr(dst_ptr) => {
let dst_addr = dst_ptr.as_offset();
let src_addr = cb.get_write_ptr().as_offset();

Expand Down Expand Up @@ -829,8 +834,10 @@ impl Assembler
}

/// Emit a CBZ or CBNZ which branches when a register is zero or non-zero
fn emit_cmp_zero_jump(cb: &mut CodeBlock, reg: A64Opnd, branch_if_zero: bool, target: Target) {
if let Target::SideExitPtr(dst_ptr) = target {
fn emit_cmp_zero_jump(_cb: &mut CodeBlock, _reg: A64Opnd, _branch_if_zero: bool, target: Target) {
if let Target::Label(_) = target {
unimplemented!("this should be re-implemented with Label for side exits");
/*
let dst_addr = dst_ptr.as_offset();
let src_addr = cb.get_write_ptr().as_offset();

Expand Down Expand Up @@ -862,6 +869,7 @@ impl Assembler
br(cb, Assembler::SCRATCH0);

}
*/
} else {
unreachable!("We should only generate Joz/Jonz with side-exit targets");
}
Expand Down Expand Up @@ -1162,9 +1170,6 @@ impl Assembler
Target::CodePtr(dst_ptr) => {
emit_jmp_ptr(cb, dst_ptr, true);
},
Target::SideExitPtr(dst_ptr) => {
emit_jmp_ptr(cb, dst_ptr, false);
},
Target::Label(label_idx) => {
// Here we're going to save enough space for
// ourselves and then come back and write the
Expand Down Expand Up @@ -1297,6 +1302,7 @@ impl Assembler
pub fn compile_with_regs(self, cb: &mut CodeBlock, regs: Vec<Reg>) -> Option<(CodePtr, Vec<u32>)> {
let asm = self.arm64_split();
let mut asm = asm.alloc_regs(regs);
asm.compile_side_exits()?;

// Create label instances in the code block
for (idx, name) in asm.label_names.iter().enumerate() {
Expand Down
Loading
Loading
0