gh-112354: Add executor for less-taken branch #112902
```diff
@@ -752,7 +752,7 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
         goto exit_unwind;
     }

-// Jump here from ENTER_EXECUTOR, and code under the deoptimize label
+// Jump here from ENTER_EXECUTOR and exit_trace.
 enter_tier_one:
     next_instr = frame->instr_ptr;
```
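For orientation, here is a minimal, self-contained sketch of how these labels cooperate: Tier 1 interprets bytecode until `ENTER_EXECUTOR` jumps into a uop trace (Tier 2), and a trace exit or deoptimization jumps back. This is placeholder logic, not the real `ceval.c`; in particular, the PR distinguishes `enter_tier_one` (which reloads `next_instr` from `frame->instr_ptr`) from `resume_frame` (used when `next_instr` is already loaded), a distinction this sketch collapses.

```c
#include <stdio.h>

/* Placeholder sketch of the label-based tier switching in
 * _PyEval_EvalFrameDefault; the real interpreter dispatches bytecode
 * and uop traces instead of counting ticks. */
static int
eval_frame_sketch(void)
{
    int ticks = 0;
    int trace_ticks = 0;

enter_tier_one:
    while (ticks < 10) {
        ticks++;
        if (ticks == 3) {
            goto enter_tier_two;   /* ENTER_EXECUTOR: a hot spot has a trace */
        }
    }
    return ticks + trace_ticks;

enter_tier_two:
    while (trace_ticks < 4) {
        trace_ticks++;             /* execute uops from the trace */
    }
    goto enter_tier_one;           /* _EXIT_TRACE / deopt: back to Tier 1 */
}

int
main(void)
{
    printf("%d\n", eval_frame_sketch());
    return 0;
}
```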
```diff
@@ -1083,34 +1083,34 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
         int pc = next_uop - 1 - current_executor->trace;
         _PyExecutorObject **pexecutor = current_executor->executors + pc;
         if (*pexecutor != NULL) {
 #ifdef Py_DEBUG
             PyCodeObject *code = _PyFrame_GetCode(frame);
             DPRINTF(2, "Jumping to new executor for %s (%s:%d) at byte offset %d\n",
                     PyUnicode_AsUTF8(code->co_qualname),
                     PyUnicode_AsUTF8(code->co_filename),
                     code->co_firstlineno,
                     2 * (int)(frame->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(frame))));
 #endif
             Py_DECREF(current_executor);
             current_executor = (_PyUOpExecutorObject *)*pexecutor;
             Py_INCREF(current_executor);
             goto enter_tier_two;
         }

         // Increment and check side exit counter.
+        next_instr = frame->instr_ptr;
         uint16_t *pcounter = current_executor->counters + pc;
         *pcounter += 1;
         if (*pcounter != 32 ||  // TODO: use resume_threshold
             tstate->interp->optimizer == &_PyOptimizer_Default)
         {
-            goto enter_tier_one;
+            goto resume_frame;
         }

         // Decode instruction to look past EXTENDED_ARG.
-        _Py_CODEUNIT *src, *dest;
-        src = dest = frame->instr_ptr;
-        opcode = src->op.code;
+        opcode = next_instr[0].op.code;
         if (opcode == EXTENDED_ARG) {
-            src++;
-            opcode = src->op.code;
+            opcode = next_instr[1].op.code;
         }

         // For selected opcodes build a new executor and enter it now.
```
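The hunk above reads the opcode at `next_instr`, looking past a single `EXTENDED_ARG` prefix. As a refresher, here is a standalone sketch of how `EXTENDED_ARG` widens the following instruction's oparg and why the real opcode sits behind the prefix; the opcode values and the `codeunit` struct are invented for the example (real CPython uses `_Py_CODEUNIT` and allows up to three prefixes).

```c
#include <stdint.h>
#include <stdio.h>

/* Invented opcode values for the example only. */
enum { EXTENDED_ARG_SK = 1, POP_JUMP_IF_FALSE_SK = 2 };

typedef struct { uint8_t code; uint8_t arg; } codeunit;

/* Skip EXTENDED_ARG prefixes, accumulating their bytes as the high
 * bits of the oparg, and return the "real" opcode. */
static uint8_t
decode(const codeunit *instr, int *oparg)
{
    *oparg = 0;
    while (instr->code == EXTENDED_ARG_SK) {
        *oparg = (*oparg << 8) | instr->arg;  /* prefix holds high bits */
        instr++;
    }
    *oparg = (*oparg << 8) | instr->arg;
    return instr->code;
}

int
main(void)
{
    codeunit code[] = {
        { EXTENDED_ARG_SK, 0x01 },       /* high byte of the jump target */
        { POP_JUMP_IF_FALSE_SK, 0x2A },  /* real opcode, low byte 0x2A */
    };
    int oparg;
    uint8_t op = decode(code, &oparg);
    printf("opcode=%u oparg=0x%X\n", op, oparg);  /* opcode=2 oparg=0x12A */
    return 0;
}
```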
**Review comment:** Why "selected opcodes", why not everywhere?

**Author:** In an earlier version that somehow didn't work. Right now the check that the new trace isn't going to immediately deopt again relies on these opcodes. I figured that once we have the side-exit machinery working we could gradually widen the scope to other deoptimizations. Also, not all deoptimizations are worth the effort (e.g. the PEP 523 test).

**Review comment:** No special cases, please; they just make the code more complicated and slower.

**Author:** There are several reasons. First, as I explain below, for bytecodes other than branches I can't promise an exact check for whether the newly created sub-executor doesn't just repeat the same deoptimizing uop that triggered its creation (in which case the sub-executor would always deopt immediately, if it is entered at all). Second, for most bytecodes other than branches, deoptimization paths are relatively rare (IIRC this is apparent from the pystats data, with the exception of some […]). For branches, by contrast, we expect many cases where the "common" path is not much more common than the "uncommon" path (e.g. 60/40 or 70/30). Now, it might make sense to have a different special case here, where if e.g. […]. I propose this PR as a starting point for further iterations, not as the ultimate design for side exits. Let's discuss this Monday.
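To make that discussion concrete, here is a hypothetical predicate of the kind the "selected opcodes" comment implies; none of this is code from the PR, and the opcode values are placeholders (CPython's real values live in the generated opcode headers).

```c
#include <stdio.h>

/* Placeholder opcode values, invented for this sketch. */
enum {
    POP_JUMP_IF_FALSE = 100,
    POP_JUMP_IF_TRUE,
    POP_JUMP_IF_NONE,
    POP_JUMP_IF_NOT_NONE,
    CALL_PY_EXACT_ARGS,
};

/* Only conditional branches qualify: a branch can plausibly be hot on
 * both sides at once (think 60/40 splits), whereas most other
 * deoptimizations are too rare to be worth a sub-executor. */
static int
worth_a_side_exit(int tier1_opcode)
{
    switch (tier1_opcode) {
        case POP_JUMP_IF_FALSE:
        case POP_JUMP_IF_TRUE:
        case POP_JUMP_IF_NONE:
        case POP_JUMP_IF_NOT_NONE:
            return 1;
        default:
            return 0;   /* e.g. PEP 523 checks deopt too rarely to chase */
    }
}

int
main(void)
{
    printf("%d %d\n",
           worth_a_side_exit(POP_JUMP_IF_TRUE),     /* 1 */
           worth_a_side_exit(CALL_PY_EXACT_ARGS));  /* 0 */
    return 0;
}
```

The design intuition from the thread: the less-taken side of a branch can still be hot enough to deserve its own executor, while rare deoptimization paths would only waste executors.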
```diff
@@ -1122,39 +1122,49 @@ _PyEval_EvalFrameDefault(PyThreadState *tstate, _PyInterpreterFrame *frame, int
             DPRINTF(2, "--> %s @ %d in %p has %d side exits\n",
                     _PyUOpName(uopcode), pc, current_executor, (int)(*pcounter));
             DPRINTF(2, " T1: %s\n", _PyOpcode_OpName[opcode]);
             // The counter will cycle around once the 16 bits overflow
-            int optimized = _PyOptimizer_Unanchored(frame, dest, pexecutor, stack_pointer);
+            int optimized = _PyOptimizer_Unanchored(frame, next_instr, pexecutor, stack_pointer);
             if (optimized < 0) {
                 goto error_tier_two;
             }
             if (!optimized) {
                 DPRINTF(2, "--> Failed to optimize %s @ %d in %p\n",
                         _PyUOpName(uopcode), pc, current_executor);
             }
             else {
 #ifdef Py_DEBUG
-                DPRINTF(1, "--> Optimized %s @ %d in %p\n",
-                        _PyUOpName(uopcode), pc, current_executor);
-                DPRINTF(1, " T1: %s\n", _PyOpcode_OpName[src->op.code]);
+                PyCodeObject *code = _PyFrame_GetCode(frame);
+                DPRINTF(2, "Jumping to fresh executor for %s (%s:%d) at byte offset %d\n",
+                        PyUnicode_AsUTF8(code->co_qualname),
+                        PyUnicode_AsUTF8(code->co_filename),
+                        code->co_firstlineno,
+                        2 * (int)(frame->instr_ptr - _PyCode_CODE(_PyFrame_GetCode(frame))));
 #endif
                 Py_DECREF(current_executor);
                 current_executor = (_PyUOpExecutorObject *)*pexecutor;
```
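Both hunks hand control from one executor to another with a `Py_DECREF`/`Py_INCREF` pair around the swap. The pattern only works if something else keeps both objects alive across the swap, e.g. the `executors` side-exit table holding a strong reference to the sub-executor and another owner (such as the code object) keeping the old executor alive. A toy illustration of that invariant, with invented refcounting helpers; only the handoff pattern mirrors the diff.

```c
#include <stdlib.h>
#include <stdio.h>

/* Toy refcounted object, standing in for _PyExecutorObject. */
typedef struct { int refcnt; const char *name; } obj;

static obj *incref(obj *o) { o->refcnt++; return o; }
static void decref(obj *o) {
    if (--o->refcnt == 0) {
        printf("freeing %s\n", o->name);
        free(o);
    }
}

int
main(void)
{
    obj *root = malloc(sizeof(obj));
    *root = (obj){ .refcnt = 2, .name = "root" };  /* us + "code object" */
    obj *sub = malloc(sizeof(obj));
    *sub = (obj){ .refcnt = 1, .name = "sub" };    /* held by the table */
    obj *table[1] = { sub };                       /* executors[] */

    obj *current = root;            /* we own one reference to current */
    decref(current);                /* safe: other owners keep both alive */
    current = incref(table[0]);     /* now we own a reference to sub */

    printf("current=%s refcnt=%d\n", current->name, current->refcnt);
    decref(current);                /* the table still owns sub */
    return 0;
}
```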
```diff
-                // TODO: Check at least two uops: _IS_NONE, _POP_JUMP_IF_TRUE/FALSE.
-                if (current_executor->trace[0].opcode != uopcode) {
+                // Reject trace if it repeats the uop that just deoptimized.
```
**Review comment:** Why?

**Author:** This test may be a bit imprecise (*), but it tries to discard the case where, even though the counter in the executor indicated that this side exit is "hot", the Tier 1 bytecode hasn't been re-specialized yet. In that case the trace projection will just repeat the uop that took the deopt side exit, causing it to immediately deopt again. That seems a waste of time and executors: eventually the sub-executor's deopt counter will also indicate that it is hot, and then we'll try again, but it is better (if we can catch it) to avoid creating the sub-executor in the first place, relying instead on exponential backoff for the side-exit counter (implemented below at L1180 and ff.). For various reasons, the side-exit counters and the Tier 1 deopt counters don't run in sync, so the side-exit counter may trigger before Tier 1 has re-specialized; this check gives that another chance. The test I would really like to use here would check whether the Tier 1 opcode is still unchanged (i.e., not re-specialized), but the executor doesn't record that information (and recording it would take a lot of space: at least an extra byte for each uop that can deoptimize).

(*) The test I wrote is exact for the conditional branches I special-cased above (that's why there's a further special case here for `_IS_NONE`).
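The reply above leans on exponential backoff for the side-exit counter, which the PR implements further down (not visible in this excerpt). Here is a hedged sketch of what such a scheme typically looks like; the struct, names, and doubling policy are assumptions, with only the initial threshold of 32 taken from the diff's TODO.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical exponential backoff on a 16-bit side-exit counter: after
 * each attempt, double the threshold so a repeatedly unprofitable exit
 * is retried less and less often.  Not the PR's actual code. */
typedef struct {
    uint16_t counter;
    uint16_t threshold;
} side_exit;

static int
exit_is_hot(side_exit *e)
{
    if (++e->counter < e->threshold) {
        return 0;
    }
    e->counter = 0;
    if (e->threshold < 0x8000) {
        e->threshold *= 2;   /* back off: next attempt costs twice as much */
    }
    return 1;
}

int
main(void)
{
    side_exit e = { 0, 32 };   /* 32 matches the diff's TODO threshold */
    for (int i = 1; i <= 200; i++) {
        if (exit_is_hot(&e)) {
            printf("attempt optimization at hit %d (next threshold %u)\n",
                   i, e.threshold);   /* fires at hits 32 and 96 */
        }
    }
    return 0;
}
```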
```diff
+                int jump_opcode = current_executor->trace[0].opcode;
+                if (jump_opcode == _IS_NONE) {
+                    jump_opcode = current_executor->trace[1].opcode;
+                }
+                if (jump_opcode != uopcode) {
                     Py_INCREF(current_executor);
-                    goto enter_tier_two;  // Yes!
+                    goto enter_tier_two;  // All systems go!
                 }
-                // This is guaranteed to deopt again; forget about it
-                DPRINTF(2, "Alas, it's the same uop again -- discarding trace\n");
+                // The trace is guaranteed to deopt again; forget about it.
```
**Review comment:** Is it? Why?

**Author:** See the explanation above.
```diff
+                DPRINTF(2, "Alas, it's the same uop again (%s) -- discarding trace\n",
+                        _PyUOpName(jump_opcode));
                 *pexecutor = NULL;
                 // It will be decref'ed below.
             }
         }
         Py_DECREF(current_executor);
-        goto enter_tier_one;
+        goto resume_frame;

 // Jump here from _EXIT_TRACE
 exit_trace:
```