Skip to content

Navigation Menu

Sign in
Appearance settings

Search code, repositories, users, issues, pull requests...

Provide feedback

We read every piece of feedback, and take your input very seriously.

Saved searches

Use saved searches to filter your results more quickly

Appearance settings

bpo-47172: Make virtual opcodes negative. Make is_jump detect only actual jumps, use is_block_push for the exception block setup opcodes. #32200

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 12 commits into from
Apr 1, 2022
Merged
Changes from 8 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
66 changes: 31 additions & 35 deletions 66 Python/compile.c
Original file line number Diff line number Diff line change
Expand Up @@ -72,18 +72,20 @@

/* Pseudo-instructions used in the compiler,
* but turned into NOPs by the assembler. */
#define SETUP_FINALLY 255
#define SETUP_CLEANUP 254
#define SETUP_WITH 253
#define POP_BLOCK 252
#define JUMP 251
#define SETUP_FINALLY 256
Copy link
Member

@markshannon markshannon Mar 31, 2022

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Just an idea, but would it make sense for the virtual opcodes to be negative?
>0: Real
==0: Cache
<0: Virtual
It seems like a simpler scheme.

#define SETUP_CLEANUP 257
#define SETUP_WITH 258
#define POP_BLOCK 259
#define JUMP 260

iritkatriel marked this conversation as resolved.
Show resolved Hide resolved
#define IS_VIRTUAL_OPCODE(opcode) ((opcode) >= 256)

#define IS_TOP_LEVEL_AWAIT(c) ( \
(c->c_flags->cf_flags & PyCF_ALLOW_TOP_LEVEL_AWAIT) \
&& (c->u->u_ste->ste_type == ModuleBlock))

struct instr {
unsigned char i_opcode;
int i_opcode;
int i_oparg;
/* target block (if jump instruction) */
struct basicblock_ *i_target;
Expand Down Expand Up @@ -115,8 +117,13 @@ is_bit_set_in_table(const uint32_t *table, int bitindex) {
* Word is indexed by (bitindex>>ln(size of int in bits)).
* Bit within word is the low bits of bitindex.
*/
uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
if (bitindex < 256) {
uint32_t word = table[bitindex >> LOG_BITS_PER_INT];
return (word >> (bitindex & MASK_LOW_LOG_BITS)) & 1;
}
else {
return 0;
}
}

static inline int
Expand All @@ -125,18 +132,25 @@ is_relative_jump(struct instr *i)
return is_bit_set_in_table(_PyOpcode_RelativeJump, i->i_opcode);
}

/* Report whether an instruction is one of the compiler's exception-block
 * setup pseudo-instructions (SETUP_FINALLY, SETUP_WITH, SETUP_CLEANUP).
 * These push an entry on the compiler's block stack but, unlike real
 * jumps, are turned into NOPs by the assembler. */
static inline int
is_block_push(struct instr *instr)
{
    switch (instr->i_opcode) {
        case SETUP_FINALLY:
        case SETUP_WITH:
        case SETUP_CLEANUP:
            return 1;
        default:
            return 0;
    }
}

static inline int
is_jump(struct instr *i)
{
return i->i_opcode >= SETUP_WITH ||
i->i_opcode == JUMP ||
return i->i_opcode == JUMP ||
is_bit_set_in_table(_PyOpcode_Jump, i->i_opcode);
}

static int
instr_size(struct instr *instruction)
{
int opcode = instruction->i_opcode;
assert(!IS_VIRTUAL_OPCODE(opcode));
int oparg = HAS_ARG(opcode) ? instruction->i_oparg : 0;
int extended_args = (0xFFFFFF < oparg) + (0xFFFF < oparg) + (0xFF < oparg);
int caches = _PyOpcode_Caches[opcode];
Expand All @@ -147,6 +161,7 @@ static void
write_instr(_Py_CODEUNIT *codestr, struct instr *instruction, int ilen)
{
int opcode = instruction->i_opcode;
assert(!IS_VIRTUAL_OPCODE(opcode));
int oparg = HAS_ARG(opcode) ? instruction->i_oparg : 0;
int caches = _PyOpcode_Caches[opcode];
switch (ilen - caches) {
Expand Down Expand Up @@ -7038,7 +7053,7 @@ stackdepth(struct compiler *c)
maxdepth = new_depth;
}
assert(depth >= 0); /* invalid code or bug in stackdepth() */
if (is_jump(instr)) {
if (is_jump(instr) || is_block_push(instr)) {
effect = stack_effect(instr->i_opcode, instr->i_oparg, 1);
assert(effect != PY_INVALID_STACK_EFFECT);
int target_depth = depth + effect;
Expand Down Expand Up @@ -7157,13 +7172,6 @@ assemble_emit_table_pair(struct assembler* a, PyObject** table, int* offset,
return 1;
}

static int
is_block_push(struct instr *instr)
{
int opcode = instr->i_opcode;
return opcode == SETUP_FINALLY || opcode == SETUP_WITH || opcode == SETUP_CLEANUP;
}

static basicblock *
push_except_block(ExceptStack *stack, struct instr *setup) {
assert(is_block_push(setup));
Expand Down Expand Up @@ -8598,6 +8606,7 @@ apply_static_swaps(basicblock *block, int i)
static bool
jump_thread(struct instr *inst, struct instr *target, int opcode)
{
assert(!IS_VIRTUAL_OPCODE(opcode) || opcode == JUMP);
assert(is_jump(inst));
assert(is_jump(target));
// bpo-45773: If inst->i_target == target->i_target, then nothing actually
Expand Down Expand Up @@ -8627,7 +8636,7 @@ optimize_basic_block(struct compiler *c, basicblock *bb, PyObject *consts)
struct instr *inst = &bb->b_instr[i];
int oparg = inst->i_oparg;
int nextop = i+1 < bb->b_iused ? bb->b_instr[i+1].i_opcode : 0;
if (is_jump(inst)) {
if (is_jump(inst) || is_block_push(inst)) {
brandtbucher marked this conversation as resolved.
Show resolved Hide resolved
/* Skip over empty basic blocks. */
while (inst->i_target->b_iused == 0) {
inst->i_target = inst->i_target->b_next;
Expand Down Expand Up @@ -8994,8 +9003,9 @@ mark_reachable(struct assembler *a) {
}
for (int i = 0; i < b->b_iused; i++) {
basicblock *target;
if (is_jump(&b->b_instr[i])) {
target = b->b_instr[i].i_target;
struct instr *instr = &b->b_instr[i];
if (is_jump(instr) || is_block_push(instr)) {
target = instr->i_target;
if (target->b_predecessors == 0) {
*sp++ = target;
}
Expand Down Expand Up @@ -9071,13 +9081,6 @@ propagate_line_numbers(struct assembler *a) {
}
}
if (is_jump(&b->b_instr[b->b_iused-1])) {
switch (b->b_instr[b->b_iused-1].i_opcode) {
/* Note: Only actual jumps, not exception handlers */
case SETUP_WITH:
case SETUP_FINALLY:
case SETUP_CLEANUP:
continue;
}
basicblock *target = b->b_instr[b->b_iused-1].i_target;
if (target->b_predecessors == 1) {
if (target->b_instr[0].i_lineno < 0) {
Expand Down Expand Up @@ -9203,13 +9206,6 @@ duplicate_exits_without_lineno(struct compiler *c)
*/
for (basicblock *b = c->u->u_blocks; b != NULL; b = b->b_list) {
if (b->b_iused > 0 && is_jump(&b->b_instr[b->b_iused-1])) {
switch (b->b_instr[b->b_iused-1].i_opcode) {
/* Note: Only actual jumps, not exception handlers */
case SETUP_WITH:
case SETUP_FINALLY:
case SETUP_CLEANUP:
continue;
}
brandtbucher marked this conversation as resolved.
Show resolved Hide resolved
basicblock *target = b->b_instr[b->b_iused-1].i_target;
if (is_exit_without_lineno(target) && target->b_predecessors > 1) {
basicblock *new_target = compiler_copy_block(c, target);
Expand Down
Morty Proxy This is a proxified and sanitized view of the page, visit original site.