@@ -95,7 +95,8 @@ typedef struct basicblock_ {
95
95
struct basicblock_ * b_next ;
96
96
/* b_return is true if a RETURN_VALUE opcode is inserted. */
97
97
unsigned b_return : 1 ;
98
- unsigned b_reachable : 1 ;
98
+ /* Number of predecessors that a block has. */
99
+ int b_predecessors ;
99
100
/* Basic block has no fall through (it ends with a return, raise or jump) */
100
101
unsigned b_nofallthrough : 1 ;
101
102
/* Basic block exits scope (it ends with a return or raise) */
@@ -825,6 +826,7 @@ compiler_copy_block(struct compiler *c, basicblock *block)
825
826
result -> b_instr [n ] = block -> b_instr [i ];
826
827
}
827
828
result -> b_exit = block -> b_exit ;
829
+ result -> b_nofallthrough = 1 ;
828
830
return result ;
829
831
}
830
832
@@ -1169,7 +1171,7 @@ PyCompile_OpcodeStackEffect(int opcode, int oparg)
1169
1171
*/
1170
1172
1171
1173
static int
1172
- compiler_addop (struct compiler * c , int opcode )
1174
+ compiler_addop_line (struct compiler * c , int opcode , int line )
1173
1175
{
1174
1176
basicblock * b ;
1175
1177
struct instr * i ;
@@ -1184,10 +1186,23 @@ compiler_addop(struct compiler *c, int opcode)
1184
1186
i -> i_oparg = 0 ;
1185
1187
if (opcode == RETURN_VALUE )
1186
1188
b -> b_return = 1 ;
1187
- i -> i_lineno = c -> u -> u_lineno ;
1189
+ i -> i_lineno = line ;
1188
1190
return 1 ;
1189
1191
}
1190
1192
1193
+ static int
1194
+ compiler_addop (struct compiler * c , int opcode )
1195
+ {
1196
+ return compiler_addop_line (c , opcode , c -> u -> u_lineno );
1197
+ }
1198
+
1199
+ static int
1200
+ compiler_addop_noline (struct compiler * c , int opcode )
1201
+ {
1202
+ return compiler_addop_line (c , opcode , -1 );
1203
+ }
1204
+
1205
+
1191
1206
static Py_ssize_t
1192
1207
compiler_add_o (PyObject * dict , PyObject * o )
1193
1208
{
@@ -1448,6 +1463,11 @@ compiler_addop_j_noline(struct compiler *c, int opcode, basicblock *b)
1448
1463
return 0; \
1449
1464
}
1450
1465
1466
+ #define ADDOP_NOLINE (C , OP ) { \
1467
+ if (!compiler_addop_noline((C), (OP))) \
1468
+ return 0; \
1469
+ }
1470
+
1451
1471
#define ADDOP_IN_SCOPE (C , OP ) { \
1452
1472
if (!compiler_addop((C), (OP))) { \
1453
1473
compiler_exit_scope(c); \
@@ -2955,9 +2975,7 @@ compiler_try_finally(struct compiler *c, stmt_ty s)
2955
2975
else {
2956
2976
VISIT_SEQ (c , stmt , s -> v .Try .body );
2957
2977
}
2958
- /* Mark code as artificial */
2959
- c -> u -> u_lineno = -1 ;
2960
- ADDOP (c , POP_BLOCK );
2978
+ ADDOP_NOLINE (c , POP_BLOCK );
2961
2979
compiler_pop_fblock (c , FINALLY_TRY , body );
2962
2980
VISIT_SEQ (c , stmt , s -> v .Try .finalbody );
2963
2981
ADDOP_JUMP_NOLINE (c , JUMP_FORWARD , exit );
@@ -3019,9 +3037,9 @@ compiler_try_except(struct compiler *c, stmt_ty s)
3019
3037
if (!compiler_push_fblock (c , TRY_EXCEPT , body , NULL , NULL ))
3020
3038
return 0 ;
3021
3039
VISIT_SEQ (c , stmt , s -> v .Try .body );
3022
- ADDOP (c , POP_BLOCK );
3023
3040
compiler_pop_fblock (c , TRY_EXCEPT , body );
3024
- ADDOP_JUMP (c , JUMP_FORWARD , orelse );
3041
+ ADDOP_NOLINE (c , POP_BLOCK );
3042
+ ADDOP_JUMP_NOLINE (c , JUMP_FORWARD , orelse );
3025
3043
n = asdl_seq_LEN (s -> v .Try .handlers );
3026
3044
compiler_use_next_block (c , except );
3027
3045
/* Runtime will push a block here, so we need to account for that */
@@ -4925,6 +4943,9 @@ compiler_with(struct compiler *c, stmt_ty s, int pos)
4925
4943
else if (!compiler_with (c , s , pos ))
4926
4944
return 0 ;
4927
4945
4946
+
4947
+ /* Mark all following code as artificial */
4948
+ c -> u -> u_lineno = -1 ;
4928
4949
ADDOP (c , POP_BLOCK );
4929
4950
compiler_pop_fblock (c , WITH , block );
4930
4951
@@ -6396,36 +6417,71 @@ mark_reachable(struct assembler *a) {
6396
6417
if (stack == NULL ) {
6397
6418
return -1 ;
6398
6419
}
6399
- a -> a_entry -> b_reachable = 1 ;
6420
+ a -> a_entry -> b_predecessors = 1 ;
6400
6421
* sp ++ = a -> a_entry ;
6401
6422
while (sp > stack ) {
6402
6423
basicblock * b = * (-- sp );
6403
- if (b -> b_next && !b -> b_nofallthrough && b -> b_next -> b_reachable == 0 ) {
6404
- b -> b_next -> b_reachable = 1 ;
6405
- * sp ++ = b -> b_next ;
6424
+ if (b -> b_next && !b -> b_nofallthrough ) {
6425
+ if (b -> b_next -> b_predecessors == 0 ) {
6426
+ * sp ++ = b -> b_next ;
6427
+ }
6428
+ b -> b_next -> b_predecessors ++ ;
6406
6429
}
6407
6430
for (int i = 0 ; i < b -> b_iused ; i ++ ) {
6408
6431
basicblock * target ;
6409
6432
if (is_jump (& b -> b_instr [i ])) {
6410
6433
target = b -> b_instr [i ].i_target ;
6411
- if (target -> b_reachable == 0 ) {
6412
- target -> b_reachable = 1 ;
6434
+ if (target -> b_predecessors == 0 ) {
6413
6435
* sp ++ = target ;
6414
6436
}
6437
+ target -> b_predecessors ++ ;
6415
6438
}
6416
6439
}
6417
6440
}
6418
6441
PyObject_Free (stack );
6419
6442
return 0 ;
6420
6443
}
6421
6444
6445
+ static void
6446
+ eliminate_empty_basic_blocks (basicblock * entry ) {
6447
+ /* Eliminate empty blocks */
6448
+ for (basicblock * b = entry ; b != NULL ; b = b -> b_next ) {
6449
+ basicblock * next = b -> b_next ;
6450
+ if (next ) {
6451
+ while (next -> b_iused == 0 && next -> b_next ) {
6452
+ next = next -> b_next ;
6453
+ }
6454
+ b -> b_next = next ;
6455
+ }
6456
+ }
6457
+ for (basicblock * b = entry ; b != NULL ; b = b -> b_next ) {
6458
+ if (b -> b_iused == 0 ) {
6459
+ continue ;
6460
+ }
6461
+ if (is_jump (& b -> b_instr [b -> b_iused - 1 ])) {
6462
+ basicblock * target = b -> b_instr [b -> b_iused - 1 ].i_target ;
6463
+ while (target -> b_iused == 0 ) {
6464
+ target = target -> b_next ;
6465
+ }
6466
+ b -> b_instr [b -> b_iused - 1 ].i_target = target ;
6467
+ }
6468
+ }
6469
+ }
6470
+
6471
+
6422
6472
/* If an instruction has no line number, but it's predecessor in the BB does,
6423
- * then copy the line number. This reduces the size of the line number table,
6473
+ * then copy the line number. If a successor block has no line number, and only
6474
+ * one predecessor, then inherit the line number.
6475
+ * This ensures that all exit blocks (with one predecessor) receive a line number.
6476
+ * Also reduces the size of the line number table,
6424
6477
* but has no impact on the generated line number events.
6425
6478
*/
6426
6479
static void
6427
- minimize_lineno_table (struct assembler * a ) {
6480
+ propogate_line_numbers (struct assembler * a ) {
6428
6481
for (basicblock * b = a -> a_entry ; b != NULL ; b = b -> b_next ) {
6482
+ if (b -> b_iused == 0 ) {
6483
+ continue ;
6484
+ }
6429
6485
int prev_lineno = -1 ;
6430
6486
for (int i = 0 ; i < b -> b_iused ; i ++ ) {
6431
6487
if (b -> b_instr [i ].i_lineno < 0 ) {
@@ -6435,7 +6491,27 @@ minimize_lineno_table(struct assembler *a) {
6435
6491
prev_lineno = b -> b_instr [i ].i_lineno ;
6436
6492
}
6437
6493
}
6438
-
6494
+ if (!b -> b_nofallthrough && b -> b_next -> b_predecessors == 1 ) {
6495
+ assert (b -> b_next -> b_iused );
6496
+ if (b -> b_next -> b_instr [0 ].i_lineno < 0 ) {
6497
+ b -> b_next -> b_instr [0 ].i_lineno = prev_lineno ;
6498
+ }
6499
+ }
6500
+ if (is_jump (& b -> b_instr [b -> b_iused - 1 ])) {
6501
+ switch (b -> b_instr [b -> b_iused - 1 ].i_opcode ) {
6502
+ /* Note: Only actual jumps, not exception handlers */
6503
+ case SETUP_ASYNC_WITH :
6504
+ case SETUP_WITH :
6505
+ case SETUP_FINALLY :
6506
+ continue ;
6507
+ }
6508
+ basicblock * target = b -> b_instr [b -> b_iused - 1 ].i_target ;
6509
+ if (target -> b_predecessors == 1 ) {
6510
+ if (target -> b_instr [0 ].i_lineno < 0 ) {
6511
+ target -> b_instr [0 ].i_lineno = prev_lineno ;
6512
+ }
6513
+ }
6514
+ }
6439
6515
}
6440
6516
}
6441
6517
@@ -6456,35 +6532,34 @@ optimize_cfg(struct assembler *a, PyObject *consts)
6456
6532
return -1 ;
6457
6533
}
6458
6534
clean_basic_block (b );
6459
- assert (b -> b_reachable == 0 );
6535
+ assert (b -> b_predecessors == 0 );
6460
6536
}
6461
6537
if (mark_reachable (a )) {
6462
6538
return -1 ;
6463
6539
}
6464
6540
/* Delete unreachable instructions */
6465
6541
for (basicblock * b = a -> a_entry ; b != NULL ; b = b -> b_next ) {
6466
- if (b -> b_reachable == 0 ) {
6542
+ if (b -> b_predecessors == 0 ) {
6467
6543
b -> b_iused = 0 ;
6468
6544
b -> b_nofallthrough = 0 ;
6469
6545
}
6470
6546
}
6547
+ eliminate_empty_basic_blocks (a -> a_entry );
6471
6548
/* Delete jump instructions made redundant by previous step. If a non-empty
6472
6549
block ends with a jump instruction, check if the next non-empty block
6473
6550
reached through normal flow control is the target of that jump. If it
6474
6551
is, then the jump instruction is redundant and can be deleted.
6475
6552
*/
6553
+ int maybe_empty_blocks = 0 ;
6476
6554
for (basicblock * b = a -> a_entry ; b != NULL ; b = b -> b_next ) {
6477
6555
if (b -> b_iused > 0 ) {
6478
6556
struct instr * b_last_instr = & b -> b_instr [b -> b_iused - 1 ];
6479
6557
if (b_last_instr -> i_opcode == POP_JUMP_IF_FALSE ||
6480
6558
b_last_instr -> i_opcode == POP_JUMP_IF_TRUE ||
6481
6559
b_last_instr -> i_opcode == JUMP_ABSOLUTE ||
6482
6560
b_last_instr -> i_opcode == JUMP_FORWARD ) {
6483
- basicblock * b_next_act = b -> b_next ;
6484
- while (b_next_act != NULL && b_next_act -> b_iused == 0 ) {
6485
- b_next_act = b_next_act -> b_next ;
6486
- }
6487
- if (b_last_instr -> i_target == b_next_act ) {
6561
+ if (b_last_instr -> i_target == b -> b_next ) {
6562
+ assert (b -> b_next -> b_iused );
6488
6563
b -> b_nofallthrough = 0 ;
6489
6564
switch (b_last_instr -> i_opcode ) {
6490
6565
case POP_JUMP_IF_FALSE :
@@ -6497,19 +6572,17 @@ optimize_cfg(struct assembler *a, PyObject *consts)
6497
6572
case JUMP_FORWARD :
6498
6573
b_last_instr -> i_opcode = NOP ;
6499
6574
clean_basic_block (b );
6575
+ maybe_empty_blocks = 1 ;
6500
6576
break ;
6501
6577
}
6502
- /* The blocks after this one are now reachable through it */
6503
- b_next_act = b -> b_next ;
6504
- while (b_next_act != NULL && b_next_act -> b_iused == 0 ) {
6505
- b_next_act -> b_reachable = 1 ;
6506
- b_next_act = b_next_act -> b_next ;
6507
- }
6508
6578
}
6509
6579
}
6510
6580
}
6511
6581
}
6512
- minimize_lineno_table (a );
6582
+ if (maybe_empty_blocks ) {
6583
+ eliminate_empty_basic_blocks (a -> a_entry );
6584
+ }
6585
+ propogate_line_numbers (a );
6513
6586
return 0 ;
6514
6587
}
6515
6588
0 commit comments