File: build/gcc/cfgcleanup.cc
Warning: line 1439, column 24: Although the value stored to 'afterlast2' is used in the enclosing expression, the value is never actually read from 'afterlast2'
1 | /* Control flow optimization code for GNU compiler. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* This file contains optimizer of the control flow. The main entry point is |
21 | cleanup_cfg. Following optimizations are performed: |
22 | |
23 | - Unreachable blocks removal |
24 | - Edge forwarding (edge to the forwarder block is forwarded to its |
25 | successor. Simplification of the branch instruction is performed by |
26 | underlying infrastructure so branch can be converted to simplejump or |
27 | eliminated). |
28 | - Cross jumping (tail merging) |
29 | - Conditional jump-around-simplejump simplification |
30 | - Basic block merging. */ |
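 | /* Illustrative sketch, not part of cfgcleanup.cc: callers select which of |
 |    the transformations listed above run through the MODE bitmask passed to |
 |    cleanup_cfg, for example |
 | |
 |        changed = cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP); |
 | |
 |    where the CLEANUP_* flags are assumed to be the ones declared in |
 |    cfgcleanup.h.  */ |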
31 | |
32 | #include "config.h" |
33 | #include "system.h" |
34 | #include "coretypes.h" |
35 | #include "backend.h" |
36 | #include "target.h" |
37 | #include "rtl.h" |
38 | #include "tree.h" |
39 | #include "cfghooks.h" |
40 | #include "df.h" |
41 | #include "memmodel.h" |
42 | #include "tm_p.h" |
43 | #include "insn-config.h" |
44 | #include "emit-rtl.h" |
45 | #include "cselib.h" |
46 | #include "tree-pass.h" |
47 | #include "cfgloop.h" |
48 | #include "cfgrtl.h" |
49 | #include "cfganal.h" |
50 | #include "cfgbuild.h" |
51 | #include "cfgcleanup.h" |
52 | #include "dce.h" |
53 | #include "dbgcnt.h" |
54 | #include "rtl-iter.h" |
55 | #include "regs.h" |
56 | #include "function-abi.h" |
57 | |
58 | #define FORWARDER_BLOCK_P(BB) ((BB)->flags & BB_FORWARDER_BLOCK) |
59 | |
60 | /* Set to true when we are running first pass of try_optimize_cfg loop. */ |
61 | static bool first_pass; |
62 | |
63 | /* Set to true if crossjumps occurred in the latest run of try_optimize_cfg. */ |
64 | static bool crossjumps_occurred; |
65 | |
66 | /* Set to true if we couldn't run an optimization due to stale liveness |
67 | information; we should run df_analyze to enable more opportunities. */ |
68 | static bool block_was_dirty; |
69 | |
70 | static bool try_crossjump_to_edge (int, edge, edge, enum replace_direction); |
71 | static bool try_crossjump_bb (int, basic_block); |
72 | static bool outgoing_edges_match (int, basic_block, basic_block); |
73 | static enum replace_direction old_insns_match_p (int, rtx_insn *, rtx_insn *); |
74 | |
75 | static void merge_blocks_move_predecessor_nojumps (basic_block, basic_block); |
76 | static void merge_blocks_move_successor_nojumps (basic_block, basic_block); |
77 | static bool try_optimize_cfg (int); |
78 | static bool try_simplify_condjump (basic_block); |
79 | static bool try_forward_edges (int, basic_block); |
80 | static edge thread_jump (edge, basic_block); |
81 | static bool mark_effect (rtx, bitmap); |
82 | static void notice_new_block (basic_block); |
83 | static void update_forwarder_flag (basic_block); |
84 | static void merge_memattrs (rtx, rtx); |
85 | |
86 | /* Set flags for newly created block. */ |
87 | |
88 | static void |
89 | notice_new_block (basic_block bb) |
90 | { |
91 | if (!bb) |
92 | return; |
93 | |
94 | if (forwarder_block_p (bb)) |
95 | bb->flags |= BB_FORWARDER_BLOCK; |
96 | } |
97 | |
98 | /* Recompute forwarder flag after block has been modified. */ |
99 | |
100 | static void |
101 | update_forwarder_flag (basic_block bb) |
102 | { |
103 | if (forwarder_block_p (bb)) |
104 | bb->flags |= BB_FORWARDER_BLOCK; |
105 | else |
106 | bb->flags &= ~BB_FORWARDER_BLOCK; |
107 | } |
108 | |
109 | /* Simplify a conditional jump around an unconditional jump. |
110 | Return true if something changed. */ |
111 | |
112 | static bool |
113 | try_simplify_condjump (basic_block cbranch_block) |
114 | { |
115 | basic_block jump_block, jump_dest_block, cbranch_dest_block; |
116 | edge cbranch_jump_edge, cbranch_fallthru_edge; |
117 | rtx_insn *cbranch_insn; |
118 | |
119 | /* Verify that there are exactly two successors. */ |
120 | if (EDGE_COUNT (cbranch_block->succs) != 2) |
121 | return false; |
122 | |
123 | /* Verify that we've got a normal conditional branch at the end |
124 | of the block. */ |
125 | cbranch_insn = BB_END (cbranch_block); |
126 | if (!any_condjump_p (cbranch_insn)) |
127 | return false; |
128 | |
129 | cbranch_fallthru_edge = FALLTHRU_EDGE (cbranch_block); |
130 | cbranch_jump_edge = BRANCH_EDGE (cbranch_block); |
131 | |
132 | /* The next block must not have multiple predecessors, must not |
133 | be the last block in the function, and must contain just the |
134 | unconditional jump. */ |
135 | jump_block = cbranch_fallthru_edge->dest; |
136 | if (!single_pred_p (jump_block) |
137 | || jump_block->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
138 | || !FORWARDER_BLOCK_P (jump_block)) |
139 | return false; |
140 | jump_dest_block = single_succ (jump_block); |
141 | |
142 | /* If we are partitioning hot/cold basic blocks, we don't want to |
143 | mess up unconditional or indirect jumps that cross between hot |
144 | and cold sections. |
145 | |
146 | Basic block partitioning may result in some jumps that appear to |
147 | be optimizable (or blocks that appear to be mergeable), but which really |
148 | must be left untouched (they are required to make it safely across |
149 | partition boundaries). See the comments at the top of |
150 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
151 | |
152 | if (BB_PARTITION (jump_block) != BB_PARTITION (jump_dest_block) |
153 | || (cbranch_jump_edge->flags & EDGE_CROSSING)) |
154 | return false; |
155 | |
156 | /* The conditional branch must target the block after the |
157 | unconditional branch. */ |
158 | cbranch_dest_block = cbranch_jump_edge->dest; |
159 | |
160 | if (cbranch_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun) |
161 | || jump_dest_block == EXIT_BLOCK_PTR_FOR_FN (cfun) |
162 | || !can_fallthru (jump_block, cbranch_dest_block)) |
163 | return false; |
164 | |
165 | /* Invert the conditional branch. */ |
166 | if (!invert_jump (as_a <rtx_jump_insn *> (cbranch_insn), |
167 | block_label (jump_dest_block), 0)) |
168 | return false; |
169 | |
170 | if (dump_file) |
171 | fprintf (dump_file, "Simplifying condjump %i around jump %i\n", |
172 | INSN_UID (cbranch_insn), INSN_UID (BB_END (jump_block))); |
173 | |
174 | /* Success. Update the CFG to match. Note that after this point |
175 | the edge variable names appear backwards; the redirection is done |
176 | this way to preserve edge profile data. */ |
177 | cbranch_jump_edge = redirect_edge_succ_nodup (cbranch_jump_edge, |
178 | cbranch_dest_block); |
179 | cbranch_fallthru_edge = redirect_edge_succ_nodup (cbranch_fallthru_edge, |
180 | jump_dest_block); |
181 | cbranch_jump_edge->flags |= EDGE_FALLTHRU; |
182 | cbranch_fallthru_edge->flags &= ~EDGE_FALLTHRU; |
183 | update_br_prob_note (cbranch_block); |
184 | |
185 | /* Delete the block with the unconditional jump, and clean up the mess. */ |
186 | delete_basic_block (jump_block); |
187 | tidy_fallthru_edge (cbranch_jump_edge); |
188 | update_forwarder_flag (cbranch_block); |
189 | |
190 | return true; |
191 | } |
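 | /* Illustrative sketch (shape assumed, not taken from this file): the |
 |    function above rewrites a CFG fragment of the form |
 | |
 |        bb1:         if (cond) goto bb2;   // conditional jump around ... |
 |        jump_block:  goto L;               // ... an unconditional jump |
 |        bb2:         ... |
 | |
 |    into |
 | |
 |        bb1:  if (!cond) goto L;           // condition inverted, |
 |        bb2:  ...                          // jump_block deleted |
 | |
 |    by inverting the branch, redirecting it to L and deleting the now |
 |    dead forwarder block.  */ |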
192 | |
193 | /* Attempt to prove that operation is NOOP using CSElib or mark the effect |
194 | on register. Used by jump threading. */ |
195 | |
196 | static bool |
197 | mark_effect (rtx exp, regset nonequal) |
198 | { |
199 | rtx dest; |
200 | switch (GET_CODE (exp)) |
201 | { |
202 | /* In case we do clobber the register, mark it as equal, as we know the |
203 | value is dead so it doesn't have to match. */ |
204 | case CLOBBER: |
205 | dest = XEXP (exp, 0); |
206 | if (REG_P (dest)) |
207 | bitmap_clear_range (nonequal, REGNO (dest), REG_NREGS (dest)); |
208 | return false; |
209 | |
210 | case SET: |
211 | if (cselib_redundant_set_p (exp)) |
212 | return false; |
213 | dest = SET_DEST (exp); |
214 | if (dest == pc_rtx) |
215 | return false; |
216 | if (!REG_P (dest)) |
217 | return true; |
218 | bitmap_set_range (nonequal, REGNO (dest), REG_NREGS (dest)); |
219 | return false; |
220 | |
221 | default: |
222 | return false; |
223 | } |
224 | } |
225 | |
226 | /* Return true if X contains a register in NONEQUAL. */ |
227 | static bool |
228 | mentions_nonequal_regs (const_rtx x, regset nonequal) |
229 | { |
230 | subrtx_iterator::array_type array; |
231 | FOR_EACH_SUBRTX (iter, array, x, NONCONST) |
232 | { |
233 | const_rtx x = *iter; |
234 | if (REG_P (x)) |
235 | { |
236 | unsigned int end_regno = END_REGNO (x); |
237 | for (unsigned int regno = REGNO (x); regno < end_regno; ++regno) |
238 | if (REGNO_REG_SET_P (nonequal, regno)) |
239 | return true; |
240 | } |
241 | } |
242 | return false; |
243 | } |
244 | |
245 | /* Attempt to prove that the basic block B will have no side effects and |
246 | always continues in the same edge if reached via E. Return the edge |
247 | if it exists, NULL otherwise. */ |
248 | |
249 | static edge |
250 | thread_jump (edge e, basic_block b) |
251 | { |
252 | rtx set1, set2, cond1, cond2; |
253 | rtx_insn *insn; |
254 | enum rtx_code code1, code2, reversed_code2; |
255 | bool reverse1 = false; |
256 | unsigned i; |
257 | regset nonequal; |
258 | bool failed = false; |
259 | |
260 | /* Jump threading may cause fixup_partitions to introduce new crossing edges, |
261 | which is not allowed after reload. */ |
262 | gcc_checking_assert (!reload_completed || !crtl->has_bb_partition); |
263 | |
264 | if (b->flags & BB_NONTHREADABLE_BLOCK) |
265 | return NULL; |
266 | |
267 | /* At the moment, we do handle only conditional jumps, but later we may |
268 | want to extend this code to tablejumps and others. */ |
269 | if (EDGE_COUNT (e->src->succs) != 2) |
270 | return NULL; |
271 | if (EDGE_COUNT (b->succs) != 2) |
272 | { |
273 | b->flags |= BB_NONTHREADABLE_BLOCK; |
274 | return NULL; |
275 | } |
276 | |
277 | /* Second branch must end with onlyjump, as we will eliminate the jump. */ |
278 | if (!any_condjump_p (BB_END (e->src))) |
279 | return NULL; |
280 | |
281 | if (!any_condjump_p (BB_END (b)) || !onlyjump_p (BB_END (b))) |
282 | { |
283 | b->flags |= BB_NONTHREADABLE_BLOCK; |
284 | return NULL; |
285 | } |
286 | |
287 | set1 = pc_set (BB_END (e->src)); |
288 | set2 = pc_set (BB_END (b)); |
289 | if (((e->flags & EDGE_FALLTHRU) != 0) |
290 | != (XEXP (SET_SRC (set1), 1) == pc_rtx)) |
291 | reverse1 = true; |
292 | |
293 | cond1 = XEXP (SET_SRC (set1), 0); |
294 | cond2 = XEXP (SET_SRC (set2), 0); |
295 | if (reverse1) |
296 | code1 = reversed_comparison_code (cond1, BB_END (e->src)); |
297 | else |
298 | code1 = GET_CODE (cond1); |
299 | |
300 | code2 = GET_CODE (cond2); |
301 | reversed_code2 = reversed_comparison_code (cond2, BB_END (b)); |
302 | |
303 | if (!comparison_dominates_p (code1, code2) |
304 | && !comparison_dominates_p (code1, reversed_code2)) |
305 | return NULL; |
306 | |
307 | /* Ensure that the comparison operators are equivalent. |
308 | ??? This is far too pessimistic. We should allow swapped operands, |
309 | different CCmodes, or for example comparisons for interval, that |
310 | dominate even when operands are not equivalent. */ |
311 | if (!rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0)) |
312 | || !rtx_equal_p (XEXP (cond1, 1), XEXP (cond2, 1))) |
313 | return NULL; |
314 | |
315 | /* Punt if BB_END (e->src) is doloop-like conditional jump that modifies |
316 | the registers used in cond1. */ |
317 | if (modified_in_p (cond1, BB_END (e->src))) |
318 | return NULL; |
319 | |
320 | /* Short circuit cases where block B contains some side effects, as we can't |
321 | safely bypass it. */ |
322 | for (insn = NEXT_INSN (BB_HEAD (b)); insn != NEXT_INSN (BB_END (b)); |
323 | insn = NEXT_INSN (insn)) |
324 | if (INSN_P (insn) && side_effects_p (PATTERN (insn))) |
325 | { |
326 | b->flags |= BB_NONTHREADABLE_BLOCK; |
327 | return NULL; |
328 | } |
329 | |
330 | cselib_init (0); |
331 | |
332 | /* First process all values computed in the source basic block. */ |
333 | for (insn = NEXT_INSN (BB_HEAD (e->src)); |
334 | insn != NEXT_INSN (BB_END (e->src)); |
335 | insn = NEXT_INSN (insn)) |
336 | if (INSN_P (insn)) |
337 | cselib_process_insn (insn); |
338 | |
339 | nonequal = BITMAP_ALLOC (NULL); |
340 | CLEAR_REG_SET (nonequal); |
341 | |
342 | /* Now assume that we've continued by the edge E to B and continue |
343 | processing as if it were the same basic block. |
344 | Our goal is to prove that the whole block is a NOOP. */ |
345 | |
346 | for (insn = NEXT_INSN (BB_HEAD (b)); |
347 | insn != NEXT_INSN (BB_END (b)) && !failed; |
348 | insn = NEXT_INSN (insn)) |
349 | { |
350 | /* cond2 must not mention any register that is not equal to the |
351 | former block. Check this before processing that instruction, |
352 | as BB_END (b) could contain also clobbers. */ |
353 | if (insn == BB_END (b) |
354 | && mentions_nonequal_regs (cond2, nonequal)) |
355 | goto failed_exit; |
356 | |
357 | if (INSN_P (insn)) |
358 | { |
359 | rtx pat = PATTERN (insn); |
360 | |
361 | if (GET_CODE (pat) == PARALLEL) |
362 | { |
363 | for (i = 0; i < (unsigned)XVECLEN (pat, 0); i++) |
364 | failed |= mark_effect (XVECEXP (pat, 0, i), nonequal); |
365 | } |
366 | else |
367 | failed |= mark_effect (pat, nonequal); |
368 | } |
369 | |
370 | cselib_process_insn (insn); |
371 | } |
372 | |
373 | /* Later we should clear nonequal of dead registers. So far we don't |
374 | have life information in cfg_cleanup. */ |
375 | if (failed) |
376 | { |
377 | b->flags |= BB_NONTHREADABLE_BLOCK; |
378 | goto failed_exit; |
379 | } |
380 | |
381 | if (!REG_SET_EMPTY_P (nonequal)) |
382 | goto failed_exit; |
383 | |
384 | BITMAP_FREE (nonequal); |
385 | cselib_finish (); |
386 | if ((comparison_dominates_p (code1, code2) != 0) |
387 | != (XEXP (SET_SRC (set2), 1) == pc_rtx)) |
388 | return BRANCH_EDGE (b); |
389 | else |
390 | return FALLTHRU_EDGE (b); |
391 | |
392 | failed_exit: |
393 | BITMAP_FREE (nonequal); |
394 | cselib_finish (); |
395 | return NULL; |
396 | } |
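 | /* Illustrative sketch (shape assumed, not taken from this file): |
 |    thread_jump handles cases such as |
 | |
 |        e->src:  if (x < 0) goto b;    // edge E taken when x < 0 |
 |        b:       if (x != 0) goto L;   // no side effects in b |
 | |
 |    Because x < 0 implies x != 0 (comparison_dominates_p) and cselib proves |
 |    b to be a no-op, the edge E can later be forwarded past b straight to L |
 |    by try_forward_edges.  */ |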
397 | |
398 | /* Attempt to forward edges leaving basic block B. |
399 | Return true if successful. */ |
400 | |
401 | static bool |
402 | try_forward_edges (int mode, basic_block b) |
403 | { |
404 | bool changed = false; |
405 | edge_iterator ei; |
406 | edge e, *threaded_edges = NULL; |
407 | |
408 | for (ei = ei_start (b->succs); (e = ei_safe_edge (ei)); ) |
409 | { |
410 | basic_block target, first; |
411 | location_t goto_locus; |
412 | int counter; |
413 | bool threaded = false; |
414 | int nthreaded_edges = 0; |
415 | bool may_thread = first_pass || (b->flags & BB_MODIFIED) != 0; |
416 | bool new_target_threaded = false; |
417 | |
418 | /* Skip complex edges because we don't know how to update them. |
419 | |
420 | Still handle fallthru edges, as we can succeed in forwarding the |
421 | fallthru edge to the same place as the branch edge of a conditional |
422 | branch and turn the conditional branch into an unconditional one. */ |
423 | if (e->flags & EDGE_COMPLEX) |
424 | { |
425 | ei_next (&ei); |
426 | continue; |
427 | } |
428 | |
429 | target = first = e->dest; |
430 | counter = NUM_FIXED_BLOCKS; |
431 | goto_locus = e->goto_locus; |
432 | |
433 | while (counter < n_basic_blocks_for_fn (cfun)) |
434 | { |
435 | basic_block new_target = NULL; |
436 | may_thread |= (target->flags & BB_MODIFIED) != 0; |
437 | |
438 | if (FORWARDER_BLOCK_P (target) |
439 | && single_succ (target) != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
440 | { |
441 | /* Bypass trivial infinite loops. */ |
442 | new_target = single_succ (target); |
443 | if (target == new_target) |
444 | counter = n_basic_blocks_for_fn (cfun); |
445 | else if (!optimize) |
446 | { |
447 | /* When not optimizing, ensure that edges or forwarder |
448 | blocks with different locus are not optimized out. */ |
449 | location_t new_locus = single_succ_edge (target)->goto_locus; |
450 | location_t locus = goto_locus; |
451 | |
452 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION |
453 | && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION |
454 | && new_locus != locus) |
455 | new_target = NULL; |
456 | else |
457 | { |
458 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION) |
459 | locus = new_locus; |
460 | |
461 | rtx_insn *last = BB_END (target); |
462 | if (DEBUG_INSN_P (last)) |
463 | last = prev_nondebug_insn (last); |
464 | if (last && INSN_P (last)) |
465 | new_locus = INSN_LOCATION (last); |
466 | else |
467 | new_locus = UNKNOWN_LOCATION; |
468 | |
469 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION |
470 | && LOCATION_LOCUS (locus) != UNKNOWN_LOCATION |
471 | && new_locus != locus) |
472 | new_target = NULL; |
473 | else |
474 | { |
475 | if (LOCATION_LOCUS (new_locus) != UNKNOWN_LOCATION) |
476 | locus = new_locus; |
477 | |
478 | goto_locus = locus; |
479 | } |
480 | } |
481 | } |
482 | } |
483 | |
484 | /* Allow threading only over one edge at a time to simplify updating |
485 | of probabilities. */ |
486 | else if ((mode & CLEANUP_THREADING) && may_thread) |
487 | { |
488 | edge t = thread_jump (e, target); |
489 | if (t) |
490 | { |
491 | if (!threaded_edges) |
492 | threaded_edges = XNEWVEC (edge, |
493 | n_basic_blocks_for_fn (cfun)); |
494 | else |
495 | { |
496 | int i; |
497 | |
498 | /* Detect an infinite loop across blocks not |
499 | including the start block. */ |
500 | for (i = 0; i < nthreaded_edges; ++i) |
501 | if (threaded_edges[i] == t) |
502 | break; |
503 | if (i < nthreaded_edges) |
504 | { |
505 | counter = n_basic_blocks_for_fn (cfun); |
506 | break; |
507 | } |
508 | } |
509 | |
510 | /* Detect an infinite loop across the start block. */ |
511 | if (t->dest == b) |
512 | break; |
513 | |
514 | gcc_assert (nthreaded_edges |
515 | < (n_basic_blocks_for_fn (cfun) |
516 | - NUM_FIXED_BLOCKS)); |
517 | threaded_edges[nthreaded_edges++] = t; |
518 | |
519 | new_target = t->dest; |
520 | new_target_threaded = true; |
521 | } |
522 | } |
523 | |
524 | if (!new_target) |
525 | break; |
526 | |
527 | counter++; |
528 | /* Do not turn a non-crossing jump into a crossing one. Depending on |
529 | the target it may require a different instruction pattern. */ |
530 | if ((e->flags & EDGE_CROSSING) |
531 | || BB_PARTITION (first) == BB_PARTITION (new_target)) |
532 | { |
533 | target = new_target; |
534 | threaded |= new_target_threaded; |
535 | } |
536 | } |
537 | |
538 | if (counter >= n_basic_blocks_for_fn (cfun)) |
539 | { |
540 | if (dump_file) |
541 | fprintf (dump_file, "Infinite loop in BB %i.\n", |
542 | target->index); |
543 | } |
544 | else if (target == first) |
545 | ; /* We didn't do anything. */ |
546 | else |
547 | { |
548 | /* Save the values now, as the edge may get removed. */ |
549 | profile_count edge_count = e->count (); |
550 | int n = 0; |
551 | |
552 | e->goto_locus = goto_locus; |
553 | |
554 | /* Don't force if target is exit block. */ |
555 | if (threaded && target != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
556 | { |
557 | notice_new_block (redirect_edge_and_branch_force (e, target)); |
558 | if (dump_file) |
559 | fprintf (dump_file, "Conditionals threaded.\n"); |
560 | } |
561 | else if (!redirect_edge_and_branch (e, target)) |
562 | { |
563 | if (dump_file) |
564 | fprintf (dump_file, |
565 | "Forwarding edge %i->%i to %i failed.\n", |
566 | b->index, e->dest->index, target->index); |
567 | ei_next (&ei); |
568 | continue; |
569 | } |
570 | |
571 | /* We successfully forwarded the edge. Now update profile |
572 | data: for each edge we traversed in the chain, remove |
573 | the original edge's execution count. */ |
574 | do |
575 | { |
576 | edge t; |
577 | |
578 | if (!single_succ_p (first)) |
579 | { |
580 | gcc_assert (n < nthreaded_edges); |
581 | t = threaded_edges [n++]; |
582 | gcc_assert (t->src == first); |
583 | update_bb_profile_for_threading (first, edge_count, t); |
584 | update_br_prob_note (first); |
585 | } |
586 | else |
587 | { |
588 | first->count -= edge_count; |
589 | /* It is possible that as the result of |
590 | threading we've removed edge as it is |
591 | threaded to the fallthru edge. Avoid |
592 | getting out of sync. */ |
593 | if (n < nthreaded_edges |
594 | && first == threaded_edges [n]->src) |
595 | n++; |
596 | t = single_succ_edge (first); |
597 | } |
598 | |
599 | first = t->dest; |
600 | } |
601 | while (first != target); |
602 | |
603 | changed = true; |
604 | continue; |
605 | } |
606 | ei_next (&ei); |
607 | } |
608 | |
609 | free (threaded_edges); |
610 | return changed; |
611 | } |
612 | |
613 | |
614 | /* Blocks A and B are to be merged into a single block. A has no incoming |
615 | fallthru edge, so it can be moved before B without adding or modifying |
616 | any jumps (aside from the jump from A to B). */ |
617 | |
618 | static void |
619 | merge_blocks_move_predecessor_nojumps (basic_block a, basic_block b) |
620 | { |
621 | rtx_insn *barrier; |
622 | |
623 | /* If we are partitioning hot/cold basic blocks, we don't want to |
624 | mess up unconditional or indirect jumps that cross between hot |
625 | and cold sections. |
626 | |
627 | Basic block partitioning may result in some jumps that appear to |
628 | be optimizable (or blocks that appear to be mergeable), but which really |
629 | must be left untouched (they are required to make it safely across |
630 | partition boundaries). See the comments at the top of |
631 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
632 | |
633 | if (BB_PARTITION (a) != BB_PARTITION (b)) |
634 | return; |
635 | |
636 | barrier = next_nonnote_insn (BB_END (a)); |
637 | gcc_assert (BARRIER_P (barrier)); |
638 | delete_insn (barrier); |
639 | |
640 | /* Scramble the insn chain. */ |
641 | if (BB_END (a) != PREV_INSN (BB_HEAD (b))) |
642 | reorder_insns_nobb (BB_HEAD (a), BB_END (a), PREV_INSN (BB_HEAD (b))); |
643 | df_set_bb_dirty (a); |
644 | |
645 | if (dump_file) |
646 | fprintf (dump_file, "Moved block %d before %d and merged.\n", |
647 | a->index, b->index); |
648 | |
649 | /* Swap the records for the two blocks around. */ |
650 | |
651 | unlink_block (a); |
652 | link_block (a, b->prev_bb); |
653 | |
654 | /* Now blocks A and B are contiguous. Merge them. */ |
655 | merge_blocks (a, b); |
656 | } |
657 | |
658 | /* Blocks A and B are to be merged into a single block. B has no outgoing |
659 | fallthru edge, so it can be moved after A without adding or modifying |
660 | any jumps (aside from the jump from A to B). */ |
661 | |
662 | static void |
663 | merge_blocks_move_successor_nojumps (basic_block a, basic_block b) |
664 | { |
665 | rtx_insn *barrier, *real_b_end; |
666 | rtx_insn *label; |
667 | rtx_jump_table_data *table; |
668 | |
669 | /* If we are partitioning hot/cold basic blocks, we don't want to |
670 | mess up unconditional or indirect jumps that cross between hot |
671 | and cold sections. |
672 | |
673 | Basic block partitioning may result in some jumps that appear to |
674 | be optimizable (or blocks that appear to be mergeable), but which really |
675 | must be left untouched (they are required to make it safely across |
676 | partition boundaries). See the comments at the top of |
677 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
678 | |
679 | if (BB_PARTITION (a) != BB_PARTITION (b)) |
680 | return; |
681 | |
682 | real_b_end = BB_END (b); |
683 | |
684 | /* If there is a jump table following block B temporarily add the jump table |
685 | to block B so that it will also be moved to the correct location. */ |
686 | if (tablejump_p (BB_END (b), &label, &table) |
687 | && prev_active_insn (label) == BB_END (b)) |
688 | { |
689 | BB_END (b) = table; |
690 | } |
691 | |
692 | /* There had better have been a barrier there. Delete it. */ |
693 | barrier = NEXT_INSN (BB_END (b)); |
694 | if (barrier && BARRIER_P (barrier)) |
695 | delete_insn (barrier); |
696 | |
697 | |
698 | /* Scramble the insn chain. */ |
699 | reorder_insns_nobb (BB_HEAD (b), BB_END (b), BB_END (a)); |
700 | |
701 | /* Restore the real end of b. */ |
702 | BB_END (b) = real_b_end; |
703 | |
704 | if (dump_file) |
705 | fprintf (dump_file, "Moved block %d after %d and merged.\n", |
706 | b->index, a->index); |
707 | |
708 | /* Now blocks A and B are contiguous. Merge them. */ |
709 | merge_blocks (a, b); |
710 | } |
711 | |
712 | /* Attempt to merge basic blocks that are potentially non-adjacent. |
713 | Return NULL iff the attempt failed, otherwise return the basic block |
714 | where cleanup_cfg should continue. Because the merging commonly |
715 | moves the basic block away or introduces another optimization |
716 | possibility, return the basic block just before B so cleanup_cfg doesn't |
717 | need to iterate. |
718 | |
719 | It may be a good idea to return the basic block before C in the case |
720 | C has been moved after B and originally appeared earlier in the |
721 | insn sequence, but we have no information available about the |
722 | relative ordering of these two. Hopefully it is not too common. */ |
723 | |
724 | static basic_block |
725 | merge_blocks_move (edge e, basic_block b, basic_block c, int mode) |
726 | { |
727 | basic_block next; |
728 | |
729 | /* If we are partitioning hot/cold basic blocks, we don't want to |
730 | mess up unconditional or indirect jumps that cross between hot |
731 | and cold sections. |
732 | |
733 | Basic block partitioning may result in some jumps that appear to |
734 | be optimizable (or blocks that appear to be mergeable), but which really |
735 | must be left untouched (they are required to make it safely across |
736 | partition boundaries). See the comments at the top of |
737 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
738 | |
739 | if (BB_PARTITION (b) != BB_PARTITION (c)) |
740 | return NULL; |
741 | |
742 | /* If B has a fallthru edge to C, no need to move anything. */ |
743 | if (e->flags & EDGE_FALLTHRU) |
744 | { |
745 | int b_index = b->index, c_index = c->index; |
746 | |
747 | /* Protect the loop latches. */ |
748 | if (current_loops && c->loop_father->latch == c) |
749 | return NULL; |
750 | |
751 | merge_blocks (b, c); |
752 | update_forwarder_flag (b); |
753 | |
754 | if (dump_file) |
755 | fprintf (dump_file, "Merged %d and %d without moving.\n", |
756 | b_index, c_index); |
757 | |
758 | return b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? b : b->prev_bb; |
759 | } |
760 | |
761 | /* Otherwise we will need to move code around. Do that only if expensive |
762 | transformations are allowed. */ |
763 | else if (mode & CLEANUP_EXPENSIVE) |
764 | { |
765 | edge tmp_edge, b_fallthru_edge; |
766 | bool c_has_outgoing_fallthru; |
767 | bool b_has_incoming_fallthru; |
768 | |
769 | /* Avoid overactive code motion, as the forwarder blocks should be |
770 | eliminated by edge redirection instead. One exception might have |
771 | been if B is a forwarder block and C has no fallthru edge, but |
772 | that should be cleaned up by bb-reorder instead. */ |
773 | if (FORWARDER_BLOCK_P (b) || FORWARDER_BLOCK_P (c)) |
774 | return NULL; |
775 | |
776 | /* We must make sure to not munge nesting of lexical blocks, |
777 | and loop notes. This is done by squeezing out all the notes |
778 | and leaving them there to lie. Not ideal, but functional. */ |
779 | |
780 | tmp_edge = find_fallthru_edge (c->succs); |
781 | c_has_outgoing_fallthru = (tmp_edge != NULL); |
782 | |
783 | tmp_edge = find_fallthru_edge (b->preds); |
784 | b_has_incoming_fallthru = (tmp_edge != NULL); |
785 | b_fallthru_edge = tmp_edge; |
786 | next = b->prev_bb; |
787 | if (next == c) |
788 | next = next->prev_bb; |
789 | |
790 | /* Otherwise, we're going to try to move C after B. If C does |
791 | not have an outgoing fallthru, then it can be moved |
792 | immediately after B without introducing or modifying jumps. */ |
793 | if (! c_has_outgoing_fallthru) |
794 | { |
795 | merge_blocks_move_successor_nojumps (b, c); |
796 | return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next; |
797 | } |
798 | |
799 | /* If B does not have an incoming fallthru, then it can be moved |
800 | immediately before C without introducing or modifying jumps. |
801 | C cannot be the first block, so we do not have to worry about |
802 | accessing a non-existent block. */ |
803 | |
804 | if (b_has_incoming_fallthru) |
805 | { |
806 | basic_block bb; |
807 | |
808 | if (b_fallthru_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
809 | return NULL; |
810 | bb = force_nonfallthru (b_fallthru_edge); |
811 | if (bb) |
812 | notice_new_block (bb); |
813 | } |
814 | |
815 | merge_blocks_move_predecessor_nojumps (b, c); |
816 | return next == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? next->next_bb : next; |
817 | } |
818 | |
819 | return NULL; |
820 | } |
821 | |
822 | |
823 | /* Removes the memory attributes of MEM expression |
824 | if they are not equal. */ |
825 | |
826 | static void |
827 | merge_memattrs (rtx x, rtx y) |
828 | { |
829 | int i; |
830 | int j; |
831 | enum rtx_code code; |
832 | const char *fmt; |
833 | |
834 | if (x == y) |
835 | return; |
836 | if (x == 0 || y == 0) |
837 | return; |
838 | |
839 | code = GET_CODE (x); |
840 | |
841 | if (code != GET_CODE (y)) |
842 | return; |
843 | |
844 | if (GET_MODE (x) != GET_MODE (y)) |
845 | return; |
846 | |
847 | if (code == MEM && !mem_attrs_eq_p (MEM_ATTRS (x), MEM_ATTRS (y))) |
848 | { |
849 | if (! MEM_ATTRS (x)) |
850 | MEM_ATTRS (y) = 0; |
851 | else if (! MEM_ATTRS (y)) |
852 | MEM_ATTRS (x) = 0; |
853 | else |
854 | { |
855 | if (MEM_ALIAS_SET (x) != MEM_ALIAS_SET (y)) |
856 | { |
857 | set_mem_alias_set (x, 0); |
858 | set_mem_alias_set (y, 0); |
859 | } |
860 | |
861 | if (! mem_expr_equal_p (MEM_EXPR (x), MEM_EXPR (y))) |
862 | { |
863 | set_mem_expr (x, 0); |
864 | set_mem_expr (y, 0); |
865 | clear_mem_offset (x); |
866 | clear_mem_offset (y); |
867 | } |
868 | else if (MEM_OFFSET_KNOWN_P (x) != MEM_OFFSET_KNOWN_P (y) |
869 | || (MEM_OFFSET_KNOWN_P (x) |
870 | && maybe_ne (MEM_OFFSET (x), MEM_OFFSET (y)))) |
871 | { |
872 | clear_mem_offset (x); |
873 | clear_mem_offset (y); |
874 | } |
875 | |
876 | if (!MEM_SIZE_KNOWN_P (x)) |
877 | clear_mem_size (y); |
878 | else if (!MEM_SIZE_KNOWN_P (y)) |
879 | clear_mem_size (x); |
880 | else if (known_le (MEM_SIZE (x), MEM_SIZE (y))) |
881 | set_mem_size (x, MEM_SIZE (y)); |
882 | else if (known_le (MEM_SIZE (y), MEM_SIZE (x))) |
883 | set_mem_size (y, MEM_SIZE (x)); |
884 | else |
885 | { |
886 | /* The sizes aren't ordered, so we can't merge them. */ |
887 | clear_mem_size (x); |
888 | clear_mem_size (y); |
889 | } |
890 | |
891 | set_mem_align (x, MIN (MEM_ALIGN (x), MEM_ALIGN (y))); |
892 | set_mem_align (y, MEM_ALIGN (x)); |
893 | } |
894 | } |
895 | if (code == MEM) |
896 | { |
897 | if (MEM_READONLY_P (x) != MEM_READONLY_P (y)) |
898 | { |
899 | MEM_READONLY_P (x) = 0; |
900 | MEM_READONLY_P (y) = 0; |
901 | } |
902 | if (MEM_NOTRAP_P (x) != MEM_NOTRAP_P (y)) |
903 | { |
904 | MEM_NOTRAP_P (x) = 0; |
905 | MEM_NOTRAP_P (y) = 0; |
906 | } |
907 | if (MEM_VOLATILE_P (x) != MEM_VOLATILE_P (y)) |
908 | { |
909 | MEM_VOLATILE_P (x) = 1; |
910 | MEM_VOLATILE_P (y) = 1; |
911 | } |
912 | } |
913 | |
914 | fmt = GET_RTX_FORMAT (code); |
915 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
916 | { |
917 | switch (fmt[i]) |
918 | { |
919 | case 'E': |
920 | /* Two vectors must have the same length. */ |
921 | if (XVECLEN (x, i) != XVECLEN (y, i)) |
922 | return; |
923 | |
924 | for (j = 0; j < XVECLEN (x, i); j++) |
925 | merge_memattrs (XVECEXP (x, i, j), XVECEXP (y, i, j)); |
926 | |
927 | break; |
928 | |
929 | case 'e': |
930 | merge_memattrs (XEXP (x, i), XEXP (y, i)); |
931 | } |
932 | } |
933 | return; |
934 | } |
935 | |
936 | |
937 | /* Checks if patterns P1 and P2 are equivalent, apart from the possibly |
938 | different single sets S1 and S2. */ |
939 | |
940 | static bool |
941 | equal_different_set_p (rtx p1, rtx s1, rtx p2, rtx s2) |
942 | { |
943 | int i; |
944 | rtx e1, e2; |
945 | |
946 | if (p1 == s1 && p2 == s2) |
947 | return true; |
948 | |
949 | if (GET_CODE (p1) != PARALLEL || GET_CODE (p2) != PARALLEL) |
950 | return false; |
951 | |
952 | if (XVECLEN (p1, 0) != XVECLEN (p2, 0)) |
953 | return false; |
954 | |
955 | for (i = 0; i < XVECLEN (p1, 0); i++) |
956 | { |
957 | e1 = XVECEXP (p1, 0, i); |
958 | e2 = XVECEXP (p2, 0, i); |
959 | if (e1 == s1 && e2 == s2) |
960 | continue; |
961 | if (reload_completed |
962 | ? rtx_renumbered_equal_p (e1, e2) : rtx_equal_p (e1, e2)) |
963 | continue; |
964 | |
965 | return false; |
966 | } |
967 | |
968 | return true; |
969 | } |
970 | |
971 | |
972 | /* NOTE1 is the REG_EQUAL note, if any, attached to an insn |
973 | that is a single_set with a SET_SRC of SRC1. Similarly |
974 | for NOTE2/SRC2. |
975 | |
976 | So effectively NOTE1/NOTE2 are an alternate form of |
977 | SRC1/SRC2 respectively. |
978 | |
979 | Return nonzero if SRC1 or NOTE1 has the same constant |
980 | integer value as SRC2 or NOTE2. Else return zero. */ |
981 | static int |
982 | values_equal_p (rtx note1, rtx note2, rtx src1, rtx src2) |
983 | { |
984 | if (note1 |
985 | && note2 |
986 | && CONST_INT_P (XEXP (note1, 0)) |
987 | && rtx_equal_p (XEXP (note1, 0), XEXP (note2, 0))) |
988 | return 1; |
989 | |
990 | if (!note1 |
991 | && !note2 |
992 | && CONST_INT_P (src1) |
993 | && CONST_INT_P (src2) |
994 | && rtx_equal_p (src1, src2)) |
995 | return 1; |
996 | |
997 | if (note1 |
998 | && CONST_INT_P (src2) |
999 | && rtx_equal_p (XEXP (note1, 0), src2)) |
1000 | return 1; |
1001 | |
1002 | if (note2 |
1003 | && CONST_INT_P (src1) |
1004 | && rtx_equal_p (XEXP (note2, 0), src1)) |
1005 | return 1; |
1006 | |
1007 | return 0; |
1008 | } |
1009 | |
1010 | /* Examine register notes on I1 and I2 and return: |
1011 | - dir_forward if I1 can be replaced by I2, or |
1012 | - dir_backward if I2 can be replaced by I1, or |
1013 | - dir_both if both are the case. */ |
1014 | |
1015 | static enum replace_direction |
1016 | can_replace_by (rtx_insn *i1, rtx_insn *i2) |
1017 | { |
1018 | rtx s1, s2, d1, d2, src1, src2, note1, note2; |
1019 | bool c1, c2; |
1020 | |
1021 | /* Check for 2 sets. */ |
1022 | s1 = single_set (i1); |
1023 | s2 = single_set (i2); |
1024 | if (s1 == NULL_RTX || s2 == NULL_RTX) |
1025 | return dir_none; |
1026 | |
1027 | /* Check that the 2 sets set the same dest. */ |
1028 | d1 = SET_DEST (s1); |
1029 | d2 = SET_DEST (s2); |
1030 | if (!(reload_completed |
1031 | ? rtx_renumbered_equal_p (d1, d2) : rtx_equal_p (d1, d2))) |
1032 | return dir_none; |
1033 | |
1034 | /* Find identical reg_equiv or reg_equal note, which implies that the 2 sets |
1035 | set dest to the same value. */ |
1036 | note1 = find_reg_equal_equiv_note (i1); |
1037 | note2 = find_reg_equal_equiv_note (i2); |
1038 | |
1039 | src1 = SET_SRC (s1); |
1040 | src2 = SET_SRC (s2); |
1041 | |
1042 | if (!values_equal_p (note1, note2, src1, src2)) |
1043 | return dir_none; |
1044 | |
1045 | if (!equal_different_set_p (PATTERN (i1), s1, PATTERN (i2), s2)) |
1046 | return dir_none; |
1047 | |
1048 | /* Although the 2 sets set dest to the same value, we cannot replace |
1049 | (set (dest) (const_int)) |
1050 | by |
1051 | (set (dest) (reg)) |
1052 | because we don't know if the reg is live and has the same value at the |
1053 | location of replacement. */ |
1054 | c1 = CONST_INT_P (src1); |
1055 | c2 = CONST_INT_P (src2); |
1056 | if (c1 && c2) |
1057 | return dir_both; |
1058 | else if (c2) |
1059 | return dir_forward; |
1060 | else if (c1) |
1061 | return dir_backward; |
1062 | |
1063 | return dir_none; |
1064 | } |
1065 | |
1066 | /* Merges directions A and B. */ |
1067 | |
1068 | static enum replace_direction |
1069 | merge_dir (enum replace_direction a, enum replace_direction b) |
1070 | { |
1071 | /* Implements the following table: |
1072 | |bo fw bw no |
1073 | ---+----------- |
1074 | bo |bo fw bw no |
1075 | fw |-- fw no no |
1076 | bw |-- -- bw no |
1077 | no |-- -- -- no. */ |
1078 | |
1079 | if (a == b) |
1080 | return a; |
1081 | |
1082 | if (a == dir_both) |
1083 | return b; |
1084 | if (b == dir_both) |
1085 | return a; |
1086 | |
1087 | return dir_none; |
1088 | } |
1089 | |
1090 | /* Array of flags indexed by reg note kind, true if the given |
1091 | reg note is CFA related. */ |
1092 | static const bool reg_note_cfa_p[] = { |
1093 | #undef REG_CFA_NOTE |
1094 | #define DEF_REG_NOTE(NAME) false, |
1095 | #define REG_CFA_NOTE(NAME) true, |
1096 | #include "reg-notes.def" |
1097 | #undef REG_CFA_NOTE |
1098 | #undef DEF_REG_NOTE |
1099 | false |
1100 | }; |
1101 | |
1102 | /* Return true if I1 and I2 have identical CFA notes (the same order |
1103 | and equivalent content). */ |
1104 | |
1105 | static bool |
1106 | insns_have_identical_cfa_notes (rtx_insn *i1, rtx_insn *i2) |
1107 | { |
1108 | rtx n1, n2; |
1109 | for (n1 = REG_NOTES (i1), n2 = REG_NOTES (i2); ; |
1110 | n1 = XEXP (n1, 1), n2 = XEXP (n2, 1)) |
1111 | { |
1112 | /* Skip over reg notes not related to CFI information. */ |
1113 | while (n1 && !reg_note_cfa_p[REG_NOTE_KIND (n1)]) |
1114 | n1 = XEXP (n1, 1); |
1115 | while (n2 && !reg_note_cfa_p[REG_NOTE_KIND (n2)]) |
1116 | n2 = XEXP (n2, 1); |
1117 | if (n1 == NULL_RTX && n2 == NULL_RTX) |
1118 | return true; |
1119 | if (n1 == NULL_RTX || n2 == NULL_RTX) |
1120 | return false; |
1121 | if (XEXP (n1, 0) == XEXP (n2, 0)) |
1122 | ; |
1123 | else if (XEXP (n1, 0) == NULL_RTX || XEXP (n2, 0) == NULL_RTX) |
1124 | return false; |
1125 | else if (!(reload_completed |
1126 | ? rtx_renumbered_equal_p (XEXP (n1, 0), XEXP (n2, 0)) |
1127 | : rtx_equal_p (XEXP (n1, 0), XEXP (n2, 0)))) |
1128 | return false; |
1129 | } |
1130 | } |
1131 | |
1132 | /* Examine I1 and I2 and return: |
1133 | - dir_forward if I1 can be replaced by I2, or |
1134 | - dir_backward if I2 can be replaced by I1, or |
1135 | - dir_both if both are the case. */ |
1136 | |
1137 | static enum replace_direction |
1138 | old_insns_match_p (int mode ATTRIBUTE_UNUSED, rtx_insn *i1, rtx_insn *i2) |
1139 | { |
1140 | rtx p1, p2; |
1141 | |
1142 | /* Verify that I1 and I2 are equivalent. */ |
1143 | if (GET_CODE (i1) != GET_CODE (i2)) |
1144 | return dir_none; |
1145 | |
1146 | /* __builtin_unreachable() may lead to empty blocks (ending with |
1147 | NOTE_INSN_BASIC_BLOCK). They may be crossjumped. */ |
1148 | if (NOTE_INSN_BASIC_BLOCK_P (i1) && NOTE_INSN_BASIC_BLOCK_P (i2)) |
1149 | return dir_both; |
1150 | |
1151 | /* ??? Do not allow cross-jumping between different stack levels. */ |
1152 | p1 = find_reg_note (i1, REG_ARGS_SIZE, NULL); |
1153 | p2 = find_reg_note (i2, REG_ARGS_SIZE, NULL); |
1154 | if (p1 && p2) |
1155 | { |
1156 | p1 = XEXP (p1, 0); |
1157 | p2 = XEXP (p2, 0); |
1158 | if (!rtx_equal_p (p1, p2)) |
1159 | return dir_none; |
1160 | |
1161 | /* ??? Worse, this adjustment had better be constant lest we |
1162 | have differing incoming stack levels. */ |
1163 | if (!frame_pointer_needed |
1164 | && known_eq (find_args_size_adjust (i1), HOST_WIDE_INT_MIN)) |
1165 | return dir_none; |
1166 | } |
1167 | else if (p1 || p2) |
1168 | return dir_none; |
1169 | |
1170 | /* Do not allow cross-jumping between frame related insns and other |
1171 | insns. */ |
1172 | if (RTX_FRAME_RELATED_P (i1) != RTX_FRAME_RELATED_P (i2)) |
1173 | return dir_none; |
1174 | |
1175 | p1 = PATTERN (i1); |
1176 | p2 = PATTERN (i2); |
1177 | |
1178 | if (GET_CODE (p1) != GET_CODE (p2)) |
1179 | return dir_none; |
1180 | |
1181 | /* If this is a CALL_INSN, compare register usage information. |
1182 | If we don't check this on stack register machines, the two |
1183 | CALL_INSNs might be merged leaving reg-stack.cc with mismatching |
1184 | numbers of stack registers in the same basic block. |
1185 | If we don't check this on machines with delay slots, a delay slot may |
1186 | be filled that clobbers a parameter expected by the subroutine. |
1187 | |
1188 | ??? We take the simple route for now and assume that if they're |
1189 | equal, they were constructed identically. |
1190 | |
1191 | Also check for identical exception regions. */ |
1192 | |
1193 | if (CALL_P (i1)) |
1194 | { |
1195 | /* Ensure the same EH region. */ |
1196 | rtx n1 = find_reg_note (i1, REG_EH_REGION, 0); |
1197 | rtx n2 = find_reg_note (i2, REG_EH_REGION, 0); |
1198 | |
1199 | if (!n1 && n2) |
1200 | return dir_none; |
1201 | |
1202 | if (n1 && (!n2 || XEXP (n1, 0) != XEXP (n2, 0))) |
1203 | return dir_none; |
1204 | |
1205 | if (!rtx_equal_p (CALL_INSN_FUNCTION_USAGE (i1), |
1206 | CALL_INSN_FUNCTION_USAGE (i2)) |
1207 | || SIBLING_CALL_P (i1) != SIBLING_CALL_P (i2)) |
1208 | return dir_none; |
1209 | |
1210 | /* For address sanitizer, never crossjump __asan_report_* builtins, |
1211 | otherwise errors might be reported on incorrect lines. */ |
1212 | if (flag_sanitize & SANITIZE_ADDRESS) |
1213 | { |
1214 | rtx call = get_call_rtx_from (i1); |
1215 | if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF) |
1216 | { |
1217 | rtx symbol = XEXP (XEXP (call, 0), 0); |
1218 | if (SYMBOL_REF_DECL (symbol) |
1219 | && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL) |
1220 | { |
1221 | if ((DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol)) |
1222 | == BUILT_IN_NORMAL) |
1223 | && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)) |
1224 | >= BUILT_IN_ASAN_REPORT_LOAD1 |
1225 | && DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)) |
1226 | <= BUILT_IN_ASAN_STOREN) |
1227 | return dir_none; |
1228 | } |
1229 | } |
1230 | } |
1231 | |
1232 | if (insn_callee_abi (i1) != insn_callee_abi (i2)) |
1233 | return dir_none; |
1234 | } |
1235 | |
1236 | /* If both i1 and i2 are frame related, verify all the CFA notes |
1237 | in the same order and with the same content. */ |
1238 | if (RTX_FRAME_RELATED_P (i1)(__extension__ ({ __typeof ((i1)) const _rtx = ((i1)); if ((( enum rtx_code) (_rtx)->code) != DEBUG_INSN && ((enum rtx_code) (_rtx)->code) != INSN && ((enum rtx_code ) (_rtx)->code) != CALL_INSN && ((enum rtx_code) ( _rtx)->code) != JUMP_INSN && ((enum rtx_code) (_rtx )->code) != BARRIER && ((enum rtx_code) (_rtx)-> code) != SET) rtl_check_failed_flag ("RTX_FRAME_RELATED_P",_rtx , "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgcleanup.cc" , 1238, __FUNCTION__); _rtx; })->frame_related) && !insns_have_identical_cfa_notes (i1, i2)) |
1239 | return dir_none; |
1240 | |
1241 | #ifdef STACK_REGS |
1242 | /* If cross_jump_death_matters is not 0, the insn's mode |
1243 | indicates whether or not the insn contains any stack-like |
1244 | regs. */ |
1245 | |
1246 | if ((mode & CLEANUP_POST_REGSTACK4) && stack_regs_mentioned (i1)) |
1247 | { |
1248 | /* If register stack conversion has already been done, then |
1249 | death notes must also be compared before it is certain that |
1250 | the two instruction streams match. */ |
1251 | |
1252 | rtx note; |
1253 | HARD_REG_SET i1_regset, i2_regset; |
1254 | |
1255 | CLEAR_HARD_REG_SET (i1_regset); |
1256 | CLEAR_HARD_REG_SET (i2_regset); |
1257 | |
1258 | for (note = REG_NOTES (i1)(((i1)->u.fld[6]).rt_rtx); note; note = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx)) |
1259 | if (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_DEAD && STACK_REG_P (XEXP (note, 0))((((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) == REG) && ((unsigned long) (((rhs_regno((((note)-> u.fld[0]).rt_rtx))))) - (unsigned long) (8) <= (unsigned long ) (15) - (unsigned long) (8)))) |
1260 | SET_HARD_REG_BIT (i1_regset, REGNO (XEXP (note, 0))(rhs_regno((((note)->u.fld[0]).rt_rtx)))); |
1261 | |
1262 | for (note = REG_NOTES (i2)(((i2)->u.fld[6]).rt_rtx); note; note = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx)) |
1263 | if (REG_NOTE_KIND (note)((enum reg_note) ((machine_mode) (note)->mode)) == REG_DEAD && STACK_REG_P (XEXP (note, 0))((((enum rtx_code) ((((note)->u.fld[0]).rt_rtx))->code) == REG) && ((unsigned long) (((rhs_regno((((note)-> u.fld[0]).rt_rtx))))) - (unsigned long) (8) <= (unsigned long ) (15) - (unsigned long) (8)))) |
1264 | SET_HARD_REG_BIT (i2_regset, REGNO (XEXP (note, 0))(rhs_regno((((note)->u.fld[0]).rt_rtx)))); |
1265 | |
1266 | if (i1_regset != i2_regset) |
1267 | return dir_none; |
1268 | } |
1269 | #endif |
1270 | |
1271 | if (reload_completed |
1272 | ? rtx_renumbered_equal_p (p1, p2) : rtx_equal_p (p1, p2)) |
1273 | return dir_both; |
1274 | |
1275 | return can_replace_by (i1, i2); |
1276 | } |
1277 | |
1278 | /* When comparing insns I1 and I2 in flow_find_cross_jump or |
1279 | flow_find_head_matching_sequence, ensure the notes match. */ |
1280 | |
1281 | static void |
1282 | merge_notes (rtx_insn *i1, rtx_insn *i2) |
1283 | { |
1284 | /* If the merged insns have different REG_EQUAL notes, then |
1285 | remove them. */ |
1286 | rtx equiv1 = find_reg_equal_equiv_note (i1); |
1287 | rtx equiv2 = find_reg_equal_equiv_note (i2); |
1288 | |
1289 | if (equiv1 && !equiv2) |
1290 | remove_note (i1, equiv1); |
1291 | else if (!equiv1 && equiv2) |
1292 | remove_note (i2, equiv2); |
1293 | else if (equiv1 && equiv2 |
1294 | && !rtx_equal_p (XEXP (equiv1, 0)(((equiv1)->u.fld[0]).rt_rtx), XEXP (equiv2, 0)(((equiv2)->u.fld[0]).rt_rtx))) |
1295 | { |
1296 | remove_note (i1, equiv1); |
1297 | remove_note (i2, equiv2); |
1298 | } |
1299 | } |
1300 | |
1301 | /* Walks from I1 in BB1 backward till the next non-debug insn, and returns the |
1302 | resulting insn in I1, and the corresponding bb in BB1. At the head of a |
1303 | bb, if there is a predecessor bb that reaches this bb via fallthru, and |
1304 | FOLLOW_FALLTHRU is true, walks further into the predecessor bb and records this in
1305 | DID_FALLTHRU. Otherwise, stops at the head of the bb. */ |
1306 | |
1307 | static void |
1308 | walk_to_nondebug_insn (rtx_insn **i1, basic_block *bb1, bool follow_fallthru, |
1309 | bool *did_fallthru) |
1310 | { |
1311 | edge fallthru; |
1312 | |
1313 | *did_fallthru = false; |
1314 | |
1315 | /* Ignore notes. */ |
1316 | while (!NONDEBUG_INSN_P (*i1)((((enum rtx_code) (*i1)->code) == INSN) || (((enum rtx_code ) (*i1)->code) == JUMP_INSN) || (((enum rtx_code) (*i1)-> code) == CALL_INSN))) |
1317 | { |
1318 | if (*i1 != BB_HEAD (*bb1)(*bb1)->il.x.head_) |
1319 | { |
1320 | *i1 = PREV_INSN (*i1); |
1321 | continue; |
1322 | } |
1323 | |
1324 | if (!follow_fallthru) |
1325 | return; |
1326 | |
1327 | fallthru = find_fallthru_edge ((*bb1)->preds); |
1328 | if (!fallthru || fallthru->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr) |
1329 | || !single_succ_p (fallthru->src)) |
1330 | return; |
1331 | |
1332 | *bb1 = fallthru->src; |
1333 | *i1 = BB_END (*bb1)(*bb1)->il.x.rtl->end_; |
1334 | *did_fallthru = true; |
1335 | } |
1336 | } |
1337 | |
1338 | /* Look through the insns at the end of BB1 and BB2 and find the longest |
1339 | sequence of insns that are either equivalent, or allow forward or backward
1340 | replacement. Store the first insns for that sequence in *F1 and *F2 and |
1341 | return the sequence length. |
1342 | |
1343 | DIR_P indicates the allowed replacement direction on function entry, and |
1344 | the actual replacement direction on function exit. If NULL, only equivalent |
1345 | sequences are allowed. |
1346 | |
1347 | To simplify callers of this function, if the blocks match exactly, |
1348 | store the head of the blocks in *F1 and *F2. */ |
1349 | |
1350 | int |
1351 | flow_find_cross_jump (basic_block bb1, basic_block bb2, rtx_insn **f1, |
1352 | rtx_insn **f2, enum replace_direction *dir_p) |
1353 | { |
1354 | rtx_insn *i1, *i2, *last1, *last2, *afterlast1, *afterlast2; |
1355 | int ninsns = 0; |
1356 | enum replace_direction dir, last_dir, afterlast_dir; |
1357 | bool follow_fallthru, did_fallthru; |
1358 | |
1359 | if (dir_p) |
1360 | dir = *dir_p; |
1361 | else |
1362 | dir = dir_both; |
1363 | afterlast_dir = dir; |
1364 | last_dir = afterlast_dir; |
1365 | |
1366 | /* Skip simple jumps at the end of the blocks. Complex jumps still |
1367 | need to be compared for equivalence, which we'll do below. */ |
1368 | |
1369 | i1 = BB_END (bb1)(bb1)->il.x.rtl->end_; |
1370 | last1 = afterlast1 = last2 = afterlast2 = NULLnullptr; |
1371 | if (onlyjump_p (i1) |
1372 | || (returnjump_p (i1) && !side_effects_p (PATTERN (i1)))) |
1373 | { |
1374 | last1 = i1; |
1375 | i1 = PREV_INSN (i1); |
1376 | } |
1377 | |
1378 | i2 = BB_END (bb2)(bb2)->il.x.rtl->end_; |
1379 | if (onlyjump_p (i2) |
1380 | || (returnjump_p (i2) && !side_effects_p (PATTERN (i2)))) |
1381 | { |
1382 | last2 = i2; |
1383 | /* Count everything except for an unconditional jump as an insn.
1384 | Don't count any jumps if dir_p is NULL. */ |
1385 | if (!simplejump_p (i2) && !returnjump_p (i2) && last1 && dir_p) |
1386 | ninsns++; |
1387 | i2 = PREV_INSN (i2); |
1388 | } |
1389 | |
1390 | while (true) |
1391 | { |
1392 | /* In the following example, we can replace all jumps to C by jumps to A. |
1393 | |
1394 | This removes 4 duplicate insns. |
1395 | [bb A] insn1 [bb C] insn1 |
1396 | insn2 insn2 |
1397 | [bb B] insn3 insn3 |
1398 | insn4 insn4 |
1399 | jump_insn jump_insn |
1400 | |
1401 | We could also replace all jumps to A by jumps to C, but that leaves B |
1402 | alive, and removes only 2 duplicate insns. In a subsequent crossjump |
1403 | step, all jumps to B would be replaced with jumps to the middle of C, |
1404 | achieving the same result with more effort. |
1405 | So we allow only the first possibility, which means that we don't allow |
1406 | fallthru in the block that's being replaced. */ |
1407 | |
1408 | follow_fallthru = dir_p && dir != dir_forward; |
1409 | walk_to_nondebug_insn (&i1, &bb1, follow_fallthru, &did_fallthru); |
1410 | if (did_fallthru) |
1411 | dir = dir_backward; |
1412 | |
1413 | follow_fallthru = dir_p && dir != dir_backward; |
1414 | walk_to_nondebug_insn (&i2, &bb2, follow_fallthru, &did_fallthru); |
1415 | if (did_fallthru) |
1416 | dir = dir_forward; |
1417 | |
1418 | if (i1 == BB_HEAD (bb1)(bb1)->il.x.head_ || i2 == BB_HEAD (bb2)(bb2)->il.x.head_) |
1419 | break; |
1420 | |
1421 | /* Do not turn a crossing edge into a non-crossing edge or vice versa
1422 | after reload. */
1423 | if (BB_PARTITION (BLOCK_FOR_INSN (i1))((BLOCK_FOR_INSN (i1))->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION )) |
1424 | != BB_PARTITION (BLOCK_FOR_INSN (i2))((BLOCK_FOR_INSN (i2))->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION )) |
1425 | && reload_completed) |
1426 | break; |
1427 | |
1428 | dir = merge_dir (dir, old_insns_match_p (0, i1, i2)); |
1429 | if (dir == dir_none || (!dir_p && dir != dir_both)) |
1430 | break; |
1431 | |
1432 | merge_memattrs (i1, i2); |
1433 | |
1434 | /* Don't begin a cross-jump with a NOTE insn. */ |
1435 | if (INSN_P (i1)(((((enum rtx_code) (i1)->code) == INSN) || (((enum rtx_code ) (i1)->code) == JUMP_INSN) || (((enum rtx_code) (i1)-> code) == CALL_INSN)) || (((enum rtx_code) (i1)->code) == DEBUG_INSN ))) |
1436 | { |
1437 | merge_notes (i1, i2); |
1438 | |
1439 | afterlast1 = last1, afterlast2 = last2; |
Although the value stored to 'afterlast2' is used in the enclosing expression, the value is never actually read from 'afterlast2' | |
1440 | last1 = i1, last2 = i2; |
1441 | afterlast_dir = last_dir; |
1442 | last_dir = dir; |
1443 | if (active_insn_p (i1)) |
1444 | ninsns++; |
1445 | } |
1446 | |
1447 | i1 = PREV_INSN (i1); |
1448 | i2 = PREV_INSN (i2); |
1449 | } |
1450 | |
1451 | /* Include preceding notes and labels in the cross-jump. One, |
1452 | this may bring us to the head of the blocks as requested above. |
1453 | Two, it keeps line number notes as closely matched as possible. */
1454 | if (ninsns) |
1455 | { |
1456 | bb1 = BLOCK_FOR_INSN (last1); |
1457 | while (last1 != BB_HEAD (bb1)(bb1)->il.x.head_ && !NONDEBUG_INSN_P (PREV_INSN (last1))((((enum rtx_code) (PREV_INSN (last1))->code) == INSN) || ( ((enum rtx_code) (PREV_INSN (last1))->code) == JUMP_INSN) || (((enum rtx_code) (PREV_INSN (last1))->code) == CALL_INSN ))) |
1458 | last1 = PREV_INSN (last1); |
1459 | |
1460 | if (last1 != BB_HEAD (bb1)(bb1)->il.x.head_ && LABEL_P (PREV_INSN (last1))(((enum rtx_code) (PREV_INSN (last1))->code) == CODE_LABEL )) |
1461 | last1 = PREV_INSN (last1); |
1462 | |
1463 | bb2 = BLOCK_FOR_INSN (last2); |
1464 | while (last2 != BB_HEAD (bb2)(bb2)->il.x.head_ && !NONDEBUG_INSN_P (PREV_INSN (last2))((((enum rtx_code) (PREV_INSN (last2))->code) == INSN) || ( ((enum rtx_code) (PREV_INSN (last2))->code) == JUMP_INSN) || (((enum rtx_code) (PREV_INSN (last2))->code) == CALL_INSN ))) |
1465 | last2 = PREV_INSN (last2); |
1466 | |
1467 | if (last2 != BB_HEAD (bb2)(bb2)->il.x.head_ && LABEL_P (PREV_INSN (last2))(((enum rtx_code) (PREV_INSN (last2))->code) == CODE_LABEL )) |
1468 | last2 = PREV_INSN (last2); |
1469 | |
1470 | *f1 = last1; |
1471 | *f2 = last2; |
1472 | } |
1473 | |
1474 | if (dir_p) |
1475 | *dir_p = last_dir; |
1476 | return ninsns; |
1477 | } |
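The loop above folds the per-insn results of old_insns_match_p into a single replacement direction via merge_dir, which is defined earlier in this file and not shown in this excerpt. The standalone sketch below only models the folding behaviour the loop relies on; all names are illustrative and are not the GCC definitions.

enum direction_model { DIR_NONE, DIR_FORWARD, DIR_BACKWARD, DIR_BOTH };

/* Sketch of how two direction constraints combine: DIR_BOTH is the
   identity, DIR_NONE is absorbing, and conflicting one-way directions
   collapse to DIR_NONE.  Not the actual merge_dir from cfgcleanup.cc.  */
static enum direction_model
merge_dir_model (enum direction_model a, enum direction_model b)
{
  if (a == b)
    return a;          /* identical constraints stay as they are */
  if (a == DIR_BOTH)
    return b;          /* DIR_BOTH imposes no restriction */
  if (b == DIR_BOTH)
    return a;
  return DIR_NONE;     /* forward vs. backward (or anything vs. none) fails */
}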
1478 | |
1479 | /* Like flow_find_cross_jump, except start looking for a matching sequence from |
1480 | the head of the two blocks. Do not include jumps at the end. |
1481 | If STOP_AFTER is nonzero, stop after finding that many matching |
1482 | instructions. If STOP_AFTER is zero, count all INSN_P insns; if it is
1483 | nonzero, only count active insns. */
1484 | |
1485 | int |
1486 | flow_find_head_matching_sequence (basic_block bb1, basic_block bb2, rtx_insn **f1, |
1487 | rtx_insn **f2, int stop_after) |
1488 | { |
1489 | rtx_insn *i1, *i2, *last1, *last2, *beforelast1, *beforelast2; |
1490 | int ninsns = 0; |
1491 | edge e; |
1492 | edge_iterator ei; |
1493 | int nehedges1 = 0, nehedges2 = 0; |
1494 | |
1495 | FOR_EACH_EDGE (e, ei, bb1->succs)for ((ei) = ei_start_1 (&((bb1->succs))); ei_cond ((ei ), &(e)); ei_next (&(ei))) |
1496 | if (e->flags & EDGE_EH) |
1497 | nehedges1++; |
1498 | FOR_EACH_EDGE (e, ei, bb2->succs)for ((ei) = ei_start_1 (&((bb2->succs))); ei_cond ((ei ), &(e)); ei_next (&(ei))) |
1499 | if (e->flags & EDGE_EH) |
1500 | nehedges2++; |
1501 | |
1502 | i1 = BB_HEAD (bb1)(bb1)->il.x.head_; |
1503 | i2 = BB_HEAD (bb2)(bb2)->il.x.head_; |
1504 | last1 = beforelast1 = last2 = beforelast2 = NULLnullptr; |
1505 | |
1506 | while (true) |
1507 | { |
1508 | /* Ignore notes, except NOTE_INSN_EPILOGUE_BEG. */ |
1509 | while (!NONDEBUG_INSN_P (i1)((((enum rtx_code) (i1)->code) == INSN) || (((enum rtx_code ) (i1)->code) == JUMP_INSN) || (((enum rtx_code) (i1)-> code) == CALL_INSN)) && i1 != BB_END (bb1)(bb1)->il.x.rtl->end_) |
1510 | { |
1511 | if (NOTE_P (i1)(((enum rtx_code) (i1)->code) == NOTE) && NOTE_KIND (i1)(((i1)->u.fld[4]).rt_int) == NOTE_INSN_EPILOGUE_BEG) |
1512 | break; |
1513 | i1 = NEXT_INSN (i1); |
1514 | } |
1515 | |
1516 | while (!NONDEBUG_INSN_P (i2)((((enum rtx_code) (i2)->code) == INSN) || (((enum rtx_code ) (i2)->code) == JUMP_INSN) || (((enum rtx_code) (i2)-> code) == CALL_INSN)) && i2 != BB_END (bb2)(bb2)->il.x.rtl->end_) |
1517 | { |
1518 | if (NOTE_P (i2)(((enum rtx_code) (i2)->code) == NOTE) && NOTE_KIND (i2)(((i2)->u.fld[4]).rt_int) == NOTE_INSN_EPILOGUE_BEG) |
1519 | break; |
1520 | i2 = NEXT_INSN (i2); |
1521 | } |
1522 | |
1523 | if ((i1 == BB_END (bb1)(bb1)->il.x.rtl->end_ && !NONDEBUG_INSN_P (i1)((((enum rtx_code) (i1)->code) == INSN) || (((enum rtx_code ) (i1)->code) == JUMP_INSN) || (((enum rtx_code) (i1)-> code) == CALL_INSN))) |
1524 | || (i2 == BB_END (bb2)(bb2)->il.x.rtl->end_ && !NONDEBUG_INSN_P (i2)((((enum rtx_code) (i2)->code) == INSN) || (((enum rtx_code ) (i2)->code) == JUMP_INSN) || (((enum rtx_code) (i2)-> code) == CALL_INSN)))) |
1525 | break; |
1526 | |
1527 | if (NOTE_P (i1)(((enum rtx_code) (i1)->code) == NOTE) || NOTE_P (i2)(((enum rtx_code) (i2)->code) == NOTE) |
1528 | || JUMP_P (i1)(((enum rtx_code) (i1)->code) == JUMP_INSN) || JUMP_P (i2)(((enum rtx_code) (i2)->code) == JUMP_INSN)) |
1529 | break; |
1530 | |
1531 | /* A sanity check to make sure we're not merging insns with different |
1532 | effects on EH. If only one of them ends a basic block, it shouldn't |
1533 | have an EH edge; if both end a basic block, there should be the same |
1534 | number of EH edges. */ |
1535 | if ((i1 == BB_END (bb1)(bb1)->il.x.rtl->end_ && i2 != BB_END (bb2)(bb2)->il.x.rtl->end_ |
1536 | && nehedges1 > 0) |
1537 | || (i2 == BB_END (bb2)(bb2)->il.x.rtl->end_ && i1 != BB_END (bb1)(bb1)->il.x.rtl->end_ |
1538 | && nehedges2 > 0) |
1539 | || (i1 == BB_END (bb1)(bb1)->il.x.rtl->end_ && i2 == BB_END (bb2)(bb2)->il.x.rtl->end_ |
1540 | && nehedges1 != nehedges2)) |
1541 | break; |
1542 | |
1543 | if (old_insns_match_p (0, i1, i2) != dir_both) |
1544 | break; |
1545 | |
1546 | merge_memattrs (i1, i2); |
1547 | |
1548 | /* Don't begin a cross-jump with a NOTE insn. */ |
1549 | if (INSN_P (i1)(((((enum rtx_code) (i1)->code) == INSN) || (((enum rtx_code ) (i1)->code) == JUMP_INSN) || (((enum rtx_code) (i1)-> code) == CALL_INSN)) || (((enum rtx_code) (i1)->code) == DEBUG_INSN ))) |
1550 | { |
1551 | merge_notes (i1, i2); |
1552 | |
1553 | beforelast1 = last1, beforelast2 = last2; |
1554 | last1 = i1, last2 = i2; |
1555 | if (!stop_after || active_insn_p (i1)) |
1556 | ninsns++; |
1557 | } |
1558 | |
1559 | if (i1 == BB_END (bb1)(bb1)->il.x.rtl->end_ || i2 == BB_END (bb2)(bb2)->il.x.rtl->end_ |
1560 | || (stop_after > 0 && ninsns == stop_after)) |
1561 | break; |
1562 | |
1563 | i1 = NEXT_INSN (i1); |
1564 | i2 = NEXT_INSN (i2); |
1565 | } |
1566 | |
1567 | if (ninsns) |
1568 | { |
1569 | *f1 = last1; |
1570 | *f2 = last2; |
1571 | } |
1572 | |
1573 | return ninsns; |
1574 | } |
1575 | |
1576 | /* Return true iff outgoing edges of BB1 and BB2 match, together with |
1577 | the branch instruction. This means that if we commonize the control |
1578 | flow before the end of the basic block, the semantics remain unchanged.
1579 | |
1580 | We may assume that there exists one edge with a common destination. */ |
1581 | |
1582 | static bool |
1583 | outgoing_edges_match (int mode, basic_block bb1, basic_block bb2) |
1584 | { |
1585 | int nehedges1 = 0, nehedges2 = 0; |
1586 | edge fallthru1 = 0, fallthru2 = 0; |
1587 | edge e1, e2; |
1588 | edge_iterator ei; |
1589 | |
1590 | /* If we performed shrink-wrapping, edges to the exit block can |
1591 | only be distinguished for JUMP_INSNs. The two paths may differ in |
1592 | whether they went through the prologue. Sibcalls are fine, we know |
1593 | that we either didn't need or inserted an epilogue before them. */ |
1594 | if (crtl(&x_rtl)->shrink_wrapped |
1595 | && single_succ_p (bb1) |
1596 | && single_succ (bb1) == EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr) |
1597 | && (!JUMP_P (BB_END (bb1))(((enum rtx_code) ((bb1)->il.x.rtl->end_)->code) == JUMP_INSN ) |
1598 | /* Punt if the only successor is a fake edge to exit; the jump
1599 | must be some weird one. */ |
1600 | || (single_succ_edge (bb1)->flags & EDGE_FAKE) != 0) |
1601 | && !(CALL_P (BB_END (bb1))(((enum rtx_code) ((bb1)->il.x.rtl->end_)->code) == CALL_INSN ) && SIBLING_CALL_P (BB_END (bb1))(__extension__ ({ __typeof (((bb1)->il.x.rtl->end_)) const _rtx = (((bb1)->il.x.rtl->end_)); if (((enum rtx_code) (_rtx)->code) != CALL_INSN) rtl_check_failed_flag ("SIBLING_CALL_P" , _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgcleanup.cc" , 1601, __FUNCTION__); _rtx; })->jump))) |
1602 | return false; |
1603 | |
1604 | /* If BB1 has only one successor, we may be looking at either an |
1605 | unconditional jump, or a fake edge to exit. */ |
1606 | if (single_succ_p (bb1) |
1607 | && (single_succ_edge (bb1)->flags & (EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE ) | EDGE_FAKE)) == 0 |
1608 | && (!JUMP_P (BB_END (bb1))(((enum rtx_code) ((bb1)->il.x.rtl->end_)->code) == JUMP_INSN ) || simplejump_p (BB_END (bb1)(bb1)->il.x.rtl->end_))) |
1609 | return (single_succ_p (bb2) |
1610 | && (single_succ_edge (bb2)->flags |
1611 | & (EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE ) | EDGE_FAKE)) == 0 |
1612 | && (!JUMP_P (BB_END (bb2))(((enum rtx_code) ((bb2)->il.x.rtl->end_)->code) == JUMP_INSN ) || simplejump_p (BB_END (bb2)(bb2)->il.x.rtl->end_))); |
1613 | |
1614 | /* Match conditional jumps - this may get tricky when fallthru and branch |
1615 | edges are crossed. */ |
1616 | if (EDGE_COUNT (bb1->succs)vec_safe_length (bb1->succs) == 2 |
1617 | && any_condjump_p (BB_END (bb1)(bb1)->il.x.rtl->end_) |
1618 | && onlyjump_p (BB_END (bb1)(bb1)->il.x.rtl->end_)) |
1619 | { |
1620 | edge b1, f1, b2, f2; |
1621 | bool reverse, match; |
1622 | rtx set1, set2, cond1, cond2; |
1623 | enum rtx_code code1, code2; |
1624 | |
1625 | if (EDGE_COUNT (bb2->succs)vec_safe_length (bb2->succs) != 2 |
1626 | || !any_condjump_p (BB_END (bb2)(bb2)->il.x.rtl->end_) |
1627 | || !onlyjump_p (BB_END (bb2)(bb2)->il.x.rtl->end_)) |
1628 | return false; |
1629 | |
1630 | b1 = BRANCH_EDGE (bb1)((*((bb1))->succs)[(0)]->flags & EDGE_FALLTHRU ? (* ((bb1))->succs)[(1)] : (*((bb1))->succs)[(0)]); |
1631 | b2 = BRANCH_EDGE (bb2)((*((bb2))->succs)[(0)]->flags & EDGE_FALLTHRU ? (* ((bb2))->succs)[(1)] : (*((bb2))->succs)[(0)]); |
1632 | f1 = FALLTHRU_EDGE (bb1)((*((bb1))->succs)[(0)]->flags & EDGE_FALLTHRU ? (* ((bb1))->succs)[(0)] : (*((bb1))->succs)[(1)]); |
1633 | f2 = FALLTHRU_EDGE (bb2)((*((bb2))->succs)[(0)]->flags & EDGE_FALLTHRU ? (* ((bb2))->succs)[(0)] : (*((bb2))->succs)[(1)]); |
1634 | |
1635 | /* Get around possible forwarders on fallthru edges. Other cases |
1636 | should be optimized out already. */ |
1637 | if (FORWARDER_BLOCK_P (f1->dest)((f1->dest)->flags & BB_FORWARDER_BLOCK)) |
1638 | f1 = single_succ_edge (f1->dest); |
1639 | |
1640 | if (FORWARDER_BLOCK_P (f2->dest)((f2->dest)->flags & BB_FORWARDER_BLOCK)) |
1641 | f2 = single_succ_edge (f2->dest); |
1642 | |
1643 | /* To simplify use of this function, return false if there are |
1644 | unneeded forwarder blocks. These will get eliminated later |
1645 | during cleanup_cfg. */ |
1646 | if (FORWARDER_BLOCK_P (f1->dest)((f1->dest)->flags & BB_FORWARDER_BLOCK) |
1647 | || FORWARDER_BLOCK_P (f2->dest)((f2->dest)->flags & BB_FORWARDER_BLOCK) |
1648 | || FORWARDER_BLOCK_P (b1->dest)((b1->dest)->flags & BB_FORWARDER_BLOCK) |
1649 | || FORWARDER_BLOCK_P (b2->dest)((b2->dest)->flags & BB_FORWARDER_BLOCK)) |
1650 | return false; |
1651 | |
1652 | if (f1->dest == f2->dest && b1->dest == b2->dest) |
1653 | reverse = false; |
1654 | else if (f1->dest == b2->dest && b1->dest == f2->dest) |
1655 | reverse = true; |
1656 | else |
1657 | return false; |
1658 | |
1659 | set1 = pc_set (BB_END (bb1)(bb1)->il.x.rtl->end_); |
1660 | set2 = pc_set (BB_END (bb2)(bb2)->il.x.rtl->end_); |
1661 | if ((XEXP (SET_SRC (set1), 1)((((((set1)->u.fld[1]).rt_rtx))->u.fld[1]).rt_rtx) == pc_rtx) |
1662 | != (XEXP (SET_SRC (set2), 1)((((((set2)->u.fld[1]).rt_rtx))->u.fld[1]).rt_rtx) == pc_rtx)) |
1663 | reverse = !reverse; |
1664 | |
1665 | cond1 = XEXP (SET_SRC (set1), 0)((((((set1)->u.fld[1]).rt_rtx))->u.fld[0]).rt_rtx); |
1666 | cond2 = XEXP (SET_SRC (set2), 0)((((((set2)->u.fld[1]).rt_rtx))->u.fld[0]).rt_rtx); |
1667 | code1 = GET_CODE (cond1)((enum rtx_code) (cond1)->code); |
1668 | if (reverse) |
1669 | code2 = reversed_comparison_code (cond2, BB_END (bb2)(bb2)->il.x.rtl->end_); |
1670 | else |
1671 | code2 = GET_CODE (cond2)((enum rtx_code) (cond2)->code); |
1672 | |
1673 | if (code2 == UNKNOWN) |
1674 | return false; |
1675 | |
1676 | /* Verify codes and operands match. */ |
1677 | match = ((code1 == code2 |
1678 | && rtx_renumbered_equal_p (XEXP (cond1, 0)(((cond1)->u.fld[0]).rt_rtx), XEXP (cond2, 0)(((cond2)->u.fld[0]).rt_rtx)) |
1679 | && rtx_renumbered_equal_p (XEXP (cond1, 1)(((cond1)->u.fld[1]).rt_rtx), XEXP (cond2, 1)(((cond2)->u.fld[1]).rt_rtx))) |
1680 | || (code1 == swap_condition (code2) |
1681 | && rtx_renumbered_equal_p (XEXP (cond1, 1)(((cond1)->u.fld[1]).rt_rtx), |
1682 | XEXP (cond2, 0)(((cond2)->u.fld[0]).rt_rtx)) |
1683 | && rtx_renumbered_equal_p (XEXP (cond1, 0)(((cond1)->u.fld[0]).rt_rtx), |
1684 | XEXP (cond2, 1)(((cond2)->u.fld[1]).rt_rtx)))); |
1685 | |
1686 | /* If we return true, we will join the blocks, which means that
1687 | we will only have one branch prediction bit to work with. Thus |
1688 | we require the existing branches to have probabilities that are |
1689 | roughly similar. */ |
1690 | if (match |
1691 | && optimize_bb_for_speed_p (bb1) |
1692 | && optimize_bb_for_speed_p (bb2)) |
1693 | { |
1694 | profile_probability prob2; |
1695 | |
1696 | if (b1->dest == b2->dest) |
1697 | prob2 = b2->probability; |
1698 | else |
1699 | /* Do not use f2 probability as f2 may be forwarded. */ |
1700 | prob2 = b2->probability.invert (); |
1701 | |
1702 | /* Fail if the difference in probabilities is greater than 50%. |
1703 | This rules out two well-predicted branches with opposite |
1704 | outcomes. */ |
1705 | if (b1->probability.differs_lot_from_p (prob2)) |
1706 | { |
1707 | if (dump_file) |
1708 | { |
1709 | fprintf (dump_file, |
1710 | "Outcomes of branch in bb %i and %i differ too" |
1711 | " much (", bb1->index, bb2->index); |
1712 | b1->probability.dump (dump_file); |
1713 | prob2.dump (dump_file); |
1714 | fprintf (dump_file, ")\n"); |
1715 | } |
1716 | return false; |
1717 | } |
1718 | } |
1719 | |
1720 | if (dump_file && match) |
1721 | fprintf (dump_file, "Conditionals in bb %i and %i match.\n", |
1722 | bb1->index, bb2->index); |
1723 | |
1724 | return match; |
1725 | } |
1726 | |
1727 | /* Generic case - we are seeing a computed jump, table jump or trapping |
1728 | instruction. */ |
1729 | |
1730 | /* Check whether there are tablejumps in the end of BB1 and BB2. |
1731 | Return true if they are identical. */ |
1732 | { |
1733 | rtx_insn *label1, *label2; |
1734 | rtx_jump_table_data *table1, *table2; |
1735 | |
1736 | if (tablejump_p (BB_END (bb1)(bb1)->il.x.rtl->end_, &label1, &table1) |
1737 | && tablejump_p (BB_END (bb2)(bb2)->il.x.rtl->end_, &label2, &table2) |
1738 | && GET_CODE (PATTERN (table1))((enum rtx_code) (PATTERN (table1))->code) == GET_CODE (PATTERN (table2))((enum rtx_code) (PATTERN (table2))->code)) |
1739 | { |
1740 | /* The labels should never be the same rtx. If they really are the same,
1741 | the jump tables are the same too. So disable crossjumping of blocks BB1
1742 | and BB2 because when deleting the common insns in the end of BB1 |
1743 | by delete_basic_block () the jump table would be deleted too. */ |
1744 | /* If LABEL2 is referenced in BB1->END, do not do anything
1745 | because we would lose information when replacing
1746 | LABEL1 by LABEL2 and then LABEL2 by LABEL1 in BB1->END. */ |
1747 | if (label1 != label2 && !rtx_referenced_p (label2, BB_END (bb1)(bb1)->il.x.rtl->end_)) |
1748 | { |
1749 | /* Set IDENTICAL to true when the tables are identical. */ |
1750 | bool identical = false; |
1751 | rtx p1, p2; |
1752 | |
1753 | p1 = PATTERN (table1); |
1754 | p2 = PATTERN (table2); |
1755 | if (GET_CODE (p1)((enum rtx_code) (p1)->code) == ADDR_VEC && rtx_equal_p (p1, p2)) |
1756 | { |
1757 | identical = true; |
1758 | } |
1759 | else if (GET_CODE (p1)((enum rtx_code) (p1)->code) == ADDR_DIFF_VEC |
1760 | && (XVECLEN (p1, 1)(((((p1)->u.fld[1]).rt_rtvec))->num_elem) == XVECLEN (p2, 1)(((((p2)->u.fld[1]).rt_rtvec))->num_elem)) |
1761 | && rtx_equal_p (XEXP (p1, 2)(((p1)->u.fld[2]).rt_rtx), XEXP (p2, 2)(((p2)->u.fld[2]).rt_rtx)) |
1762 | && rtx_equal_p (XEXP (p1, 3)(((p1)->u.fld[3]).rt_rtx), XEXP (p2, 3)(((p2)->u.fld[3]).rt_rtx))) |
1763 | { |
1764 | int i; |
1765 | |
1766 | identical = true; |
1767 | for (i = XVECLEN (p1, 1)(((((p1)->u.fld[1]).rt_rtvec))->num_elem) - 1; i >= 0 && identical; i--) |
1768 | if (!rtx_equal_p (XVECEXP (p1, 1, i)(((((p1)->u.fld[1]).rt_rtvec))->elem[i]), XVECEXP (p2, 1, i)(((((p2)->u.fld[1]).rt_rtvec))->elem[i]))) |
1769 | identical = false; |
1770 | } |
1771 | |
1772 | if (identical) |
1773 | { |
1774 | bool match; |
1775 | |
1776 | /* Temporarily replace references to LABEL1 with LABEL2 |
1777 | in BB1->END so that we could compare the instructions. */ |
1778 | replace_label_in_insn (BB_END (bb1)(bb1)->il.x.rtl->end_, label1, label2, false); |
1779 | |
1780 | match = (old_insns_match_p (mode, BB_END (bb1)(bb1)->il.x.rtl->end_, BB_END (bb2)(bb2)->il.x.rtl->end_) |
1781 | == dir_both); |
1782 | if (dump_file && match) |
1783 | fprintf (dump_file, |
1784 | "Tablejumps in bb %i and %i match.\n", |
1785 | bb1->index, bb2->index); |
1786 | |
1787 | /* Set the original label in BB1->END because when deleting |
1788 | a block whose end is a tablejump, the tablejump referenced |
1789 | from the instruction is deleted too. */ |
1790 | replace_label_in_insn (BB_END (bb1)(bb1)->il.x.rtl->end_, label2, label1, false); |
1791 | |
1792 | return match; |
1793 | } |
1794 | } |
1795 | return false; |
1796 | } |
1797 | } |
1798 | |
1799 | /* Find the last non-debug non-note instruction in each bb, except |
1800 | stop when we see the NOTE_INSN_BASIC_BLOCK, as old_insns_match_p |
1801 | handles that case specially. old_insns_match_p does not handle |
1802 | other types of instruction notes. */ |
1803 | rtx_insn *last1 = BB_END (bb1)(bb1)->il.x.rtl->end_; |
1804 | rtx_insn *last2 = BB_END (bb2)(bb2)->il.x.rtl->end_; |
1805 | while (!NOTE_INSN_BASIC_BLOCK_P (last1)((((enum rtx_code) (last1)->code) == NOTE) && (((last1 )->u.fld[4]).rt_int) == NOTE_INSN_BASIC_BLOCK) && |
1806 | (DEBUG_INSN_P (last1)(((enum rtx_code) (last1)->code) == DEBUG_INSN) || NOTE_P (last1)(((enum rtx_code) (last1)->code) == NOTE))) |
1807 | last1 = PREV_INSN (last1); |
1808 | while (!NOTE_INSN_BASIC_BLOCK_P (last2)((((enum rtx_code) (last2)->code) == NOTE) && (((last2 )->u.fld[4]).rt_int) == NOTE_INSN_BASIC_BLOCK) && |
1809 | (DEBUG_INSN_P (last2)(((enum rtx_code) (last2)->code) == DEBUG_INSN) || NOTE_P (last2)(((enum rtx_code) (last2)->code) == NOTE))) |
1810 | last2 = PREV_INSN (last2); |
1811 | gcc_assert (last1 && last2)((void)(!(last1 && last2) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgcleanup.cc" , 1811, __FUNCTION__), 0 : 0)); |
1812 | |
1813 | /* First ensure that the instructions match. There may be many outgoing |
1814 | edges so this test is generally cheaper. */ |
1815 | if (old_insns_match_p (mode, last1, last2) != dir_both) |
1816 | return false; |
1817 | |
1818 | /* Search the outgoing edges, ensure that the counts do match, find possible |
1819 | fallthru and exception handling edges, since these need more
1820 | validation. */ |
1821 | if (EDGE_COUNT (bb1->succs)vec_safe_length (bb1->succs) != EDGE_COUNT (bb2->succs)vec_safe_length (bb2->succs)) |
1822 | return false; |
1823 | |
1824 | bool nonfakeedges = false; |
1825 | FOR_EACH_EDGE (e1, ei, bb1->succs)for ((ei) = ei_start_1 (&((bb1->succs))); ei_cond ((ei ), &(e1)); ei_next (&(ei))) |
1826 | { |
1827 | e2 = EDGE_SUCC (bb2, ei.index)(*(bb2)->succs)[(ei.index)]; |
1828 | |
1829 | if ((e1->flags & EDGE_FAKE) == 0) |
1830 | nonfakeedges = true; |
1831 | |
1832 | if (e1->flags & EDGE_EH) |
1833 | nehedges1++; |
1834 | |
1835 | if (e2->flags & EDGE_EH) |
1836 | nehedges2++; |
1837 | |
1838 | if (e1->flags & EDGE_FALLTHRU) |
1839 | fallthru1 = e1; |
1840 | if (e2->flags & EDGE_FALLTHRU) |
1841 | fallthru2 = e2; |
1842 | } |
1843 | |
1844 | /* If number of edges of various types does not match, fail. */ |
1845 | if (nehedges1 != nehedges2 |
1846 | || (fallthru1 != 0) != (fallthru2 != 0)) |
1847 | return false; |
1848 | |
1849 | /* If !ACCUMULATE_OUTGOING_ARGS, bb1 (and bb2) have no successors |
1850 | and the last real insn doesn't have REG_ARGS_SIZE note, don't |
1851 | attempt to optimize, as the two basic blocks might have different |
1852 | REG_ARGS_SIZE depths. For noreturn calls and unconditional |
1853 | traps there should be REG_ARGS_SIZE notes, but they could be missing
1854 | for __builtin_unreachable () uses though. */ |
1855 | if (!nonfakeedges |
1856 | && !ACCUMULATE_OUTGOING_ARGS((((global_options.x_target_flags & (1U << 3)) != 0 ) && optimize_function_for_speed_p ((cfun + 0))) || ( (cfun + 0)->machine->func_type != TYPE_NORMAL && (&x_rtl)->stack_realign_needed) || ((global_options.x_target_flags & (1U << 26)) != 0) || (((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) && ix86_cfun_abi () == MS_ABI) || (0 && (&x_rtl)->profile)) |
1857 | && (!INSN_P (last1)(((((enum rtx_code) (last1)->code) == INSN) || (((enum rtx_code ) (last1)->code) == JUMP_INSN) || (((enum rtx_code) (last1 )->code) == CALL_INSN)) || (((enum rtx_code) (last1)->code ) == DEBUG_INSN)) |
1858 | || !find_reg_note (last1, REG_ARGS_SIZE, NULLnullptr))) |
1859 | return false; |
1860 | |
1861 | /* fallthru edges must be forwarded to the same destination. */ |
1862 | if (fallthru1) |
1863 | { |
1864 | basic_block d1 = (forwarder_block_p (fallthru1->dest) |
1865 | ? single_succ (fallthru1->dest): fallthru1->dest); |
1866 | basic_block d2 = (forwarder_block_p (fallthru2->dest) |
1867 | ? single_succ (fallthru2->dest): fallthru2->dest); |
1868 | |
1869 | if (d1 != d2) |
1870 | return false; |
1871 | } |
1872 | |
1873 | /* Ensure the same EH region. */ |
1874 | { |
1875 | rtx n1 = find_reg_note (last1, REG_EH_REGION, 0); |
1876 | rtx n2 = find_reg_note (last2, REG_EH_REGION, 0); |
1877 | |
1878 | if (!n1 && n2) |
1879 | return false; |
1880 | |
1881 | if (n1 && (!n2 || XEXP (n1, 0)(((n1)->u.fld[0]).rt_rtx) != XEXP (n2, 0)(((n2)->u.fld[0]).rt_rtx))) |
1882 | return false; |
1883 | } |
1884 | |
1885 | /* The same checks as in try_crossjump_to_edge. It is required for the RTL
1886 | version of sequence abstraction. */ |
1887 | FOR_EACH_EDGE (e1, ei, bb2->succs)for ((ei) = ei_start_1 (&((bb2->succs))); ei_cond ((ei ), &(e1)); ei_next (&(ei))) |
1888 | { |
1889 | edge e2; |
1890 | edge_iterator ei; |
1891 | basic_block d1 = e1->dest; |
1892 | |
1893 | if (FORWARDER_BLOCK_P (d1)((d1)->flags & BB_FORWARDER_BLOCK)) |
1894 | d1 = EDGE_SUCC (d1, 0)(*(d1)->succs)[(0)]->dest; |
1895 | |
1896 | FOR_EACH_EDGE (e2, ei, bb1->succs)for ((ei) = ei_start_1 (&((bb1->succs))); ei_cond ((ei ), &(e2)); ei_next (&(ei))) |
1897 | { |
1898 | basic_block d2 = e2->dest; |
1899 | if (FORWARDER_BLOCK_P (d2)((d2)->flags & BB_FORWARDER_BLOCK)) |
1900 | d2 = EDGE_SUCC (d2, 0)(*(d2)->succs)[(0)]->dest; |
1901 | if (d1 == d2) |
1902 | break; |
1903 | } |
1904 | |
1905 | if (!e2) |
1906 | return false; |
1907 | } |
1908 | |
1909 | return true; |
1910 | } |
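The branch-probability guard inside outgoing_edges_match relies on profile_probability::differs_lot_from_p, whose implementation lies outside this excerpt. The sketch below only illustrates the arithmetic described in the comment ("fail if the difference in probabilities is greater than 50%") on plain doubles; the function name and the 0.5 threshold are illustrative, not GCC's internal representation.

#include <math.h>
#include <stdbool.h>

/* Illustration only: reject crossjumping when the two branches disagree
   by more than 50% on how often the common destination is taken.  */
static bool
branch_outcomes_differ_too_much (double prob1, double prob2)
{
  return fabs (prob1 - prob2) > 0.5;
}

/* Example: 0.90 vs. 0.15 differ by 0.75, so the merge is rejected;
   0.60 vs. 0.45 differ by 0.15, so the outcomes count as similar.  */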
1911 | |
1912 | /* Returns true if basic block BB has a preserve label. */
1913 | |
1914 | static bool |
1915 | block_has_preserve_label (basic_block bb) |
1916 | { |
1917 | return (bb |
1918 | && block_label (bb) |
1919 | && LABEL_PRESERVE_P (block_label (bb))(__extension__ ({ __typeof ((block_label (bb))) const _rtx = ( (block_label (bb))); if (((enum rtx_code) (_rtx)->code) != CODE_LABEL && ((enum rtx_code) (_rtx)->code) != NOTE ) rtl_check_failed_flag ("LABEL_PRESERVE_P",_rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgcleanup.cc" , 1919, __FUNCTION__); _rtx; })->in_struct)); |
1920 | } |
1921 | |
1922 | /* E1 and E2 are edges with the same destination block. Search their |
1923 | predecessors for common code. If found, redirect control flow from |
1924 | (maybe the middle of) E1->SRC to (maybe the middle of) E2->SRC (dir_forward), |
1925 | or the other way around (dir_backward). DIR specifies the allowed |
1926 | replacement direction. */ |
1927 | |
1928 | static bool |
1929 | try_crossjump_to_edge (int mode, edge e1, edge e2, |
1930 | enum replace_direction dir) |
1931 | { |
1932 | int nmatch; |
1933 | basic_block src1 = e1->src, src2 = e2->src; |
1934 | basic_block redirect_to, redirect_from, to_remove; |
1935 | basic_block osrc1, osrc2, redirect_edges_to, tmp; |
1936 | rtx_insn *newpos1, *newpos2; |
1937 | edge s; |
1938 | edge_iterator ei; |
1939 | |
1940 | newpos1 = newpos2 = NULLnullptr; |
1941 | |
1942 | /* Search backward through forwarder blocks. We don't need to worry |
1943 | about multiple entry or chained forwarders, as they will be optimized |
1944 | away. We do this to look past the unconditional jump following a |
1945 | conditional jump that is required due to the current CFG shape. */ |
1946 | if (single_pred_p (src1) |
1947 | && FORWARDER_BLOCK_P (src1)((src1)->flags & BB_FORWARDER_BLOCK)) |
1948 | e1 = single_pred_edge (src1), src1 = e1->src; |
1949 | |
1950 | if (single_pred_p (src2) |
1951 | && FORWARDER_BLOCK_P (src2)((src2)->flags & BB_FORWARDER_BLOCK)) |
1952 | e2 = single_pred_edge (src2), src2 = e2->src; |
1953 | |
1954 | /* Nothing to do if we reach ENTRY, or a common source block. */ |
1955 | if (src1 == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr) || src2 |
1956 | == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)) |
1957 | return false; |
1958 | if (src1 == src2) |
1959 | return false; |
1960 | |
1961 | /* Seeing more than one forwarder block would confuse us later... */
1962 | if (FORWARDER_BLOCK_P (e1->dest)((e1->dest)->flags & BB_FORWARDER_BLOCK) |
1963 | && FORWARDER_BLOCK_P (single_succ (e1->dest))((single_succ (e1->dest))->flags & BB_FORWARDER_BLOCK )) |
1964 | return false; |
1965 | |
1966 | if (FORWARDER_BLOCK_P (e2->dest)((e2->dest)->flags & BB_FORWARDER_BLOCK) |
1967 | && FORWARDER_BLOCK_P (single_succ (e2->dest))((single_succ (e2->dest))->flags & BB_FORWARDER_BLOCK )) |
1968 | return false; |
1969 | |
1970 | /* Likewise with dead code (possibly newly created by the other optimizations |
1971 | of cleanup_cfg). */
1972 | if (EDGE_COUNT (src1->preds)vec_safe_length (src1->preds) == 0 || EDGE_COUNT (src2->preds)vec_safe_length (src2->preds) == 0) |
1973 | return false; |
1974 | |
1975 | /* Do not turn a crossing edge into a non-crossing edge or vice versa after reload. */
1976 | if (BB_PARTITION (src1)((src1)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)) != BB_PARTITION (src2)((src2)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)) |
1977 | && reload_completed) |
1978 | return false; |
1979 | |
1980 | /* Look for the common insn sequence, part the first ... */ |
1981 | if (!outgoing_edges_match (mode, src1, src2)) |
1982 | return false; |
1983 | |
1984 | /* ... and part the second. */ |
1985 | nmatch = flow_find_cross_jump (src1, src2, &newpos1, &newpos2, &dir); |
1986 | |
1987 | osrc1 = src1; |
1988 | osrc2 = src2; |
1989 | if (newpos1 != NULL_RTX(rtx) 0) |
1990 | src1 = BLOCK_FOR_INSN (newpos1); |
1991 | if (newpos2 != NULL_RTX(rtx) 0) |
1992 | src2 = BLOCK_FOR_INSN (newpos2); |
1993 | |
1994 | /* Check that SRC1 and SRC2 have preds again. They may have changed |
1995 | above due to the call to flow_find_cross_jump. */ |
1996 | if (EDGE_COUNT (src1->preds)vec_safe_length (src1->preds) == 0 || EDGE_COUNT (src2->preds)vec_safe_length (src2->preds) == 0) |
1997 | return false; |
1998 | |
1999 | if (dir == dir_backward) |
2000 | { |
2001 | std::swap (osrc1, osrc2); |
2002 | std::swap (src1, src2); |
2003 | std::swap (e1, e2); |
2004 | std::swap (newpos1, newpos2); |
2005 | } |
2006 | |
2007 | /* Don't proceed with the crossjump unless we found a sufficient number |
2008 | of matching instructions or the 'from' block was totally matched |
2009 | (such that its predecessors will hopefully be redirected and the |
2010 | block removed). */ |
2011 | if ((nmatch < param_min_crossjump_insnsglobal_options.x_param_min_crossjump_insns) |
2012 | && (newpos1 != BB_HEAD (src1)(src1)->il.x.head_)) |
2013 | return false; |
2014 | |
2015 | /* Avoid deleting preserve label when redirecting ABNORMAL edges. */ |
2016 | if (block_has_preserve_label (e1->dest) |
2017 | && (e1->flags & EDGE_ABNORMAL)) |
2018 | return false; |
2019 | |
2020 | /* Here we know that the insns in the end of SRC1 which are common with SRC2 |
2021 | will be deleted. |
2022 | If we have tablejumps in the end of SRC1 and SRC2 |
2023 | they have been already compared for equivalence in outgoing_edges_match () |
2024 | so replace the references to TABLE1 by references to TABLE2. */ |
2025 | { |
2026 | rtx_insn *label1, *label2; |
2027 | rtx_jump_table_data *table1, *table2; |
2028 | |
2029 | if (tablejump_p (BB_END (osrc1)(osrc1)->il.x.rtl->end_, &label1, &table1) |
2030 | && tablejump_p (BB_END (osrc2)(osrc2)->il.x.rtl->end_, &label2, &table2) |
2031 | && label1 != label2) |
2032 | { |
2033 | rtx_insn *insn; |
2034 | |
2035 | /* Replace references to LABEL1 with LABEL2. */ |
2036 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
2037 | { |
2038 | /* Do not replace the label in SRC1->END because when deleting |
2039 | a block whose end is a tablejump, the tablejump referenced |
2040 | from the instruction is deleted too. */ |
2041 | if (insn != BB_END (osrc1)(osrc1)->il.x.rtl->end_) |
2042 | replace_label_in_insn (insn, label1, label2, true); |
2043 | } |
2044 | } |
2045 | } |
2046 | |
2047 | /* Avoid splitting if possible. We must always split when SRC2 has |
2048 | EH predecessor edges, or we may end up with basic blocks with both |
2049 | normal and EH predecessor edges. */ |
2050 | if (newpos2 == BB_HEAD (src2)(src2)->il.x.head_ |
2051 | && !(EDGE_PRED (src2, 0)(*(src2)->preds)[(0)]->flags & EDGE_EH)) |
2052 | redirect_to = src2; |
2053 | else |
2054 | { |
2055 | if (newpos2 == BB_HEAD (src2)(src2)->il.x.head_) |
2056 | { |
2057 | /* Skip possible basic block header. */ |
2058 | if (LABEL_P (newpos2)(((enum rtx_code) (newpos2)->code) == CODE_LABEL)) |
2059 | newpos2 = NEXT_INSN (newpos2); |
2060 | while (DEBUG_INSN_P (newpos2)(((enum rtx_code) (newpos2)->code) == DEBUG_INSN)) |
2061 | newpos2 = NEXT_INSN (newpos2); |
2062 | if (NOTE_P (newpos2)(((enum rtx_code) (newpos2)->code) == NOTE)) |
2063 | newpos2 = NEXT_INSN (newpos2); |
2064 | while (DEBUG_INSN_P (newpos2)(((enum rtx_code) (newpos2)->code) == DEBUG_INSN)) |
2065 | newpos2 = NEXT_INSN (newpos2); |
2066 | } |
2067 | |
2068 | if (dump_file) |
2069 | fprintf (dump_file, "Splitting bb %i before %i insns\n", |
2070 | src2->index, nmatch); |
2071 | redirect_to = split_block (src2, PREV_INSN (newpos2))->dest; |
2072 | } |
2073 | |
2074 | if (dump_file) |
2075 | fprintf (dump_file, |
2076 | "Cross jumping from bb %i to bb %i; %i common insns\n", |
2077 | src1->index, src2->index, nmatch); |
2078 | |
2079 | /* We may have some registers visible through the block. */ |
2080 | df_set_bb_dirty (redirect_to); |
2081 | |
2082 | if (osrc2 == src2) |
2083 | redirect_edges_to = redirect_to; |
2084 | else |
2085 | redirect_edges_to = osrc2; |
2086 | |
2087 | /* Recompute the counts of destinations of outgoing edges. */ |
2088 | FOR_EACH_EDGE (s, ei, redirect_edges_to->succs)for ((ei) = ei_start_1 (&((redirect_edges_to->succs))) ; ei_cond ((ei), &(s)); ei_next (&(ei))) |
2089 | { |
2090 | edge s2; |
2091 | edge_iterator ei; |
2092 | basic_block d = s->dest; |
2093 | |
2094 | if (FORWARDER_BLOCK_P (d)((d)->flags & BB_FORWARDER_BLOCK)) |
2095 | d = single_succ (d); |
2096 | |
2097 | FOR_EACH_EDGE (s2, ei, src1->succs)for ((ei) = ei_start_1 (&((src1->succs))); ei_cond ((ei ), &(s2)); ei_next (&(ei))) |
2098 | { |
2099 | basic_block d2 = s2->dest; |
2100 | if (FORWARDER_BLOCK_P (d2)((d2)->flags & BB_FORWARDER_BLOCK)) |
2101 | d2 = single_succ (d2); |
2102 | if (d == d2) |
2103 | break; |
2104 | } |
2105 | |
2106 | /* Take care to update possible forwarder blocks. We verified |
2107 | that there is no more than one in the chain, so we can't run |
2108 | into an infinite loop. */
2109 | if (FORWARDER_BLOCK_P (s->dest)((s->dest)->flags & BB_FORWARDER_BLOCK)) |
2110 | s->dest->count += s->count (); |
2111 | |
2112 | if (FORWARDER_BLOCK_P (s2->dest)((s2->dest)->flags & BB_FORWARDER_BLOCK)) |
2113 | s2->dest->count -= s->count (); |
2114 | |
2115 | s->probability = s->probability.combine_with_count |
2116 | (redirect_edges_to->count, |
2117 | s2->probability, src1->count); |
2118 | } |
2119 | |
2120 | /* Adjust count for the block. An earlier jump |
2121 | threading pass may have left the profile in an inconsistent |
2122 | state (see update_bb_profile_for_threading) so we must be |
2123 | prepared for overflows. */ |
2124 | tmp = redirect_to; |
2125 | do |
2126 | { |
2127 | tmp->count += src1->count; |
2128 | if (tmp == redirect_edges_to) |
2129 | break; |
2130 | tmp = find_fallthru_edge (tmp->succs)->dest; |
2131 | } |
2132 | while (true); |
2133 | update_br_prob_note (redirect_edges_to); |
2134 | |
2135 | /* Edit SRC1 to go to REDIRECT_TO at NEWPOS1. */ |
2136 | |
2137 | /* Skip possible basic block header. */ |
2138 | if (LABEL_P (newpos1)(((enum rtx_code) (newpos1)->code) == CODE_LABEL)) |
2139 | newpos1 = NEXT_INSN (newpos1); |
2140 | |
2141 | while (DEBUG_INSN_P (newpos1)(((enum rtx_code) (newpos1)->code) == DEBUG_INSN)) |
2142 | newpos1 = NEXT_INSN (newpos1); |
2143 | |
2144 | if (NOTE_INSN_BASIC_BLOCK_P (newpos1)((((enum rtx_code) (newpos1)->code) == NOTE) && (( (newpos1)->u.fld[4]).rt_int) == NOTE_INSN_BASIC_BLOCK)) |
2145 | newpos1 = NEXT_INSN (newpos1); |
2146 | |
2147 | /* Also skip prologue and function markers. */
2148 | while (DEBUG_INSN_P (newpos1)(((enum rtx_code) (newpos1)->code) == DEBUG_INSN) |
2149 | || (NOTE_P (newpos1)(((enum rtx_code) (newpos1)->code) == NOTE) |
2150 | && (NOTE_KIND (newpos1)(((newpos1)->u.fld[4]).rt_int) == NOTE_INSN_PROLOGUE_END |
2151 | || NOTE_KIND (newpos1)(((newpos1)->u.fld[4]).rt_int) == NOTE_INSN_FUNCTION_BEG))) |
2152 | newpos1 = NEXT_INSN (newpos1); |
2153 | |
2154 | redirect_from = split_block (src1, PREV_INSN (newpos1))->src; |
2155 | to_remove = single_succ (redirect_from); |
2156 | |
2157 | redirect_edge_and_branch_force (single_succ_edge (redirect_from), redirect_to); |
2158 | delete_basic_block (to_remove); |
2159 | |
2160 | update_forwarder_flag (redirect_from); |
2161 | if (redirect_to != src2) |
2162 | update_forwarder_flag (src2); |
2163 | |
2164 | return true; |
2165 | } |
2166 | |
2167 | /* Search the predecessors of BB for common insn sequences. When found, |
2168 | share code between them by redirecting control flow. Return true if |
2169 | any changes were made. */
2170 | |
2171 | static bool |
2172 | try_crossjump_bb (int mode, basic_block bb) |
2173 | { |
2174 | edge e, e2, fallthru; |
2175 | bool changed; |
2176 | unsigned max, ix, ix2; |
2177 | |
2178 | /* Nothing to do if there are not at least two incoming edges. */
2179 | if (EDGE_COUNT (bb->preds)vec_safe_length (bb->preds) < 2) |
2180 | return false; |
2181 | |
2182 | /* Don't crossjump if this block ends in a computed jump, |
2183 | unless we are optimizing for size. */ |
2184 | if (optimize_bb_for_size_p (bb) |
2185 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr) |
2186 | && computed_jump_p (BB_END (bb)(bb)->il.x.rtl->end_)) |
2187 | return false; |
2188 | |
2189 | /* If we are partitioning hot/cold basic blocks, we don't want to |
2190 | mess up unconditional or indirect jumps that cross between hot |
2191 | and cold sections. |
2192 | |
2193 | Basic block partitioning may result in some jumps that appear to |
2194 | be optimizable (or blocks that appear to be mergeable), but which really |
2195 | must be left untouched (they are required to make it safely across |
2196 | partition boundaries). See the comments at the top of |
2197 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
2198 | |
2199 | if (BB_PARTITION (EDGE_PRED (bb, 0)->src)(((*(bb)->preds)[(0)]->src)->flags & (BB_HOT_PARTITION |BB_COLD_PARTITION)) != |
2200 | BB_PARTITION (EDGE_PRED (bb, 1)->src)(((*(bb)->preds)[(1)]->src)->flags & (BB_HOT_PARTITION |BB_COLD_PARTITION)) |
2201 | || (EDGE_PRED (bb, 0)(*(bb)->preds)[(0)]->flags & EDGE_CROSSING)) |
2202 | return false; |
2203 | |
2204 | /* It is always cheapest to redirect a block that ends in a branch to |
2205 | a block that falls through into BB, as that adds no branches to the |
2206 | program. We'll try that combination first. */ |
2207 | fallthru = NULLnullptr; |
2208 | max = param_max_crossjump_edgesglobal_options.x_param_max_crossjump_edges; |
2209 | |
2210 | if (EDGE_COUNT (bb->preds)vec_safe_length (bb->preds) > max) |
2211 | return false; |
2212 | |
2213 | fallthru = find_fallthru_edge (bb->preds); |
2214 | |
2215 | changed = false; |
2216 | for (ix = 0; ix < EDGE_COUNT (bb->preds)vec_safe_length (bb->preds);) |
2217 | { |
2218 | e = EDGE_PRED (bb, ix)(*(bb)->preds)[(ix)]; |
2219 | ix++; |
2220 | |
2221 | /* As noted above, first try with the fallthru predecessor (or, a |
2222 | fallthru predecessor if we are in cfglayout mode). */ |
2223 | if (fallthru) |
2224 | { |
2225 | /* Don't combine the fallthru edge into anything else. |
2226 | If there is a match, we'll do it the other way around. */ |
2227 | if (e == fallthru) |
2228 | continue; |
2229 | /* If nothing changed since the last attempt, there is nothing |
2230 | we can do. */ |
2231 | if (!first_pass |
2232 | && !((e->src->flags & BB_MODIFIED) |
2233 | || (fallthru->src->flags & BB_MODIFIED))) |
2234 | continue; |
2235 | |
2236 | if (try_crossjump_to_edge (mode, e, fallthru, dir_forward)) |
2237 | { |
2238 | changed = true; |
2239 | ix = 0; |
2240 | continue; |
2241 | } |
2242 | } |
2243 | |
2244 | /* Non-obvious work limiting check: Recognize that we're going |
2245 | to call try_crossjump_bb on every basic block. So if we have |
2246 | two blocks with lots of outgoing edges (a switch) and they |
2247 | share lots of common destinations, then we would do the |
2248 | cross-jump check once for each common destination. |
2249 | |
2250 | Now, if the blocks actually are cross-jump candidates, then |
2251 | all of their destinations will be shared. Which means that |
2252 | we only need check them for cross-jump candidacy once. We |
2253 | can eliminate redundant checks of crossjump(A,B) by arbitrarily |
2254 | choosing to do the check from the block for which the edge |
2255 | in question is the first successor of A. */ |
2256 | if (EDGE_SUCC (e->src, 0)(*(e->src)->succs)[(0)] != e) |
2257 | continue; |
2258 | |
2259 | for (ix2 = 0; ix2 < EDGE_COUNT (bb->preds)vec_safe_length (bb->preds); ix2++) |
2260 | { |
2261 | e2 = EDGE_PRED (bb, ix2)(*(bb)->preds)[(ix2)]; |
2262 | |
2263 | if (e2 == e) |
2264 | continue; |
2265 | |
2266 | /* We've already checked the fallthru edge above. */ |
2267 | if (e2 == fallthru) |
2268 | continue; |
2269 | |
2270 | /* The "first successor" check above only prevents multiple |
2271 | checks of crossjump(A,B). In order to prevent redundant |
2272 | checks of crossjump(B,A), require that A be the block |
2273 | with the lowest index. */ |
2274 | if (e->src->index > e2->src->index) |
2275 | continue; |
2276 | |
2277 | /* If nothing changed since the last attempt, there is nothing |
2278 | we can do. */ |
2279 | if (!first_pass |
2280 | && !((e->src->flags & BB_MODIFIED) |
2281 | || (e2->src->flags & BB_MODIFIED))) |
2282 | continue; |
2283 | |
2284 | /* Neither e nor e2 is a fallthru edge, so we can crossjump in either
2285 | direction. */ |
2286 | if (try_crossjump_to_edge (mode, e, e2, dir_both)) |
2287 | { |
2288 | changed = true; |
2289 | ix = 0; |
2290 | break; |
2291 | } |
2292 | } |
2293 | } |
2294 | |
2295 | if (changed) |
2296 | crossjumps_occurred = true; |
2297 | |
2298 | return changed; |
2299 | } |
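The two filters in the inner loop of try_crossjump_bb (the "first successor" test and the lower-index test) exist only to ensure each unordered pair of predecessors is examined once. A minimal standalone sketch of that pairing rule, with invented names; it is not code from cfgcleanup.cc.

#include <stdbool.h>

/* Examine the pair (A, B) only from the side A whose outgoing edge to the
   common block is A's first successor and whose block index is not larger
   than B's, so crossjump (A, B) and crossjump (B, A) are never both tried.
   Distinct basic blocks have distinct indexes, so <= matches the code's
   "skip if e->src->index > e2->src->index" test.  */
static bool
should_examine_pair (bool edge_is_first_succ_of_a, int a_index, int b_index)
{
  return edge_is_first_succ_of_a && a_index <= b_index;
}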
2300 | |
2301 | /* Search the successors of BB for common insn sequences. When found, |
2302 | share code between them by moving it across the basic block |
2303 | boundary. Return true if any changes were made. */
2304 | |
2305 | static bool |
2306 | try_head_merge_bb (basic_block bb) |
2307 | { |
2308 | basic_block final_dest_bb = NULLnullptr; |
2309 | int max_match = INT_MAX2147483647; |
2310 | edge e0; |
2311 | rtx_insn **headptr, **currptr, **nextptr; |
2312 | bool changed, moveall; |
2313 | unsigned ix; |
2314 | rtx_insn *e0_last_head; |
2315 | rtx cond; |
2316 | rtx_insn *move_before; |
2317 | unsigned nedges = EDGE_COUNT (bb->succs)vec_safe_length (bb->succs); |
2318 | rtx_insn *jump = BB_END (bb)(bb)->il.x.rtl->end_; |
2319 | regset live, live_union; |
2320 | |
2321 | /* Nothing to do if there are not at least two outgoing edges. */
2322 | if (nedges < 2) |
2323 | return false; |
2324 | |
2325 | /* Don't crossjump if this block ends in a computed jump, |
2326 | unless we are optimizing for size. */ |
2327 | if (optimize_bb_for_size_p (bb) |
2328 | && bb != EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr) |
2329 | && computed_jump_p (BB_END (bb)(bb)->il.x.rtl->end_)) |
2330 | return false; |
2331 | |
2332 | cond = get_condition (jump, &move_before, true, false); |
2333 | if (cond == NULL_RTX(rtx) 0) |
2334 | move_before = jump; |
2335 | |
2336 | for (ix = 0; ix < nedges; ix++) |
2337 | if (EDGE_SUCC (bb, ix)(*(bb)->succs)[(ix)]->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)) |
2338 | return false; |
2339 | |
2340 | for (ix = 0; ix < nedges; ix++) |
2341 | { |
2342 | edge e = EDGE_SUCC (bb, ix)(*(bb)->succs)[(ix)]; |
2343 | basic_block other_bb = e->dest; |
2344 | |
2345 | if (df_get_bb_dirty (other_bb)) |
2346 | { |
2347 | block_was_dirty = true; |
2348 | return false; |
2349 | } |
2350 | |
2351 | if (e->flags & EDGE_ABNORMAL) |
2352 | return false; |
2353 | |
2354 | /* Normally, all destination blocks must only be reachable from this |
2355 | block, i.e. they must have one incoming edge. |
2356 | |
2357 | There is one special case we can handle, that of multiple consecutive |
2358 | jumps where the first jumps to one of the targets of the second jump. |
2359 | This happens frequently in switch statements for default labels. |
2360 | The structure is as follows: |
2361 | FINAL_DEST_BB |
2362 | .... |
2363 | if (cond) jump A; |
2364 | fall through |
2365 | BB |
2366 | jump with targets A, B, C, D... |
2367 | A |
2368 | has two incoming edges, from FINAL_DEST_BB and BB |
2369 | |
2370 | In this case, we can try to move the insns through BB and into |
2371 | FINAL_DEST_BB. */ |
2372 | if (EDGE_COUNT (other_bb->preds)vec_safe_length (other_bb->preds) != 1) |
2373 | { |
2374 | edge incoming_edge, incoming_bb_other_edge; |
2375 | edge_iterator ei; |
2376 | |
2377 | if (final_dest_bb != NULLnullptr |
2378 | || EDGE_COUNT (other_bb->preds)vec_safe_length (other_bb->preds) != 2) |
2379 | return false; |
2380 | |
2381 | /* We must be able to move the insns across the whole block. */ |
2382 | move_before = BB_HEAD (bb)(bb)->il.x.head_; |
2383 | while (!NONDEBUG_INSN_P (move_before)((((enum rtx_code) (move_before)->code) == INSN) || (((enum rtx_code) (move_before)->code) == JUMP_INSN) || (((enum rtx_code ) (move_before)->code) == CALL_INSN))) |
2384 | move_before = NEXT_INSN (move_before); |
2385 | |
2386 | if (EDGE_COUNT (bb->preds)vec_safe_length (bb->preds) != 1) |
2387 | return false; |
2388 | incoming_edge = EDGE_PRED (bb, 0)(*(bb)->preds)[(0)]; |
2389 | final_dest_bb = incoming_edge->src; |
2390 | if (EDGE_COUNT (final_dest_bb->succs)vec_safe_length (final_dest_bb->succs) != 2) |
2391 | return false; |
2392 | FOR_EACH_EDGE (incoming_bb_other_edge, ei, final_dest_bb->succs)for ((ei) = ei_start_1 (&((final_dest_bb->succs))); ei_cond ((ei), &(incoming_bb_other_edge)); ei_next (&(ei))) |
2393 | if (incoming_bb_other_edge != incoming_edge) |
2394 | break; |
2395 | if (incoming_bb_other_edge->dest != other_bb) |
2396 | return false; |
2397 | } |
2398 | } |
2399 | |
2400 | e0 = EDGE_SUCC (bb, 0)(*(bb)->succs)[(0)]; |
2401 | e0_last_head = NULLnullptr; |
2402 | changed = false; |
2403 | |
2404 | for (ix = 1; ix < nedges; ix++) |
2405 | { |
2406 | edge e = EDGE_SUCC (bb, ix)(*(bb)->succs)[(ix)]; |
2407 | rtx_insn *e0_last, *e_last; |
2408 | int nmatch; |
2409 | |
2410 | nmatch = flow_find_head_matching_sequence (e0->dest, e->dest, |
2411 | &e0_last, &e_last, 0); |
2412 | if (nmatch == 0) |
2413 | return false; |
2414 | |
2415 | if (nmatch < max_match) |
2416 | { |
2417 | max_match = nmatch; |
2418 | e0_last_head = e0_last; |
2419 | } |
2420 | } |
2421 | |
2422 | /* If we matched an entire block, we probably have to avoid moving the |
2423 | last insn. */ |
2424 | if (max_match > 0 |
2425 | && e0_last_head == BB_END (e0->dest)(e0->dest)->il.x.rtl->end_ |
2426 | && (find_reg_note (e0_last_head, REG_EH_REGION, 0) |
2427 | || control_flow_insn_p (e0_last_head))) |
2428 | { |
2429 | max_match--; |
2430 | if (max_match == 0) |
2431 | return false; |
2432 | e0_last_head = prev_real_nondebug_insn (e0_last_head); |
2433 | } |
2434 | |
2435 | if (max_match == 0) |
2436 | return false; |
2437 | |
2438 | /* We must find a union of the live registers at each of the end points. */ |
2439 | live = BITMAP_ALLOC (NULL); |
2440 | live_union = BITMAP_ALLOC (NULL); |
2441 |
2442 | currptr = XNEWVEC (rtx_insn *, nedges); |
2443 | headptr = XNEWVEC (rtx_insn *, nedges); |
2444 | nextptr = XNEWVEC (rtx_insn *, nedges); |
2445 | |
2446 | for (ix = 0; ix < nedges; ix++) |
2447 | { |
2448 | int j; |
2449 | basic_block merge_bb = EDGE_SUCC (bb, ix)->dest; |
2450 | rtx_insn *head = BB_HEAD (merge_bb); |
2451 |
2452 | while (!NONDEBUG_INSN_P (head)) |
2453 | head = NEXT_INSN (head); |
2454 | headptr[ix] = head; |
2455 | currptr[ix] = head; |
2456 | |
2457 | /* Compute the end point and live information */ |
2458 | for (j = 1; j < max_match; j++) |
2459 | do |
2460 | head = NEXT_INSN (head); |
2461 | while (!NONDEBUG_INSN_P (head)); |
2462 | simulate_backwards_to_point (merge_bb, live, head); |
2463 | IOR_REG_SET (live_union, live); |
2464 | } |
2465 | |
2466 | /* If we're moving across two blocks, verify the validity of the |
2467 | first move, then adjust the target and let the loop below deal |
2468 | with the final move. */ |
2469 | if (final_dest_bb != NULL) |
2470 | { |
2471 | rtx_insn *move_upto; |
2472 | |
2473 | moveall = can_move_insns_across (currptr[0], e0_last_head, move_before, |
2474 | jump, e0->dest, live_union, |
2475 | NULL, &move_upto); |
2476 | if (!moveall) |
2477 | { |
2478 | if (move_upto == NULL_RTX) |
2479 | goto out; |
2480 | |
2481 | while (e0_last_head != move_upto) |
2482 | { |
2483 | df_simulate_one_insn_backwards (e0->dest, e0_last_head, |
2484 | live_union); |
2485 | e0_last_head = PREV_INSN (e0_last_head); |
2486 | } |
2487 | } |
2488 | if (e0_last_head == NULL_RTX) |
2489 | goto out; |
2490 | |
2491 | jump = BB_END (final_dest_bb); |
2492 | cond = get_condition (jump, &move_before, true, false); |
2493 | if (cond == NULL_RTX) |
2494 | move_before = jump; |
2495 | } |
2496 | |
2497 | do |
2498 | { |
2499 | rtx_insn *move_upto; |
2500 | moveall = can_move_insns_across (currptr[0], e0_last_head, |
2501 | move_before, jump, e0->dest, live_union, |
2502 | NULL, &move_upto); |
2503 | if (!moveall && move_upto == NULL_RTX) |
2504 | { |
2505 | if (jump == move_before) |
2506 | break; |
2507 | |
2508 | /* Try again, using a different insertion point. */ |
2509 | move_before = jump; |
2510 | |
2511 | continue; |
2512 | } |
2513 | |
2514 | if (final_dest_bb && !moveall) |
2515 | /* We haven't checked whether a partial move would be OK for the first |
2516 | move, so we have to fail this case. */ |
2517 | break; |
2518 | |
2519 | changed = true; |
2520 | for (;;) |
2521 | { |
2522 | if (currptr[0] == move_upto) |
2523 | break; |
2524 | for (ix = 0; ix < nedges; ix++) |
2525 | { |
2526 | rtx_insn *curr = currptr[ix]; |
2527 | do |
2528 | curr = NEXT_INSN (curr); |
2529 | while (!NONDEBUG_INSN_P (curr)); |
2530 | currptr[ix] = curr; |
2531 | } |
2532 | } |
2533 | |
2534 | /* If we can't currently move all of the identical insns, remember |
2535 | each insn after the range that we'll merge. */ |
2536 | if (!moveall) |
2537 | for (ix = 0; ix < nedges; ix++) |
2538 | { |
2539 | rtx_insn *curr = currptr[ix]; |
2540 | do |
2541 | curr = NEXT_INSN (curr); |
2542 | while (!NONDEBUG_INSN_P (curr)); |
2543 | nextptr[ix] = curr; |
2544 | } |
2545 | |
2546 | reorder_insns (headptr[0], currptr[0], PREV_INSN (move_before)); |
2547 | df_set_bb_dirty (EDGE_SUCC (bb, 0)->dest); |
2548 | if (final_dest_bb != NULL) |
2549 | df_set_bb_dirty (final_dest_bb); |
2550 | df_set_bb_dirty (bb); |
2551 | for (ix = 1; ix < nedges; ix++) |
2552 | { |
2553 | df_set_bb_dirty (EDGE_SUCC (bb, ix)->dest); |
2554 | delete_insn_chain (headptr[ix], currptr[ix], false); |
2555 | } |
2556 | if (!moveall) |
2557 | { |
2558 | if (jump == move_before) |
2559 | break; |
2560 | |
2561 | /* For the unmerged insns, try a different insertion point. */ |
2562 | move_before = jump; |
2563 | |
2564 | for (ix = 0; ix < nedges; ix++) |
2565 | currptr[ix] = headptr[ix] = nextptr[ix]; |
2566 | } |
2567 | } |
2568 | while (!moveall); |
2569 | |
2570 | out: |
2571 | free (currptr); |
2572 | free (headptr); |
2573 | free (nextptr); |
2574 | |
2575 | crossjumps_occurred |= changed; |
2576 | |
2577 | return changed; |
2578 | } |
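To make the head-merging case above concrete, here is a small, purely illustrative C fragment (it is not part of cfgcleanup.cc; the function and variable names are invented). Every successor of the switch dispatch block begins with the same store, so that insn can be hoisted into the dispatch block, or, in the two-block situation described in the FINAL_DEST_BB comment, into the block that conditionally jumps around it:

  /* Illustrative only -- hypothetical source, not GCC code.  */
  int
  example (int op, int *out)
  {
    switch (op)
      {
      case 0:
        *out = 0;	/* identical head insn in every successor */
        return 1;
      case 1:
        *out = 0;	/* identical head insn in every successor */
        return 2;
      default:
        *out = 0;	/* identical head insn in every successor */
        return -1;
      }
  }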
2579 | |
2580 | /* Return true if BB contains just bb note, or bb note followed |
2581 | by only DEBUG_INSNs. */ |
2582 | |
2583 | static bool |
2584 | trivially_empty_bb_p (basic_block bb) |
2585 | { |
2586 | rtx_insn *insn = BB_END (bb); |
2587 |
2588 | while (1) |
2589 | { |
2590 | if (insn == BB_HEAD (bb)) |
2591 | return true; |
2592 | if (!DEBUG_INSN_P (insn)) |
2593 | return false; |
2594 | insn = PREV_INSN (insn); |
2595 | } |
2596 | } |
2597 | |
2598 | /* Return true if BB contains just a return and possibly a USE of the |
2599 | return value. Fill in *RET and *USE with the return and use insns |
2600 | if any found, otherwise NULL. All CLOBBERs are ignored. */ |
2601 | |
2602 | bool |
2603 | bb_is_just_return (basic_block bb, rtx_insn **ret, rtx_insn **use) |
2604 | { |
2605 | *ret = *use = NULL; |
2606 | rtx_insn *insn; |
2607 |
2608 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
2609 | return false; |
2610 |
2611 | FOR_BB_INSNS_REVERSE (bb, insn) |
2612 | if (NONDEBUG_INSN_P (insn)) |
2613 | { |
2614 | rtx pat = PATTERN (insn); |
2615 |
2616 | if (!*ret && ANY_RETURN_P (pat)) |
2617 | *ret = insn; |
2618 | else if (!*ret && !*use && GET_CODE (pat) == USE |
2619 | && REG_P (XEXP (pat, 0)) |
2620 | && REG_FUNCTION_VALUE_P (XEXP (pat, 0))) |
2621 | *use = insn; |
2622 | else if (GET_CODE (pat) != CLOBBER) |
2623 | return false; |
2624 | } |
2625 | |
2626 | return !!*ret; |
2627 | } |
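As a hedged illustration of what bb_is_just_return recognizes (this snippet is hypothetical and not taken from GCC): the shared return block at the end of a function often contains nothing but a USE of the return-value register followed by a return pattern, with all clobbers ignored. A C function shaped like the following tends to produce such a block, which both return statements branch to and which the branch-to-return transformations in try_optimize_cfg below can then target directly:

  /* Illustrative only -- hypothetical source, not GCC code.  */
  extern int do_work (int);

  int
  example (int x)
  {
    if (x < 0)
      return -1;		/* both paths funnel into one block that just returns */
    return do_work (x);
  }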
2628 | |
2629 | /* Do simple CFG optimizations - basic block merging, simplifying of jump |
2630 | instructions etc. Return nonzero if changes were made. */ |
2631 | |
2632 | static bool |
2633 | try_optimize_cfg (int mode) |
2634 | { |
2635 | bool changed_overall = false; |
2636 | bool changed; |
2637 | int iterations = 0; |
2638 | basic_block bb, b, next; |
2639 | |
2640 | if (mode & (CLEANUP_CROSSJUMP | CLEANUP_THREADING)) |
2641 | clear_bb_flags (); |
2642 | |
2643 | crossjumps_occurred = false; |
2644 | |
2645 | FOR_EACH_BB_FN (bb, cfun) |
2646 | update_forwarder_flag (bb); |
2647 | |
2648 | if (! targetm.cannot_modify_jumps_p ()) |
2649 | { |
2650 | first_pass = true; |
2651 | /* Attempt to merge blocks as made possible by edge removal. If |
2652 | a block has only one successor, and the successor has only |
2653 | one predecessor, they may be combined. */ |
2654 | do |
2655 | { |
2656 | block_was_dirty = false; |
2657 | changed = false; |
2658 | iterations++; |
2659 | |
2660 | if (dump_file) |
2661 | fprintf (dump_file, |
2662 | "\n\ntry_optimize_cfg iteration %i\n\n", |
2663 | iterations); |
2664 | |
2665 | for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b |
2666 | != EXIT_BLOCK_PTR_FOR_FN (cfun);) |
2667 | { |
2668 | basic_block c; |
2669 | edge s; |
2670 | bool changed_here = false; |
2671 | |
2672 | /* Delete trivially dead basic blocks. This is either |
2673 | blocks with no predecessors, or empty blocks with no |
2674 | successors. However if the empty block with no |
2675 | successors is the successor of the ENTRY_BLOCK, it is |
2676 | kept. This ensures that the ENTRY_BLOCK will have a |
2677 | successor which is a precondition for many RTL |
2678 | passes. Empty blocks may result from expanding |
2679 | __builtin_unreachable (). */ |
2680 | if (EDGE_COUNT (b->preds) == 0 |
2681 | || (EDGE_COUNT (b->succs) == 0 |
2682 | && trivially_empty_bb_p (b) |
2683 | && single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->dest |
2684 | != b)) |
2685 | { |
2686 | c = b->prev_bb; |
2687 | if (EDGE_COUNT (b->preds) > 0) |
2688 | { |
2689 | edge e; |
2690 | edge_iterator ei; |
2691 | |
2692 | if (current_ir_type () == IR_RTL_CFGLAYOUT) |
2693 | { |
2694 | rtx_insn *insn; |
2695 | for (insn = BB_FOOTER (b); |
2696 | insn; insn = NEXT_INSN (insn)) |
2697 | if (BARRIER_P (insn)) |
2698 | break; |
2699 | if (insn) |
2700 | FOR_EACH_EDGE (e, ei, b->preds) |
2701 | if ((e->flags & EDGE_FALLTHRU)) |
2702 | { |
2703 | if (BB_FOOTER (b) |
2704 | && BB_FOOTER (e->src) == NULL) |
2705 | { |
2706 | BB_FOOTER (e->src) = BB_FOOTER (b); |
2707 | BB_FOOTER (b) = NULL; |
2708 | } |
2709 | else |
2710 | emit_barrier_after_bb (e->src); |
2711 | } |
2712 | } |
2713 | else |
2714 | { |
2715 | rtx_insn *last = get_last_bb_insn (b); |
2716 | if (last && BARRIER_P (last)) |
2717 | FOR_EACH_EDGE (e, ei, b->preds) |
2718 | if ((e->flags & EDGE_FALLTHRU)) |
2719 | emit_barrier_after (BB_END (e->src)); |
2720 | } |
2721 | } |
2722 | delete_basic_block (b); |
2723 | changed = true; |
2724 | /* Avoid trying to remove the exit block. */ |
2725 | b = (c == ENTRY_BLOCK_PTR_FOR_FN (cfun) ? c->next_bb : c); |
2726 | continue; |
2727 | } |
2728 | |
2729 | /* Remove code labels no longer used. */ |
2730 | if (single_pred_p (b) |
2731 | && (single_pred_edge (b)->flags & EDGE_FALLTHRU) |
2732 | && !(single_pred_edge (b)->flags & EDGE_COMPLEX) |
2733 | && LABEL_P (BB_HEAD (b)) |
2734 | && !LABEL_PRESERVE_P (BB_HEAD (b)) |
2735 | /* If the previous block ends with a branch to this |
2736 | block, we can't delete the label. Normally this |
2737 | is a condjump that is yet to be simplified, but |
2738 | if CASE_DROPS_THRU, this can be a tablejump with |
2739 | some element going to the same place as the |
2740 | default (fallthru). */ |
2741 | && (single_pred (b) == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
2742 | || !JUMP_P (BB_END (single_pred (b))) |
2743 | || ! label_is_jump_target_p (BB_HEAD (b), |
2744 | BB_END (single_pred (b))))) |
2745 | { |
2746 | delete_insn (BB_HEAD (b)); |
2747 | if (dump_file) |
2748 | fprintf (dump_file, "Deleted label in block %i.\n", |
2749 | b->index); |
2750 | } |
2751 | |
2752 | /* If we fall through an empty block, we can remove it. */ |
2753 | if (!(mode & (CLEANUP_CFGLAYOUT | CLEANUP_NO_INSN_DEL)) |
2754 | && single_pred_p (b) |
2755 | && (single_pred_edge (b)->flags & EDGE_FALLTHRU) |
2756 | && !LABEL_P (BB_HEAD (b)) |
2757 | && FORWARDER_BLOCK_P (b) |
2758 | /* Note that forwarder_block_p true ensures that |
2759 | there is a successor for this block. */ |
2760 | && (single_succ_edge (b)->flags & EDGE_FALLTHRU) |
2761 | && n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS + 1) |
2762 | { |
2763 | if (dump_file) |
2764 | fprintf (dump_file, |
2765 | "Deleting fallthru block %i.\n", |
2766 | b->index); |
2767 | |
2768 | c = ((b->prev_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
2769 | ? b->next_bb : b->prev_bb); |
2770 | redirect_edge_succ_nodup (single_pred_edge (b), |
2771 | single_succ (b)); |
2772 | delete_basic_block (b); |
2773 | changed = true; |
2774 | b = c; |
2775 | continue; |
2776 | } |
2777 | |
2778 | /* Merge B with its single successor, if any. */ |
2779 | if (single_succ_p (b) |
2780 | && (s = single_succ_edge (b)) |
2781 | && !(s->flags & EDGE_COMPLEX) |
2782 | && (c = s->dest) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
2783 | && single_pred_p (c) |
2784 | && b != c) |
2785 | { |
2786 | /* When not in cfg_layout mode use code aware of reordering |
2787 | INSN. This code possibly creates new basic blocks so it |
2788 | does not fit merge_blocks interface and is kept here in |
2789 | hope that it will become useless once more of compiler |
2790 | is transformed to use cfg_layout mode. */ |
2791 | |
2792 | if ((mode & CLEANUP_CFGLAYOUT) |
2793 | && can_merge_blocks_p (b, c)) |
2794 | { |
2795 | merge_blocks (b, c); |
2796 | update_forwarder_flag (b); |
2797 | changed_here = true; |
2798 | } |
2799 | else if (!(mode & CLEANUP_CFGLAYOUT) |
2800 | /* If the jump insn has side effects, |
2801 | we can't kill the edge. */ |
2802 | && (!JUMP_P (BB_END (b)) |
2803 | || (reload_completed |
2804 | ? simplejump_p (BB_END (b)) |
2805 | : (onlyjump_p (BB_END (b)) |
2806 | && !tablejump_p (BB_END (b), |
2807 | NULL, NULL)))) |
2808 | && (next = merge_blocks_move (s, b, c, mode))) |
2809 | { |
2810 | b = next; |
2811 | changed_here = true; |
2812 | } |
2813 | } |
2814 | |
2815 | /* Try to change a branch to a return to just that return. */ |
2816 | rtx_insn *ret, *use; |
2817 | if (single_succ_p (b) |
2818 | && onlyjump_p (BB_END (b)) |
2819 | && bb_is_just_return (single_succ (b), &ret, &use)) |
2820 | { |
2821 | if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)), |
2822 | PATTERN (ret), 0)) |
2823 | { |
2824 | if (use) |
2825 | emit_insn_before (copy_insn (PATTERN (use)), |
2826 | BB_END (b)); |
2827 | if (dump_file) |
2828 | fprintf (dump_file, "Changed jump %d->%d to return.\n", |
2829 | b->index, single_succ (b)->index); |
2830 | redirect_edge_succ (single_succ_edge (b), |
2831 | EXIT_BLOCK_PTR_FOR_FN (cfun)); |
2832 | single_succ_edge (b)->flags &= ~EDGE_CROSSING; |
2833 | changed_here = true; |
2834 | } |
2835 | } |
2836 | |
2837 | /* Try to change a conditional branch to a return to the |
2838 | respective conditional return. */ |
2839 | if (EDGE_COUNT (b->succs) == 2 |
2840 | && any_condjump_p (BB_END (b)) |
2841 | && bb_is_just_return (BRANCH_EDGE (b)->dest, &ret, &use)) |
2842 | { |
2843 | if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)), |
2844 | PATTERN (ret), 0)) |
2845 | { |
2846 | if (use) |
2847 | emit_insn_before (copy_insn (PATTERN (use)), |
2848 | BB_END (b)); |
2849 | if (dump_file) |
2850 | fprintf (dump_file, "Changed conditional jump %d->%d " |
2851 | "to conditional return.\n", |
2852 | b->index, BRANCH_EDGE (b)->dest->index); |
2853 | redirect_edge_succ (BRANCH_EDGE (b), |
2854 | EXIT_BLOCK_PTR_FOR_FN (cfun)); |
2855 | BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING; |
2856 | changed_here = true; |
2857 | } |
2858 | } |
2859 | |
2860 | /* Try to flip a conditional branch that falls through to |
2861 | a return so that it becomes a conditional return and a |
2862 | new jump to the original branch target. */ |
2863 | if (EDGE_COUNT (b->succs) == 2 |
2864 | && BRANCH_EDGE (b)->dest != EXIT_BLOCK_PTR_FOR_FN (cfun) |
2865 | && any_condjump_p (BB_END (b)) |
2866 | && bb_is_just_return (FALLTHRU_EDGE (b)->dest, &ret, &use)) |
2867 | { |
2868 | if (invert_jump (as_a <rtx_jump_insn *> (BB_END (b)), |
2869 | JUMP_LABEL (BB_END (b)), 0)) |
2870 | { |
2871 | basic_block new_ft = BRANCH_EDGE (b)->dest; |
2872 | if (redirect_jump (as_a <rtx_jump_insn *> (BB_END (b)), |
2873 | PATTERN (ret), 0)) |
2874 | { |
2875 | if (use) |
2876 | emit_insn_before (copy_insn (PATTERN (use)), |
2877 | BB_END (b)); |
2878 | if (dump_file) |
2879 | fprintf (dump_file, "Changed conditional jump " |
2880 | "%d->%d to conditional return, adding " |
2881 | "fall-through jump.\n", |
2882 | b->index, BRANCH_EDGE (b)->dest->index); |
2883 | redirect_edge_succ (BRANCH_EDGE (b), |
2884 | EXIT_BLOCK_PTR_FOR_FN (cfun)); |
2885 | BRANCH_EDGE (b)->flags &= ~EDGE_CROSSING; |
2886 | std::swap (BRANCH_EDGE (b)->probability, |
2887 | FALLTHRU_EDGE (b)->probability); |
2888 | update_br_prob_note (b); |
2889 | basic_block jb = force_nonfallthru (FALLTHRU_EDGE (b)); |
2890 | notice_new_block (jb); |
2891 | if (!redirect_jump (as_a <rtx_jump_insn *> (BB_END (jb)), |
2892 | block_label (new_ft), 0)) |
2893 | gcc_unreachable (); |
2894 | redirect_edge_succ (single_succ_edge (jb), new_ft); |
2895 | changed_here = true; |
2896 | } |
2897 | else |
2898 | { |
2899 | /* Invert the jump back to what it was. This should |
2900 | never fail. */ |
2901 | if (!invert_jump (as_a <rtx_jump_insn *> (BB_END (b)), |
2902 | JUMP_LABEL (BB_END (b)), 0)) |
2903 | gcc_unreachable (); |
2904 | } |
2905 | } |
2906 | } |
2907 | |
2908 | /* Simplify branch over branch. */ |
2909 | if ((mode & CLEANUP_EXPENSIVE) |
2910 | && !(mode & CLEANUP_CFGLAYOUT) |
2911 | && try_simplify_condjump (b)) |
2912 | changed_here = true; |
2913 | |
2914 | /* If B has a single outgoing edge, but uses a |
2915 | non-trivial jump instruction without side-effects, we |
2916 | can either delete the jump entirely, or replace it |
2917 | with a simple unconditional jump. */ |
2918 | if (single_succ_p (b) |
2919 | && single_succ (b) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
2920 | && onlyjump_p (BB_END (b)) |
2921 | && !CROSSING_JUMP_P (BB_END (b)) |
2922 | && try_redirect_by_replacing_jump (single_succ_edge (b), |
2923 | single_succ (b), |
2924 | (mode & CLEANUP_CFGLAYOUT) != 0)) |
2925 | { |
2926 | update_forwarder_flag (b); |
2927 | changed_here = true; |
2928 | } |
2929 | |
2930 | /* Simplify branch to branch. */ |
2931 | if (try_forward_edges (mode, b)) |
2932 | { |
2933 | update_forwarder_flag (b); |
2934 | changed_here = true; |
2935 | } |
2936 | |
2937 | /* Look for shared code between blocks. */ |
2938 | if ((mode & CLEANUP_CROSSJUMP) |
2939 | && try_crossjump_bb (mode, b)) |
2940 | changed_here = true; |
2941 | |
2942 | if ((mode & CLEANUP_CROSSJUMP) |
2943 | /* This can lengthen register lifetimes. Do it only after |
2944 | reload. */ |
2945 | && reload_completed |
2946 | && try_head_merge_bb (b)) |
2947 | changed_here = true; |
2948 | |
2949 | /* Don't get confused by the index shift caused by |
2950 | deleting blocks. */ |
2951 | if (!changed_here) |
2952 | b = b->next_bb; |
2953 | else |
2954 | changed = true; |
2955 | } |
2956 | |
2957 | if ((mode & CLEANUP_CROSSJUMP) |
2958 | && try_crossjump_bb (mode, EXIT_BLOCK_PTR_FOR_FN (cfun))) |
2959 | changed = true; |
2960 | |
2961 | if (block_was_dirty) |
2962 | { |
2963 | /* This should only be set by head-merging. */ |
2964 | gcc_assert (mode & CLEANUP_CROSSJUMP); |
2965 | df_analyze (); |
2966 | } |
2967 | |
2968 | if (changed) |
2969 | { |
2970 | /* Edge forwarding in particular can cause hot blocks previously |
2971 | reached by both hot and cold blocks to become dominated only |
2972 | by cold blocks. This will cause the verification below to fail, |
2973 | and lead to now cold code in the hot section. This is not easy |
2974 | to detect and fix during edge forwarding, and in some cases |
2975 | is only visible after newly unreachable blocks are deleted, |
2976 | which will be done in fixup_partitions. */ |
2977 | if ((mode & CLEANUP_NO_PARTITIONING) == 0) |
2978 | { |
2979 | fixup_partitions (); |
2980 | checking_verify_flow_info (); |
2981 | } |
2982 | } |
2983 | |
2984 | changed_overall |= changed; |
2985 | first_pass = false; |
2986 | } |
2987 | while (changed); |
2988 | } |
2989 | |
2990 | FOR_ALL_BB_FN (b, cfun) |
2991 | b->flags &= ~(BB_FORWARDER_BLOCK | BB_NONTHREADABLE_BLOCK); |
2992 | |
2993 | return changed_overall; |
2994 | } |
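The conditional-return rewrites above are easiest to see on a source-level sketch (illustrative only; the function names are invented and this is not GCC code). When the taken edge of a conditional jump leads to a block that is nothing but a return, redirect_jump turns the jump into a conditional return on targets that have one, and the symmetric case inverts the branch first so the return becomes the branch target:

  /* Illustrative only -- hypothetical source, not GCC code.  */
  extern void do_side_effect (void);

  void
  example (int c)
  {
    if (c)
      return;			/* taken edge reaches a block that is just a return */
    do_side_effect ();
  }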
2995 | |
2996 | /* Delete all unreachable basic blocks. */ |
2997 | |
2998 | bool |
2999 | delete_unreachable_blocks (void) |
3000 | { |
3001 | bool changed = false; |
3002 | basic_block b, prev_bb; |
3003 | |
3004 | find_unreachable_blocks (); |
3005 | |
3006 | /* When we're in GIMPLE mode and there may be debug bind insns, we |
3007 | should delete blocks in reverse dominator order, so as to get a |
3008 | chance to substitute all released DEFs into debug bind stmts. If |
3009 | we don't have dominators information, walking blocks backward |
3010 | gets us a better chance of retaining most debug information than |
3011 | otherwise. */ |
3012 | if (MAY_HAVE_DEBUG_BIND_INSNS && current_ir_type () == IR_GIMPLE |
3013 | && dom_info_available_p (CDI_DOMINATORS)) |
3014 | { |
3015 | for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb; |
3016 | b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb) |
3017 | { |
3018 | prev_bb = b->prev_bb; |
3019 | |
3020 | if (!(b->flags & BB_REACHABLE)) |
3021 | { |
3022 | /* Speed up the removal of blocks that don't dominate |
3023 | others. Walking backwards, this should be the common |
3024 | case. */ |
3025 | if (!first_dom_son (CDI_DOMINATORS, b)) |
3026 | delete_basic_block (b); |
3027 | else |
3028 | { |
3029 | auto_vec<basic_block> h |
3030 | = get_all_dominated_blocks (CDI_DOMINATORS, b); |
3031 | |
3032 | while (h.length ()) |
3033 | { |
3034 | b = h.pop (); |
3035 | |
3036 | prev_bb = b->prev_bb; |
3037 | |
3038 | gcc_assert (!(b->flags & BB_REACHABLE)); |
3039 | |
3040 | delete_basic_block (b); |
3041 | } |
3042 | } |
3043 | |
3044 | changed = true; |
3045 | } |
3046 | } |
3047 | } |
3048 | else |
3049 | { |
3050 | for (b = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb; |
3051 | b != ENTRY_BLOCK_PTR_FOR_FN (cfun); b = prev_bb) |
3052 | { |
3053 | prev_bb = b->prev_bb; |
3054 | |
3055 | if (!(b->flags & BB_REACHABLE)) |
3056 | { |
3057 | delete_basic_block (b); |
3058 | changed = true; |
3059 | } |
3060 | } |
3061 | } |
3062 | |
3063 | if (changed) |
3064 | tidy_fallthru_edges (); |
3065 | return changed; |
3066 | } |
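The overall shape of delete_unreachable_blocks is independent of RTL: mark everything reachable from the entry block (find_unreachable_blocks does this by setting BB_REACHABLE), then delete whatever was never marked. The following self-contained C++ sketch restates that idea on a toy CFG; the Block and Cfg types and all names are invented for illustration and this is not the GCC implementation:

  // Illustrative only -- a toy reachability cleanup, not GCC code.
  #include <cstddef>
  #include <vector>

  struct Block
  {
    std::vector<std::size_t> succs; // indices of successor blocks
    bool reachable = false;
  };

  struct Cfg
  {
    std::vector<Block> blocks;      // blocks[0] is the entry block
  };

  // Mark every block reachable from the entry with a simple DFS.
  static void
  mark_reachable (Cfg &cfg)
  {
    std::vector<std::size_t> worklist = {0};
    while (!worklist.empty ())
      {
        std::size_t i = worklist.back ();
        worklist.pop_back ();
        if (cfg.blocks[i].reachable)
          continue;
        cfg.blocks[i].reachable = true;
        for (std::size_t s : cfg.blocks[i].succs)
          worklist.push_back (s);
      }
  }

  // Count the blocks that would be deleted; a real CFG would unlink
  // and free each unmarked block here instead of merely counting.
  static std::size_t
  delete_unreachable (Cfg &cfg)
  {
    mark_reachable (cfg);
    std::size_t removed = 0;
    for (const Block &b : cfg.blocks)
      if (!b.reachable)
        ++removed;
    return removed;
  }

  int
  main ()
  {
    Cfg cfg;
    cfg.blocks.resize (4);
    cfg.blocks[0].succs = {1};      // entry -> 1
    cfg.blocks[1].succs = {3};      // 1 -> 3
    cfg.blocks[2].succs = {3};      // block 2 has no predecessors: unreachable
    return delete_unreachable (cfg) == 1 ? 0 : 1;
  }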
3067 | |
3068 | /* Delete any jump tables never referenced. We can't delete them at the |
3069 | time of removing the tablejump insn, as they are referenced by the preceding |
3070 | insns computing the destination, so we delay their deletion and garbage-collect |
3071 | them once life information is computed. */ |
3072 | void |
3073 | delete_dead_jumptables (void) |
3074 | { |
3075 | basic_block bb; |
3076 | |
3077 | /* A dead jump table does not belong to any basic block. Scan insns |
3078 | between two adjacent basic blocks. */ |
3079 | FOR_EACH_BB_FN (bb, cfun) |
3080 | { |
3081 | rtx_insn *insn, *next; |
3082 | |
3083 | for (insn = NEXT_INSN (BB_END (bb)); |
3084 | insn && !NOTE_INSN_BASIC_BLOCK_P (insn); |
3085 | insn = next) |
3086 | { |
3087 | next = NEXT_INSN (insn); |
3088 | if (LABEL_P (insn) |
3089 | && LABEL_NUSES (insn) == LABEL_PRESERVE_P (insn) |
3090 | && JUMP_TABLE_DATA_P (next)) |
3091 | { |
3092 | rtx_insn *label = insn, *jump = next; |
3093 | |
3094 | if (dump_file) |
3095 | fprintf (dump_file, "Dead jumptable %i removed\n", |
3096 | INSN_UID (insn)); |
3097 | |
3098 | next = NEXT_INSN (next); |
3099 | delete_insn (jump); |
3100 | delete_insn (label); |
3101 | } |
3102 | } |
3103 | } |
3104 | } |
3105 | |
3106 | |
3107 | /* Tidy the CFG by deleting unreachable code and whatnot. */ |
3108 | |
3109 | bool |
3110 | cleanup_cfg (int mode) |
3111 | { |
3112 | bool changed = false; |
3113 | |
3114 | /* Set the cfglayout mode flag here. We could update all the callers |
3115 | but that is just inconvenient, especially given that we eventually |
3116 | want to have cfglayout mode as the default. */ |
3117 | if (current_ir_type () == IR_RTL_CFGLAYOUT) |
3118 | mode |= CLEANUP_CFGLAYOUT; |
3119 | |
3120 | timevar_push (TV_CLEANUP_CFG); |
3121 | if (delete_unreachable_blocks ()) |
3122 | { |
3123 | changed = true; |
3124 | /* We've possibly created trivially dead code. Cleanup it right |
3125 | now to introduce more opportunities for try_optimize_cfg. */ |
3126 | if (!(mode & (CLEANUP_NO_INSN_DEL)) |
3127 | && !reload_completed) |
3128 | delete_trivially_dead_insns (get_insns (), max_reg_num ()); |
3129 | } |
3130 | |
3131 | compact_blocks (); |
3132 | |
3133 | /* To tail-merge blocks ending in the same noreturn function (e.g. |
3134 | a call to abort) we have to insert fake edges to exit. Do this |
3135 | here once. The fake edges do not interfere with any other CFG |
3136 | cleanups. */ |
3137 | if (mode & CLEANUP_CROSSJUMP) |
3138 | add_noreturn_fake_exit_edges (); |
3139 | |
3140 | if (!dbg_cnt (cfg_cleanup)) |
3141 | return changed; |
3142 | |
3143 | while (try_optimize_cfg (mode)) |
3144 | { |
3145 | delete_unreachable_blocks (), changed = true; |
3146 | if (!(mode & CLEANUP_NO_INSN_DEL)) |
3147 | { |
3148 | /* Try to remove some trivially dead insns when doing an expensive |
3149 | cleanup. But delete_trivially_dead_insns doesn't work after |
3150 | reload (it only handles pseudos) and run_fast_dce is too costly |
3151 | to run in every iteration. |
3152 | |
3153 | For effective cross jumping, we really want to run a fast DCE to |
3154 | clean up any dead conditions, or they get in the way of performing |
3155 | useful tail merges. |
3156 | |
3157 | Other transformations in cleanup_cfg are not so sensitive to dead |
3158 | code, so delete_trivially_dead_insns or even doing nothing at all |
3159 | is good enough. */ |
3160 | if ((mode & CLEANUP_EXPENSIVE) && !reload_completed |
3161 | && !delete_trivially_dead_insns (get_insns (), max_reg_num ())) |
3162 | break; |
3163 | if ((mode & CLEANUP_CROSSJUMP) && crossjumps_occurred) |
3164 | { |
3165 | run_fast_dce (); |
3166 | mode &= ~CLEANUP_FORCE_FAST_DCE; |
3167 | } |
3168 | } |
3169 | else |
3170 | break; |
3171 | } |
3172 | |
3173 | if (mode & CLEANUP_CROSSJUMP) |
3174 | remove_fake_exit_edges (); |
3175 | |
3176 | if (mode & CLEANUP_FORCE_FAST_DCE) |
3177 | run_fast_dce (); |
3178 | |
3179 | /* Don't call delete_dead_jumptables in cfglayout mode, because |
3180 | that function assumes that jump tables are in the insns stream. |
3181 | But we also don't _have_ to delete dead jumptables in cfglayout |
3182 | mode because we shouldn't even be looking at things that are |
3183 | not in a basic block. Dead jumptables are cleaned up when |
3184 | going out of cfglayout mode. */ |
3185 | if (!(mode & CLEANUP_CFGLAYOUT)) |
3186 | delete_dead_jumptables (); |
3187 | |
3188 | /* ??? We probably do this way too often. */ |
3189 | if (current_loops |
3190 | && (changed |
3191 | || (mode & CLEANUP_CFG_CHANGED))) |
3192 | { |
3193 | timevar_push (TV_REPAIR_LOOPS); |
3194 | /* The above doesn't preserve dominance info if available. */ |
3195 | gcc_assert (!dom_info_available_p (CDI_DOMINATORS)); |
3196 | calculate_dominance_info (CDI_DOMINATORS); |
3197 | fix_loop_structure (NULL); |
3198 | free_dominance_info (CDI_DOMINATORS); |
3199 | timevar_pop (TV_REPAIR_LOOPS); |
3200 | } |
3201 | |
3202 | timevar_pop (TV_CLEANUP_CFG); |
3203 | |
3204 | return changed; |
3205 | } |
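For reference, callers simply or together the CLEANUP_* bits they want; cleanup_cfg itself adds CLEANUP_CFGLAYOUT when the IR is already in cfglayout mode, as shown above. The pass body below is a hypothetical sketch of such a caller, not an existing GCC pass:

  /* Illustrative only -- hypothetical caller, not GCC code.  */
  static void
  after_my_transform (void)
  {
    /* Cheap jump simplifications plus tail merging (cross jumping).  */
    cleanup_cfg (CLEANUP_EXPENSIVE | CLEANUP_CROSSJUMP);
  }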
3206 | |
3207 | namespace { |
3208 | |
3209 | const pass_data pass_data_jump = |
3210 | { |
3211 | RTL_PASS, /* type */ |
3212 | "jump", /* name */ |
3213 | OPTGROUP_NONE, /* optinfo_flags */ |
3214 | TV_JUMP, /* tv_id */ |
3215 | 0, /* properties_required */ |
3216 | 0, /* properties_provided */ |
3217 | 0, /* properties_destroyed */ |
3218 | 0, /* todo_flags_start */ |
3219 | 0, /* todo_flags_finish */ |
3220 | }; |
3221 | |
3222 | class pass_jump : public rtl_opt_pass |
3223 | { |
3224 | public: |
3225 | pass_jump (gcc::context *ctxt) |
3226 | : rtl_opt_pass (pass_data_jump, ctxt) |
3227 | {} |
3228 | |
3229 | /* opt_pass methods: */ |
3230 | unsigned int execute (function *) final override; |
3231 | |
3232 | }; // class pass_jump |
3233 | |
3234 | unsigned int |
3235 | pass_jump::execute (function *) |
3236 | { |
3237 | delete_trivially_dead_insns (get_insns (), max_reg_num ()); |
3238 | if (dump_file) |
3239 | dump_flow_info (dump_file, dump_flags); |
3240 | cleanup_cfg ((optimize ? CLEANUP_EXPENSIVE : 0) |
3241 | | (flag_thread_jumps && flag_expensive_optimizations |
3242 | ? CLEANUP_THREADING : 0)); |
3243 | return 0; |
3244 | } |
3245 | |
3246 | } // anon namespace |
3247 | |
3248 | rtl_opt_pass * |
3249 | make_pass_jump (gcc::context *ctxt) |
3250 | { |
3251 | return new pass_jump (ctxt); |
3252 | } |
3253 | |
3254 | namespace { |
3255 | |
3256 | const pass_data pass_data_jump_after_combine = |
3257 | { |
3258 | RTL_PASS, /* type */ |
3259 | "jump_after_combine", /* name */ |
3260 | OPTGROUP_NONE, /* optinfo_flags */ |
3261 | TV_JUMP, /* tv_id */ |
3262 | 0, /* properties_required */ |
3263 | 0, /* properties_provided */ |
3264 | 0, /* properties_destroyed */ |
3265 | 0, /* todo_flags_start */ |
3266 | 0, /* todo_flags_finish */ |
3267 | }; |
3268 | |
3269 | class pass_jump_after_combine : public rtl_opt_pass |
3270 | { |
3271 | public: |
3272 | pass_jump_after_combine (gcc::context *ctxt) |
3273 | : rtl_opt_pass (pass_data_jump_after_combine, ctxt) |
3274 | {} |
3275 | |
3276 | /* opt_pass methods: */ |
3277 | bool gate (function *) final override |
3278 | { |
3279 | return flag_thread_jumps && flag_expensive_optimizations; |
3280 | } |
3281 | unsigned int execute (function *) final override; |
3282 | |
3283 | }; // class pass_jump_after_combine |
3284 | |
3285 | unsigned int |
3286 | pass_jump_after_combine::execute (function *) |
3287 | { |
3288 | /* Jump threading does not keep dominators up-to-date. */ |
3289 | free_dominance_info (CDI_DOMINATORS); |
3290 | cleanup_cfg (CLEANUP_THREADING); |
3291 | return 0; |
3292 | } |
3293 | |
3294 | } // anon namespace |
3295 | |
3296 | rtl_opt_pass * |
3297 | make_pass_jump_after_combine (gcc::context *ctxt) |
3298 | { |
3299 | return new pass_jump_after_combine (ctxt); |
3300 | } |
3301 | |
3302 | namespace { |
3303 | |
3304 | const pass_data pass_data_jump2 = |
3305 | { |
3306 | RTL_PASS, /* type */ |
3307 | "jump2", /* name */ |
3308 | OPTGROUP_NONE, /* optinfo_flags */ |
3309 | TV_JUMP, /* tv_id */ |
3310 | 0, /* properties_required */ |
3311 | 0, /* properties_provided */ |
3312 | 0, /* properties_destroyed */ |
3313 | 0, /* todo_flags_start */ |
3314 | 0, /* todo_flags_finish */ |
3315 | }; |
3316 | |
3317 | class pass_jump2 : public rtl_opt_pass |
3318 | { |
3319 | public: |
3320 | pass_jump2 (gcc::context *ctxt) |
3321 | : rtl_opt_pass (pass_data_jump2, ctxt) |
3322 | {} |
3323 | |
3324 | /* opt_pass methods: */ |
3325 | unsigned int execute (function *) final override |
3326 | { |
3327 | cleanup_cfg (flag_crossjumping ? CLEANUP_CROSSJUMP : 0); |
3328 | return 0; |
3329 | } |
3330 | |
3331 | }; // class pass_jump2 |
3332 | |
3333 | } // anon namespace |
3334 | |
3335 | rtl_opt_pass * |
3336 | make_pass_jump2 (gcc::context *ctxt) |
3337 | { |
3338 | return new pass_jump2 (ctxt); |
3339 | } |
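These make_pass_* factories are not called from within cfgcleanup.cc; the pass manager instantiates them from the pass list in gcc/passes.def. Roughly, the corresponding entries look like the following; treat this as a sketch with approximate placement and the surrounding passes elided, not an exact excerpt:

  /* Sketch of gcc/passes.def entries (approximate).  */
  NEXT_PASS (pass_jump);
  ...
  NEXT_PASS (pass_jump_after_combine);
  ...
  NEXT_PASS (pass_jump2);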