File: build/gcc/cfgrtl.cc
Warning: line 3981, column 39: Although the value stored to 'e_taken' is used in the enclosing expression, the value is never actually read from 'e_taken'
1 | /* Control flow graph manipulation code for GNU compiler. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | /* This file contains low-level functions that manipulate and analyze the |
21 | CFG and that are aware of the RTL intermediate language. |
22 | |
23 | Available functionality: |
24 | - Basic CFG/RTL manipulation API documented in cfghooks.h |
25 | - CFG-aware instruction chain manipulation |
26 | delete_insn, delete_insn_chain |
27 | - Edge splitting and committing to edges |
28 | insert_insn_on_edge, commit_edge_insertions |
29 | - CFG updating after insn simplification |
30 | purge_dead_edges, purge_all_dead_edges |
31 | - CFG fixing after coarse manipulation |
32 | fixup_abnormal_edges |
33 | |
34 | Functions not intended for generic use: |
35 | - Infrastructure to determine quickly basic block for insn |
36 | compute_bb_for_insn, update_bb_for_insn, set_block_for_insn, |
37 | - Edge redirection with updating and optimizing of insn chain |
38 | block_label, tidy_fallthru_edge, force_nonfallthru */ |
39 | |
40 | #include "config.h" |
41 | #include "system.h" |
42 | #include "coretypes.h" |
43 | #include "backend.h" |
44 | #include "target.h" |
45 | #include "rtl.h" |
46 | #include "tree.h" |
47 | #include "cfghooks.h" |
48 | #include "df.h" |
49 | #include "insn-config.h" |
50 | #include "memmodel.h" |
51 | #include "emit-rtl.h" |
52 | #include "cfgrtl.h" |
53 | #include "cfganal.h" |
54 | #include "cfgbuild.h" |
55 | #include "cfgcleanup.h" |
56 | #include "bb-reorder.h" |
57 | #include "rtl-error.h" |
58 | #include "insn-attr.h" |
59 | #include "dojump.h" |
60 | #include "expr.h" |
61 | #include "cfgloop.h" |
62 | #include "tree-pass.h" |
63 | #include "print-rtl.h" |
64 | #include "rtl-iter.h" |
65 | #include "gimplify.h" |
66 | #include "profile.h" |
67 | #include "sreal.h" |
68 | |
69 | /* Disable warnings about missing quoting in GCC diagnostics. */ |
70 | #if __GNUC__ >= 10 |
71 | # pragma GCC diagnostic push |
72 | # pragma GCC diagnostic ignored "-Wformat-diag" |
73 | #endif |
74 | |
75 | /* Holds the interesting leading and trailing notes for the function. |
76 | Only applicable if the CFG is in cfglayout mode. */ |
77 | static GTY(()) rtx_insn *cfg_layout_function_footer; |
78 | static GTY(()) rtx_insn *cfg_layout_function_header; |
79 | |
80 | static rtx_insn *skip_insns_after_block (basic_block); |
81 | static void record_effective_endpoints (void); |
82 | static void fixup_reorder_chain (void); |
83 | |
84 | void verify_insn_chain (void); |
85 | static void fixup_fallthru_exit_predecessor (void); |
86 | static int can_delete_note_p (const rtx_note *); |
87 | static int can_delete_label_p (const rtx_code_label *); |
88 | static basic_block rtl_split_edge (edge); |
89 | static bool rtl_move_block_after (basic_block, basic_block); |
90 | static int rtl_verify_flow_info (void); |
91 | static basic_block cfg_layout_split_block (basic_block, void *); |
92 | static edge cfg_layout_redirect_edge_and_branch (edge, basic_block); |
93 | static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block); |
94 | static void cfg_layout_delete_block (basic_block); |
95 | static void rtl_delete_block (basic_block); |
96 | static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block); |
97 | static edge rtl_redirect_edge_and_branch (edge, basic_block); |
98 | static basic_block rtl_split_block (basic_block, void *); |
99 | static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t); |
100 | static int rtl_verify_flow_info_1 (void); |
101 | static void rtl_make_forwarder_block (edge); |
102 | static bool rtl_bb_info_initialized_p (basic_block bb); |
103 | |
104 | /* Return true if NOTE is not one of the ones that must be kept paired, |
105 | so that we may simply delete it. */ |
106 | |
107 | static int |
108 | can_delete_note_p (const rtx_note *note) |
109 | { |
110 | switch (NOTE_KIND (note)) |
111 | { |
112 | case NOTE_INSN_DELETED: |
113 | case NOTE_INSN_BASIC_BLOCK: |
114 | case NOTE_INSN_EPILOGUE_BEG: |
115 | return true; |
116 | |
117 | default: |
118 | return false; |
119 | } |
120 | } |
121 | |
122 | /* True if a given label can be deleted. */ |
123 | |
124 | static int |
125 | can_delete_label_p (const rtx_code_label *label) |
126 | { |
127 | return (!LABEL_PRESERVE_P (label) |
128 | /* User declared labels must be preserved. */ |
129 | && LABEL_NAME (label) == 0 |
130 | && !vec_safe_contains<rtx_insn *> (forced_labels, |
131 | const_cast<rtx_code_label *> (label))); |
132 | } |
133 | |
134 | /* Delete INSN by patching it out. */ |
135 | |
136 | void |
137 | delete_insn (rtx_insn *insn) |
138 | { |
139 | rtx note; |
140 | bool really_delete = true; |
141 | |
142 | if (LABEL_P (insn)) |
143 | { |
144 | /* Some labels can't be directly removed from the INSN chain, as they |
145 | might be references via variables, constant pool etc. |
146 | Convert them to the special NOTE_INSN_DELETED_LABEL note. */ |
147 | if (! can_delete_label_p (as_a <rtx_code_label *> (insn))) |
148 | { |
149 | const char *name = LABEL_NAME (insn); |
150 | basic_block bb = BLOCK_FOR_INSN (insn); |
151 | rtx_insn *bb_note = NEXT_INSN (insn); |
152 | |
153 | really_delete = false; |
154 | PUT_CODE (insn, NOTE); |
155 | NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL; |
156 | NOTE_DELETED_LABEL_NAME (insn) = name; |
157 | |
158 | /* If the note following the label starts a basic block, and the |
159 | label is a member of the same basic block, interchange the two. */ |
160 | if (bb_note != NULL_RTX |
161 | && NOTE_INSN_BASIC_BLOCK_P (bb_note) |
162 | && bb != NULL |
163 | && bb == BLOCK_FOR_INSN (bb_note)) |
164 | { |
165 | reorder_insns_nobb (insn, insn, bb_note); |
166 | BB_HEAD (bb) = bb_note; |
167 | if (BB_END (bb) == bb_note) |
168 | BB_END (bb) = insn; |
169 | } |
170 | } |
171 | |
172 | remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels); |
173 | } |
174 | |
175 | if (really_delete) |
176 | { |
177 | /* If this insn has already been deleted, something is very wrong. */ |
178 | gcc_assert (!insn->deleted ()); |
179 | if (INSN_P (insn)) |
180 | df_insn_delete (insn); |
181 | remove_insn (insn); |
182 | insn->set_deleted (); |
183 | } |
184 | |
185 | /* If deleting a jump, decrement the use count of the label. Deleting |
186 | the label itself should happen in the normal course of block merging. */ |
187 | if (JUMP_P (insn)) |
188 | { |
189 | if (JUMP_LABEL (insn) |
190 | && LABEL_P (JUMP_LABEL (insn))) |
191 | LABEL_NUSES (JUMP_LABEL (insn))--; |
192 | |
193 | /* If there are more targets, remove them too. */ |
194 | while ((note |
195 | = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX |
196 | && LABEL_P (XEXP (note, 0))) |
197 | { |
198 | LABEL_NUSES (XEXP (note, 0))--; |
199 | remove_note (insn, note); |
200 | } |
201 | } |
202 | |
203 | /* Also if deleting any insn that references a label as an operand. */ |
204 | while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX |
205 | && LABEL_P (XEXP (note, 0))) |
206 | { |
207 | LABEL_NUSES (XEXP (note, 0))--; |
208 | remove_note (insn, note); |
209 | } |
210 | |
211 | if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn)) |
212 | { |
213 | rtvec vec = table->get_labels (); |
214 | int len = GET_NUM_ELEM (vec); |
215 | int i; |
216 | |
217 | for (i = 0; i < len; i++) |
218 | { |
219 | rtx label = XEXP (RTVEC_ELT (vec, i), 0); |
220 | |
221 | /* When deleting code in bulk (e.g. removing many unreachable |
222 | blocks) we can delete a label that's a target of the vector |
223 | before deleting the vector itself. */ |
224 | if (!NOTE_P (label)) |
225 | LABEL_NUSES (label)--; |
226 | } |
227 | } |
228 | } |
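
A minimal caller-side sketch of the label bookkeeping above, assuming an existing JUMP_INSN 'jump' whose JUMP_LABEL is 'label' (both names hypothetical):

    /* Hypothetical: 'jump' and 'label' come from the surrounding pass.  */
    int uses_before = LABEL_NUSES (label);
    delete_insn (jump);		/* Patch the jump out of the insn chain.  */
    /* delete_insn decremented the target label's use count.  */
    gcc_checking_assert (LABEL_NUSES (label) == uses_before - 1);
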
229 | |
230 | /* Like delete_insn but also purge dead edges from BB. |
231 | Return true if any edges are eliminated. */ |
232 | |
233 | bool |
234 | delete_insn_and_edges (rtx_insn *insn) |
235 | { |
236 | bool purge = false; |
237 | |
238 | if (NONDEBUG_INSN_P (insn) && BLOCK_FOR_INSN (insn)) |
239 | { |
240 | basic_block bb = BLOCK_FOR_INSN (insn); |
241 | if (BB_END (bb) == insn) |
242 | purge = true; |
243 | else if (DEBUG_INSN_P (BB_END (bb))) |
244 | for (rtx_insn *dinsn = NEXT_INSN (insn); |
245 | DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn)) |
246 | if (BB_END (bb) == dinsn) |
247 | { |
248 | purge = true; |
249 | break; |
250 | } |
251 | } |
252 | delete_insn (insn); |
253 | if (purge) |
254 | return purge_dead_edges (BLOCK_FOR_INSN (insn)); |
255 | return false; |
256 | } |
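
A short usage sketch (hypothetical caller; names are assumptions): when the deleted insn ends its block, the outgoing edges may die, and delete_insn_and_edges cleans them up in one step:

    /* 'insn' is assumed to be a NONDEBUG insn inside some block.  */
    if (delete_insn_and_edges (insn))
      /* At least one now-dead outgoing edge of INSN's block was removed;
         the caller may want a broader cleanup afterwards.  */
      cleanup_cfg (0);
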
257 | |
258 | /* Unlink a chain of insns between START and FINISH, leaving notes |
259 | that must be paired. If CLEAR_BB is true, we set bb field for |
260 | insns that cannot be removed to NULL. */ |
261 | |
262 | void |
263 | delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb) |
264 | { |
265 | /* Unchain the insns one by one. It would be quicker to delete all of these |
266 | with a single unchaining, rather than one at a time, but we need to keep |
267 | the NOTE's. */ |
268 | rtx_insn *current = finish; |
269 | while (1) |
270 | { |
271 | rtx_insn *prev = PREV_INSN (current); |
272 | if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current))) |
273 | ; |
274 | else |
275 | delete_insn (current); |
276 | |
277 | if (clear_bb && !current->deleted ()) |
278 | set_block_for_insn (current, NULL); |
279 | |
280 | if (current == start) |
281 | break; |
282 | current = prev; |
283 | } |
284 | } |
285 | |
286 | /* Create a new basic block consisting of the instructions between HEAD and END |
287 | inclusive. This function is designed to allow fast BB construction - it reuses |
288 | the note and basic block struct in BB_NOTE, if any, does not grow the |
289 | BASIC_BLOCK chain, and should be used directly only by CFG construction code. |
290 | END can be NULL to create a new empty basic block before HEAD. Both END |
291 | and HEAD can be NULL to create a basic block at the end of the INSN chain. |
292 | AFTER is the basic block the new block should be put after. */ |
293 | |
294 | basic_block |
295 | create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note, |
296 | basic_block after) |
297 | { |
298 | basic_block bb; |
299 | |
300 | if (bb_note |
301 | && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL |
302 | && bb->aux == NULL) |
303 | { |
304 | /* If we found an existing note, thread it back onto the chain. */ |
305 | |
306 | rtx_insn *after; |
307 | |
308 | if (LABEL_P (head)) |
309 | after = head; |
310 | else |
311 | { |
312 | after = PREV_INSN (head); |
313 | head = bb_note; |
314 | } |
315 | |
316 | if (after != bb_note && NEXT_INSN (after) != bb_note) |
317 | reorder_insns_nobb (bb_note, bb_note, after); |
318 | } |
319 | else |
320 | { |
321 | /* Otherwise we must create a note and a basic block structure. */ |
322 | |
323 | bb = alloc_block (); |
324 | |
325 | init_rtl_bb_info (bb); |
326 | if (!head && !end) |
327 | head = end = bb_note |
328 | = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ()); |
329 | else if (LABEL_P (head) && end) |
330 | { |
331 | bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head); |
332 | if (head == end) |
333 | end = bb_note; |
334 | } |
335 | else |
336 | { |
337 | bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head); |
338 | head = bb_note; |
339 | if (!end) |
340 | end = head; |
341 | } |
342 | |
343 | NOTE_BASIC_BLOCK (bb_note) = bb; |
344 | } |
345 | |
346 | /* Always include the bb note in the block. */ |
347 | if (NEXT_INSN (end) == bb_note) |
348 | end = bb_note; |
349 | |
350 | BB_HEAD (bb) = head; |
351 | BB_END (bb) = end; |
352 | bb->index = last_basic_block_for_fn (cfun)++; |
353 | bb->flags = BB_NEW | BB_RTL; |
354 | link_block (bb, after); |
355 | SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb); |
356 | df_bb_refs_record (bb->index, false); |
357 | update_bb_for_insn (bb); |
358 | BB_SET_PARTITION (bb, BB_UNPARTITIONED); |
359 | |
360 | /* Tag the block so that we know it has been used when considering |
361 | other basic block notes. */ |
362 | bb->aux = bb; |
363 | |
364 | return bb; |
365 | } |
366 | |
367 | /* Create a new basic block consisting of the instructions between HEAD and END |
368 | and place it in the BB chain after block AFTER. END can be NULL to |
369 | create a new empty basic block before HEAD. Both END and HEAD can be |
370 | NULL to create a basic block at the end of the INSN chain. */ |
371 | |
372 | static basic_block |
373 | rtl_create_basic_block (void *headp, void *endp, basic_block after) |
374 | { |
375 | rtx_insn *head = (rtx_insn *) headp; |
376 | rtx_insn *end = (rtx_insn *) endp; |
377 | basic_block bb; |
378 | |
379 | /* Grow the basic block array if needed. */ |
380 | if ((size_t) last_basic_block_for_fn (cfun) |
381 | >= basic_block_info_for_fn (cfun)->length ()) |
382 | vec_safe_grow_cleared (basic_block_info_for_fn (cfun), |
383 | last_basic_block_for_fn (cfun) + 1); |
384 | |
385 | n_basic_blocks_for_fn (cfun)++; |
386 | |
387 | bb = create_basic_block_structure (head, end, NULL, after); |
388 | bb->aux = NULL; |
389 | return bb; |
390 | } |
391 | |
392 | static basic_block |
393 | cfg_layout_create_basic_block (void *head, void *end, basic_block after) |
394 | { |
395 | basic_block newbb = rtl_create_basic_block (head, end, after); |
396 | |
397 | return newbb; |
398 | } |
399 | |
400 | /* Delete the insns in a (non-live) block. We physically delete every |
401 | non-deleted-note insn, and update the flow graph appropriately. |
402 | |
403 | Return nonzero if we deleted an exception handler. */ |
404 | |
405 | /* ??? Preserving all such notes strikes me as wrong. It would be nice |
406 | to post-process the stream to remove empty blocks, loops, ranges, etc. */ |
407 | |
408 | static void |
409 | rtl_delete_block (basic_block b) |
410 | { |
411 | rtx_insn *insn, *end; |
412 | |
413 | /* If the head of this block is a CODE_LABEL, then it might be the |
414 | label for an exception handler which can't be reached. We need |
415 | to remove the label from the exception_handler_label list. */ |
416 | insn = BB_HEAD (b); |
417 | |
418 | end = get_last_bb_insn (b); |
419 | |
420 | /* Selectively delete the entire chain. */ |
421 | BB_HEAD (b) = NULL; |
422 | delete_insn_chain (insn, end, true); |
423 | |
424 | |
425 | if (dump_file) |
426 | fprintf (dump_file, "deleting block %d\n", b->index); |
427 | df_bb_delete (b->index); |
428 | } |
429 | |
430 | /* Records the basic block struct in BLOCK_FOR_INSN for every insn. */ |
431 | |
432 | void |
433 | compute_bb_for_insn (void) |
434 | { |
435 | basic_block bb; |
436 | |
437 | FOR_EACH_BB_FN (bb, cfun) |
438 | { |
439 | rtx_insn *end = BB_END (bb); |
440 | rtx_insn *insn; |
441 | |
442 | for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn)) |
443 | { |
444 | BLOCK_FOR_INSN (insn) = bb; |
445 | if (insn == end) |
446 | break; |
447 | } |
448 | } |
449 | } |
450 | |
451 | /* Release the basic_block_for_insn array. */ |
452 | |
453 | unsigned int |
454 | free_bb_for_insn (void) |
455 | { |
456 | rtx_insn *insn; |
457 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
458 | if (!BARRIER_P (insn)) |
459 | BLOCK_FOR_INSN (insn) = NULL; |
460 | return 0; |
461 | } |
462 | |
463 | namespace { |
464 | |
465 | const pass_data pass_data_free_cfg = |
466 | { |
467 | RTL_PASS, /* type */ |
468 | "*free_cfg", /* name */ |
469 | OPTGROUP_NONE, /* optinfo_flags */ |
470 | TV_NONE, /* tv_id */ |
471 | 0, /* properties_required */ |
472 | 0, /* properties_provided */ |
473 | PROP_cfg, /* properties_destroyed */ |
474 | 0, /* todo_flags_start */ |
475 | 0, /* todo_flags_finish */ |
476 | }; |
477 | |
478 | class pass_free_cfg : public rtl_opt_pass |
479 | { |
480 | public: |
481 | pass_free_cfg (gcc::context *ctxt) |
482 | : rtl_opt_pass (pass_data_free_cfg, ctxt) |
483 | {} |
484 | |
485 | /* opt_pass methods: */ |
486 | unsigned int execute (function *) final override; |
487 | |
488 | }; // class pass_free_cfg |
489 | |
490 | unsigned int |
491 | pass_free_cfg::execute (function *) |
492 | { |
493 | /* The resource.cc machinery uses DF but the CFG isn't guaranteed to be |
494 | valid at that point so it would be too late to call df_analyze. */ |
495 | if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch) |
496 | { |
497 | df_note_add_problem (); |
498 | df_analyze (); |
499 | } |
500 | |
501 | if (crtl->has_bb_partition) |
502 | insert_section_boundary_note (); |
503 | |
504 | free_bb_for_insn (); |
505 | return 0; |
506 | } |
507 | |
508 | } // anon namespace |
509 | |
510 | rtl_opt_pass * |
511 | make_pass_free_cfg (gcc::context *ctxt) |
512 | { |
513 | return new pass_free_cfg (ctxt); |
514 | } |
515 | |
516 | /* Return the insn after which code should be emitted when inserting at the entry of the function. */ |
517 | rtx_insn * |
518 | entry_of_function (void) |
519 | { |
520 | return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ? |
521 | BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ()); |
522 | } |
523 | |
524 | /* Emit INSN at the entry point of the function, ensuring that it is only |
525 | executed once per function. */ |
526 | void |
527 | emit_insn_at_entry (rtx insn) |
528 | { |
529 | edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs); |
530 | edge e = ei_safe_edge (ei); |
531 | gcc_assert (e->flags & EDGE_FALLTHRU); |
532 | |
533 | insert_insn_on_edge (insn, e); |
534 | commit_edge_insertions (); |
535 | } |
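
emit_insn_at_entry above is a small instance of the edge-insertion API named in the file header (insert_insn_on_edge plus commit_edge_insertions). A minimal sketch of the general pattern, with edge 'e' assumed to come from the caller:

    /* Queue a (here trivial) sequence on edge E, then materialize it;
       commit_edge_insertions splits E if the new code cannot be placed
       at either end of the edge.  */
    start_sequence ();
    emit_insn (gen_nop ());	/* stand-in for the real payload */
    rtx_insn *seq = get_insns ();
    end_sequence ();
    insert_insn_on_edge (seq, e);
    commit_edge_insertions ();
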
536 | |
537 | /* Update BLOCK_FOR_INSN of insns between BEGIN and END |
538 | (or BARRIER if found) and notify df of the bb change. |
539 | The insn chain range is inclusive |
540 | (i.e. both BEGIN and END will be updated). */ |
541 | |
542 | static void |
543 | update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb) |
544 | { |
545 | rtx_insn *insn; |
546 | |
547 | end = NEXT_INSN (end); |
548 | for (insn = begin; insn != end; insn = NEXT_INSN (insn)) |
549 | if (!BARRIER_P (insn)) |
550 | df_insn_change_bb (insn, bb); |
551 | } |
552 | |
553 | /* Update BLOCK_FOR_INSN of insns in BB to BB, |
554 | and notify df of the change. */ |
555 | |
556 | void |
557 | update_bb_for_insn (basic_block bb) |
558 | { |
559 | update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb); |
560 | } |
561 | |
562 | |
563 | /* Like active_insn_p, except keep the return value use or clobber around |
564 | even after reload. */ |
565 | |
566 | static bool |
567 | flow_active_insn_p (const rtx_insn *insn) |
568 | { |
569 | if (active_insn_p (insn)) |
570 | return true; |
571 | |
572 | /* A clobber of the function return value exists for buggy |
573 | programs that fail to return a value. Its effect is to |
574 | keep the return value from being live across the entire |
575 | function. If we allow it to be skipped, we introduce the |
576 | possibility for register lifetime confusion. |
577 | Similarly, keep a USE of the function return value, otherwise |
578 | the USE is dropped and we could fail to thread jump if USE |
579 | appears on some paths and not on others, see PR90257. */ |
580 | if ((GET_CODE (PATTERN (insn)) == CLOBBER |
581 | || GET_CODE (PATTERN (insn)) == USE) |
582 | && REG_P (XEXP (PATTERN (insn), 0)) |
583 | && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0))) |
584 | return true; |
585 | |
586 | return false; |
587 | } |
588 | |
589 | /* Return true if the block has no effect and only forwards control flow to |
590 | its single destination. */ |
591 | |
592 | bool |
593 | contains_no_active_insn_p (const_basic_block bb) |
594 | { |
595 | rtx_insn *insn; |
596 | |
597 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
598 | || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
599 | || !single_succ_p (bb) |
600 | || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0) |
601 | return false; |
602 | |
603 | for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn)) |
604 | if (INSN_P (insn) && flow_active_insn_p (insn)) |
605 | return false; |
606 | |
607 | return (!INSN_P (insn) |
608 | || (JUMP_P (insn) && simplejump_p (insn)) |
609 | || !flow_active_insn_p (insn)); |
610 | } |
611 | |
612 | /* Likewise, but protect loop latches, headers and preheaders. */ |
613 | /* FIXME: Make this a cfg hook. */ |
614 | |
615 | bool |
616 | forwarder_block_p (const_basic_block bb) |
617 | { |
618 | if (!contains_no_active_insn_p (bb)) |
619 | return false; |
620 | |
621 | /* Protect loop latches, headers and preheaders. */ |
622 | if (current_loops) |
623 | { |
624 | basic_block dest; |
625 | if (bb->loop_father->header == bb) |
626 | return false; |
627 | dest = EDGE_SUCC (bb, 0)->dest; |
628 | if (dest->loop_father->header == dest) |
629 | return false; |
630 | } |
631 | |
632 | return true; |
633 | } |
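
For intuition, a forwarder block contains nothing but (at most) a label, the basic block note, and a simple jump, so control merely passes through it. A sketch of the typical caller pattern (the edge 'e' into 'bb' is an assumption):

    /* Retarget an incoming edge past a forwarder block.  */
    if (forwarder_block_p (bb))
      redirect_edge_and_branch (e, single_succ (bb));
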
634 | |
635 | /* Return nonzero if we can reach target from src by falling through. */ |
636 | /* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode. */ |
637 | |
638 | bool |
639 | can_fallthru (basic_block src, basic_block target) |
640 | { |
641 | rtx_insn *insn = BB_END (src); |
642 | rtx_insn *insn2; |
643 | edge e; |
644 | edge_iterator ei; |
645 | |
646 | if (target == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
647 | return true; |
648 | if (src->next_bb != target) |
649 | return false; |
650 | |
651 | /* ??? Later we may add code to move jump tables offline. */ |
652 | if (tablejump_p (insn, NULL, NULL)) |
653 | return false; |
654 | |
655 | FOR_EACH_EDGE (e, ei, src->succs) |
656 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun) |
657 | && e->flags & EDGE_FALLTHRU) |
658 | return false; |
659 | |
660 | insn2 = BB_HEAD (target); |
661 | if (!active_insn_p (insn2)) |
662 | insn2 = next_active_insn (insn2); |
663 | |
664 | return next_active_insn (insn) == insn2; |
665 | } |
666 | |
667 | /* Return nonzero if we could reach target from src by falling through, |
668 | if the target was made adjacent. If we already have a fall-through |
669 | edge to the exit block, we can't do that. */ |
670 | static bool |
671 | could_fall_through (basic_block src, basic_block target) |
672 | { |
673 | edge e; |
674 | edge_iterator ei; |
675 | |
676 | if (target == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
677 | return true; |
678 | FOR_EACH_EDGE (e, ei, src->succs) |
679 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun) |
680 | && e->flags & EDGE_FALLTHRU) |
681 | return false; |
682 | return true; |
683 | } |
684 | |
685 | /* Return the NOTE_INSN_BASIC_BLOCK of BB. */ |
686 | rtx_note * |
687 | bb_note (basic_block bb) |
688 | { |
689 | rtx_insn *note; |
690 | |
691 | note = BB_HEAD (bb); |
692 | if (LABEL_P (note)) |
693 | note = NEXT_INSN (note); |
694 | |
695 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note)); |
696 | return as_a <rtx_note *> (note); |
697 | } |
698 | |
699 | /* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK |
700 | note associated with the BLOCK. */ |
701 | |
702 | static rtx_insn * |
703 | first_insn_after_basic_block_note (basic_block block) |
704 | { |
705 | rtx_insn *insn; |
706 | |
707 | /* Get the first instruction in the block. */ |
708 | insn = BB_HEAD (block); |
709 | |
710 | if (insn == NULL_RTX) |
711 | return NULL; |
712 | if (LABEL_P (insn)) |
713 | insn = NEXT_INSN (insn); |
714 | gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn)); |
715 | |
716 | return NEXT_INSN (insn); |
717 | } |
718 | |
719 | /* Creates a new basic block just after basic block BB by splitting |
720 | everything after specified instruction INSNP. */ |
721 | |
722 | static basic_block |
723 | rtl_split_block (basic_block bb, void *insnp) |
724 | { |
725 | basic_block new_bb; |
726 | rtx_insn *insn = (rtx_insn *) insnp; |
727 | edge e; |
728 | edge_iterator ei; |
729 | |
730 | if (!insn) |
731 | { |
732 | insn = first_insn_after_basic_block_note (bb); |
733 | |
734 | if (insn) |
735 | { |
736 | rtx_insn *next = insn; |
737 | |
738 | insn = PREV_INSN (insn); |
739 | |
740 | /* If the block contains only debug insns, insn would have |
741 | been NULL in a non-debug compilation, and then we'd end |
742 | up emitting a DELETED note. For -fcompare-debug |
743 | stability, emit the note too. */ |
744 | if (insn != BB_END (bb) |
745 | && DEBUG_INSN_P (next) |
746 | && DEBUG_INSN_P (BB_END (bb))) |
747 | { |
748 | while (next != BB_END (bb) && DEBUG_INSN_P (next)) |
749 | next = NEXT_INSN (next); |
750 | |
751 | if (next == BB_END (bb)) |
752 | emit_note_after (NOTE_INSN_DELETED, next); |
753 | } |
754 | } |
755 | else |
756 | insn = get_last_insn (); |
757 | } |
758 | |
759 | /* We probably should check type of the insn so that we do not create |
760 | inconsistent cfg. It is checked in verify_flow_info anyway, so do not |
761 | bother. */ |
762 | if (insn == BB_END (bb)) |
763 | emit_note_after (NOTE_INSN_DELETED, insn); |
764 | |
765 | /* Create the new basic block. */ |
766 | new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb); |
767 | BB_COPY_PARTITION (new_bb, bb); |
768 | BB_END (bb) = insn; |
769 | |
770 | /* Redirect the outgoing edges. */ |
771 | new_bb->succs = bb->succs; |
772 | bb->succs = NULL; |
773 | FOR_EACH_EDGE (e, ei, new_bb->succs) |
774 | e->src = new_bb; |
775 | |
776 | /* The new block starts off being dirty. */ |
777 | df_set_bb_dirty (bb); |
778 | return new_bb; |
779 | } |
780 | |
781 | /* Return true if LOC1 and LOC2 are equivalent for |
782 | unique_locus_on_edge_between_p purposes. */ |
783 | |
784 | static bool |
785 | loc_equal (location_t loc1, location_t loc2) |
786 | { |
787 | if (loc1 == loc2) |
788 | return true; |
789 | |
790 | expanded_location loce1 = expand_location (loc1); |
791 | expanded_location loce2 = expand_location (loc2); |
792 | |
793 | if (loce1.line != loce2.line |
794 | || loce1.column != loce2.column |
795 | || loce1.data != loce2.data) |
796 | return false; |
797 | if (loce1.file == loce2.file) |
798 | return true; |
799 | return (loce1.file != NULL |
800 | && loce2.file != NULL |
801 | && filename_cmp (loce1.file, loce2.file) == 0); |
802 | } |
803 | |
804 | /* Return true if the single edge between blocks A and B is the only place |
805 | in RTL which holds some unique locus. */ |
806 | |
807 | static bool |
808 | unique_locus_on_edge_between_p (basic_block a, basic_block b) |
809 | { |
810 | const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus; |
811 | rtx_insn *insn, *end; |
812 | |
813 | if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION) |
814 | return false; |
815 | |
816 | /* First scan block A backward. */ |
817 | insn = BB_END (a); |
818 | end = PREV_INSN (BB_HEAD (a)); |
819 | while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn))) |
820 | insn = PREV_INSN (insn); |
821 | |
822 | if (insn != end && loc_equal (INSN_LOCATION (insn), goto_locus)) |
823 | return false; |
824 | |
825 | /* Then scan block B forward. */ |
826 | insn = BB_HEAD (b); |
827 | if (insn) |
828 | { |
829 | end = NEXT_INSN (BB_END (b)); |
830 | while (insn != end && !NONDEBUG_INSN_P (insn)) |
831 | insn = NEXT_INSN (insn); |
832 | |
833 | if (insn != end && INSN_HAS_LOCATION (insn) |
834 | && loc_equal (INSN_LOCATION (insn), goto_locus)) |
835 | return false; |
836 | } |
837 | |
838 | return true; |
839 | } |
840 | |
841 | /* If the single edge between blocks A and B is the only place in RTL which |
842 | holds some unique locus, emit a nop with that locus between the blocks. */ |
843 | |
844 | static void |
845 | emit_nop_for_unique_locus_between (basic_block a, basic_block b) |
846 | { |
847 | if (!unique_locus_on_edge_between_p (a, b)) |
848 | return; |
849 | |
850 | BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a); |
851 | INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus; |
852 | } |
853 | |
854 | /* Blocks A and B are to be merged into a single block A. The insns |
855 | are already contiguous. */ |
856 | |
857 | static void |
858 | rtl_merge_blocks (basic_block a, basic_block b) |
859 | { |
860 | /* If B is a forwarder block whose outgoing edge has no location, we'll |
861 | propagate the locus of the edge between A and B onto it. */ |
862 | const bool forward_edge_locus |
863 | = (b->flags & BB_FORWARDER_BLOCK) != 0 |
864 | && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION; |
865 | rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a); |
866 | rtx_insn *del_first = NULL, *del_last = NULL; |
867 | rtx_insn *b_debug_start = b_end, *b_debug_end = b_end; |
868 | int b_empty = 0; |
869 | |
870 | if (dump_file) |
871 | fprintf (dump_file, "Merging block %d into block %d...\n", b->index, |
872 | a->index); |
873 | |
874 | while (DEBUG_INSN_P (b_end)) |
875 | b_end = PREV_INSN (b_debug_start = b_end); |
876 | |
877 | /* If there was a CODE_LABEL beginning B, delete it. */ |
878 | if (LABEL_P (b_head)) |
879 | { |
880 | /* Detect basic blocks with nothing but a label. This can happen |
881 | in particular at the end of a function. */ |
882 | if (b_head == b_end) |
883 | b_empty = 1; |
884 | |
885 | del_first = del_last = b_head; |
886 | b_head = NEXT_INSN (b_head); |
887 | } |
888 | |
889 | /* Delete the basic block note and handle blocks containing just that |
890 | note. */ |
891 | if (NOTE_INSN_BASIC_BLOCK_P (b_head)) |
892 | { |
893 | if (b_head == b_end) |
894 | b_empty = 1; |
895 | if (! del_last) |
896 | del_first = b_head; |
897 | |
898 | del_last = b_head; |
899 | b_head = NEXT_INSN (b_head); |
900 | } |
901 | |
902 | /* If there was a jump out of A, delete it. */ |
903 | if (JUMP_P (a_end)) |
904 | { |
905 | rtx_insn *prev; |
906 | |
907 | for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev)) |
908 | if (!NOTE_P (prev) |
909 | || NOTE_INSN_BASIC_BLOCK_P (prev) |
910 | || prev == BB_HEAD (a)) |
911 | break; |
912 | |
913 | del_first = a_end; |
914 | |
915 | a_end = PREV_INSN (del_first); |
916 | } |
917 | else if (BARRIER_P (NEXT_INSN (a_end))) |
918 | del_first = NEXT_INSN (a_end); |
919 | |
920 | /* Delete everything marked above as well as crap that might be |
921 | hanging out between the two blocks. */ |
922 | BB_END (a) = a_end; |
923 | BB_HEAD (b) = b_empty ? NULL : b_head; |
924 | delete_insn_chain (del_first, del_last, true); |
925 | |
926 | /* If not optimizing, preserve the locus of the single edge between |
927 | blocks A and B if necessary by emitting a nop. */ |
928 | if (!optimize |
929 | && !forward_edge_locus |
930 | && !DECL_IGNORED_P (current_function_decl)) |
931 | { |
932 | emit_nop_for_unique_locus_between (a, b); |
933 | a_end = BB_END (a); |
934 | } |
935 | |
936 | /* Reassociate the insns of B with A. */ |
937 | if (!b_empty) |
938 | { |
939 | update_bb_for_insn_chain (a_end, b_debug_end, a); |
940 | |
941 | BB_END (a) = b_debug_end; |
942 | BB_HEAD (b) = NULL; |
943 | } |
944 | else if (b_end != b_debug_end) |
945 | { |
946 | /* Move any deleted labels and other notes between the end of A |
947 | and the debug insns that make up B after the debug insns, |
948 | bringing the debug insns into A while keeping the notes after |
949 | the end of A. */ |
950 | if (NEXT_INSN (a_end) != b_debug_start) |
951 | reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start), |
952 | b_debug_end); |
953 | update_bb_for_insn_chain (b_debug_start, b_debug_end, a); |
954 | BB_END (a) = b_debug_end; |
955 | } |
956 | |
957 | df_bb_delete (b->index); |
958 | |
959 | if (forward_edge_locus) |
960 | EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus; |
961 | |
962 | if (dump_file) |
963 | fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index); |
964 | } |
965 | |
966 | |
967 | /* Return true when block A and B can be merged. */ |
968 | |
969 | static bool |
970 | rtl_can_merge_blocks (basic_block a, basic_block b) |
971 | { |
972 | /* If we are partitioning hot/cold basic blocks, we don't want to |
973 | mess up unconditional or indirect jumps that cross between hot |
974 | and cold sections. |
975 | |
976 | Basic block partitioning may result in some jumps that appear to |
977 | be optimizable (or blocks that appear to be mergeable), but which really |
978 | must be left untouched (they are required to make it safely across |
979 | partition boundaries). See the comments at the top of |
980 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
981 | |
982 | if (BB_PARTITION (a) != BB_PARTITION (b)) |
983 | return false; |
984 | |
985 | /* Protect the loop latches. */ |
986 | if (current_loops && b->loop_father->latch == b) |
987 | return false; |
988 | |
989 | /* There must be exactly one edge in between the blocks. */ |
990 | return (single_succ_p (a) |
991 | && single_succ (a) == b |
992 | && single_pred_p (b) |
993 | && a != b |
994 | /* Must be simple edge. */ |
995 | && !(single_succ_edge (a)->flags & EDGE_COMPLEX) |
996 | && a->next_bb == b |
997 | && a != ENTRY_BLOCK_PTR_FOR_FN (cfun) |
998 | && b != EXIT_BLOCK_PTR_FOR_FN (cfun) |
999 | /* If the jump insn has side effects, |
1000 | we can't kill the edge. */ |
1001 | && (!JUMP_P (BB_END (a)) |
1002 | || (reload_completed |
1003 | ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a))))); |
1004 | } |
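
Callers normally reach this predicate through the CFG hooks, pairing the check with the merge itself; a sketch (adjacent blocks 'a' and 'b' assumed):

    /* In RTL mode these hooks dispatch to rtl_can_merge_blocks and
       rtl_merge_blocks defined above.  */
    if (can_merge_blocks_p (a, b))
      merge_blocks (a, b);	/* B's insns are absorbed into A.  */
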
1005 | |
1006 | /* Return the label in the head of basic block BLOCK. Create one if it doesn't |
1007 | exist. */ |
1008 | |
1009 | rtx_code_label * |
1010 | block_label (basic_block block) |
1011 | { |
1012 | if (block == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1013 | return NULL; |
1014 | |
1015 | if (!LABEL_P (BB_HEAD (block))) |
1016 | { |
1017 | BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block)); |
1018 | } |
1019 | |
1020 | return as_a <rtx_code_label *> (BB_HEAD (block)); |
1021 | } |
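
A typical use of block_label is retargeting a jump, as done later in this file; a sketch with 'jump' and 'target' assumed from the caller:

    /* block_label creates the CODE_LABEL on demand, so TARGET need not
       already start with one.  NULL is returned only for the exit block.  */
    rtx_code_label *l = block_label (target);
    if (l)
      redirect_jump (as_a <rtx_jump_insn *> (jump), l, 0);
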
1022 | |
1023 | /* Remove all barriers from BB_FOOTER of a BB. */ |
1024 | |
1025 | static void |
1026 | remove_barriers_from_footer (basic_block bb) |
1027 | { |
1028 | rtx_insn *insn = BB_FOOTER (bb); |
1029 | |
1030 | /* Remove barriers but keep jumptables. */ |
1031 | while (insn) |
1032 | { |
1033 | if (BARRIER_P (insn)) |
1034 | { |
1035 | if (PREV_INSN (insn)) |
1036 | SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); |
1037 | else |
1038 | BB_FOOTER (bb) = NEXT_INSN (insn); |
1039 | if (NEXT_INSN (insn)) |
1040 | SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); |
1041 | } |
1042 | if (LABEL_P (insn)) |
1043 | return; |
1044 | insn = NEXT_INSN (insn); |
1045 | } |
1046 | } |
1047 | |
1048 | /* Attempt to perform edge redirection by replacing possibly complex jump |
1049 | instruction by unconditional jump or removing jump completely. This can |
1050 | apply only if all edges now point to the same block. The parameters and |
1051 | return values are equivalent to redirect_edge_and_branch. */ |
1052 | |
1053 | edge |
1054 | try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout) |
1055 | { |
1056 | basic_block src = e->src; |
1057 | rtx_insn *insn = BB_END (src); |
1058 | rtx set; |
1059 | int fallthru = 0; |
1060 | |
1061 | /* If we are partitioning hot/cold basic blocks, we don't want to |
1062 | mess up unconditional or indirect jumps that cross between hot |
1063 | and cold sections. |
1064 | |
1065 | Basic block partitioning may result in some jumps that appear to |
1066 | be optimizable (or blocks that appear to be mergeable), but which really |
1067 | must be left untouched (they are required to make it safely across |
1068 | partition boundaries). See the comments at the top of |
1069 | bb-reorder.cc:partition_hot_cold_basic_blocks for complete details. */ |
1070 | |
1071 | if (BB_PARTITION (src) != BB_PARTITION (target)) |
1072 | return NULL; |
1073 | |
1074 | /* We can replace or remove a complex jump only when we have exactly |
1075 | two edges. Also, if we have exactly one outgoing edge, we can |
1076 | redirect that. */ |
1077 | if (EDGE_COUNT (src->succs) >= 3 |
1078 | /* Verify that all targets will be TARGET. Specifically, the |
1079 | edge that is not E must also go to TARGET. */ |
1080 | || (EDGE_COUNT (src->succs) == 2 |
1081 | && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)) |
1082 | return NULL; |
1083 | |
1084 | if (!onlyjump_p (insn)) |
1085 | return NULL; |
1086 | if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL)) |
1087 | return NULL; |
1088 | |
1089 | /* Avoid removing branch with side effects. */ |
1090 | set = single_set (insn); |
1091 | if (!set || side_effects_p (set)) |
1092 | return NULL; |
1093 | |
1094 | /* See if we can create the fallthru edge. */ |
1095 | if (in_cfglayout || can_fallthru (src, target)) |
1096 | { |
1097 | if (dump_file) |
1098 | fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn)); |
1099 | fallthru = 1; |
1100 | |
1101 | /* Selectively unlink whole insn chain. */ |
1102 | if (in_cfglayout) |
1103 | { |
1104 | delete_insn_chain (insn, BB_END (src), false); |
1105 | remove_barriers_from_footer (src); |
1106 | } |
1107 | else |
1108 | delete_insn_chain (insn, PREV_INSN (BB_HEAD (target)), false); |
1109 | } |
1110 | |
1111 | /* If this already is simplejump, redirect it. */ |
1112 | else if (simplejump_p (insn)) |
1113 | { |
1114 | if (e->dest == target) |
1115 | return NULL; |
1116 | if (dump_file) |
1117 | fprintf (dump_file, "Redirecting jump %i from %i to %i.\n", |
1118 | INSN_UID (insn), e->dest->index, target->index); |
1119 | if (!redirect_jump (as_a <rtx_jump_insn *> (insn), |
1120 | block_label (target), 0)) |
1121 | { |
1122 | gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun)); |
1123 | return NULL; |
1124 | } |
1125 | } |
1126 | |
1127 | /* Cannot do anything for target exit block. */ |
1128 | else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1129 | return NULL; |
1130 | |
1131 | /* Or replace possibly complicated jump insn by simple jump insn. */ |
1132 | else |
1133 | { |
1134 | rtx_code_label *target_label = block_label (target); |
1135 | rtx_insn *barrier; |
1136 | rtx_insn *label; |
1137 | rtx_jump_table_data *table; |
1138 | |
1139 | emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn); |
1140 | JUMP_LABEL (BB_END (src)) = target_label; |
1141 | LABEL_NUSES (target_label)++; |
1142 | if (dump_file) |
1143 | fprintf (dump_file, "Replacing insn %i by jump %i\n", |
1144 | INSN_UID (insn), INSN_UID (BB_END (src))); |
1145 | |
1146 | |
1147 | delete_insn_chain (insn, insn, false); |
1148 | |
1149 | /* Recognize a tablejump that we are converting to a |
1150 | simple jump and remove its associated CODE_LABEL |
1151 | and ADDR_VEC or ADDR_DIFF_VEC. */ |
1152 | if (tablejump_p (insn, &label, &table)) |
1153 | delete_insn_chain (label, table, false); |
1154 | |
1155 | barrier = next_nonnote_nondebug_insn (BB_END (src)); |
1156 | if (!barrier || !BARRIER_P (barrier)) |
1157 | emit_barrier_after (BB_END (src)); |
1158 | else |
1159 | { |
1160 | if (barrier != NEXT_INSN (BB_END (src))) |
1161 | { |
1162 | /* Move the jump before barrier so that the notes |
1163 | which originally were or were created before jump table are |
1164 | inside the basic block. */ |
1165 | rtx_insn *new_insn = BB_END (src); |
1166 | |
1167 | update_bb_for_insn_chain (NEXT_INSN (BB_END (src)), |
1168 | PREV_INSN (barrier), src); |
1169 | |
1170 | SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn); |
1171 | SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn); |
1172 | |
1173 | SET_NEXT_INSN (new_insn) = barrier; |
1174 | SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn; |
1175 | |
1176 | SET_PREV_INSN (new_insn) = PREV_INSN (barrier); |
1177 | SET_PREV_INSN (barrier) = new_insn; |
1178 | } |
1179 | } |
1180 | } |
1181 | |
1182 | /* Keep only one edge out and set proper flags. */ |
1183 | if (!single_succ_p (src)) |
1184 | remove_edge (e); |
1185 | gcc_assert (single_succ_p (src)); |
1186 | |
1187 | e = single_succ_edge (src); |
1188 | if (fallthru) |
1189 | e->flags = EDGE_FALLTHRU; |
1190 | else |
1191 | e->flags = 0; |
1192 | |
1193 | e->probability = profile_probability::always (); |
1194 | |
1195 | if (e->dest != target) |
1196 | redirect_edge_succ (e, target); |
1197 | return e; |
1198 | } |
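
Because the parameters and return value match redirect_edge_and_branch, a caller can try the cheap replacement first and fall back to patching the jump in place; a sketch of that pattern (names from this file; the fallback structure is an assumption):

    edge new_e = try_redirect_by_replacing_jump (e, target, false);
    if (new_e == NULL)
      /* Could not replace or remove the jump; patch its label instead.  */
      new_e = redirect_branch_edge (e, target);
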
1199 | |
1200 | /* Subroutine of redirect_branch_edge that tries to patch the jump |
1201 | instruction INSN so that it reaches block NEW. Do this |
1202 | only when it originally reached block OLD. Return true if this |
1203 | worked or the original target wasn't OLD, return false if redirection |
1204 | doesn't work. */ |
1205 | |
1206 | static bool |
1207 | patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb) |
1208 | { |
1209 | rtx_jump_table_data *table; |
1210 | rtx tmp; |
1211 | /* Recognize a tablejump and adjust all matching cases. */ |
1212 | if (tablejump_p (insn, NULL, &table)) |
1213 | { |
1214 | rtvec vec; |
1215 | int j; |
1216 | rtx_code_label *new_label = block_label (new_bb); |
1217 | |
1218 | if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1219 | return false; |
1220 | vec = table->get_labels (); |
1221 | |
1222 | for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j) |
1223 | if (XEXP (RTVEC_ELT (vec, j), 0) == old_label) |
1224 | { |
1225 | RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label); |
1226 | --LABEL_NUSES (old_label); |
1227 | ++LABEL_NUSES (new_label); |
1228 | } |
1229 | |
1230 | /* Handle casesi dispatch insns. */ |
1231 | if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX |
1232 | && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label) |
1233 | { |
1234 | XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode, |
1235 | new_label); |
1236 | --LABEL_NUSES (old_label); |
1237 | ++LABEL_NUSES (new_label); |
1238 | } |
1239 | } |
1240 | else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL) |
1241 | { |
1242 | int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp); |
1243 | rtx note; |
1244 | |
1245 | if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1246 | return false; |
1247 | rtx_code_label *new_label = block_label (new_bb); |
1248 | |
1249 | for (i = 0; i < n; ++i) |
1250 | { |
1251 | rtx old_ref = ASM_OPERANDS_LABEL (tmp, i); |
1252 | gcc_assert (GET_CODE (old_ref) == LABEL_REF); |
1253 | if (XEXP (old_ref, 0) == old_label) |
1254 | { |
1255 | ASM_OPERANDS_LABEL (tmp, i) |
1256 | = gen_rtx_LABEL_REF (Pmode, new_label); |
1257 | --LABEL_NUSES (old_label); |
1258 | ++LABEL_NUSES (new_label); |
1259 | } |
1260 | } |
1261 | |
1262 | if (JUMP_LABEL (insn) == old_label) |
1263 | { |
1264 | JUMP_LABEL (insn) = new_label; |
1265 | note = find_reg_note (insn, REG_LABEL_TARGET, new_label); |
1266 | if (note) |
1267 | remove_note (insn, note); |
1268 | } |
1269 | else |
1270 | { |
1271 | note = find_reg_note (insn, REG_LABEL_TARGET, old_label); |
1272 | if (note) |
1273 | remove_note (insn, note); |
1274 | if (JUMP_LABEL (insn)(((insn)->u.fld[7]).rt_rtx) != new_label |
1275 | && !find_reg_note (insn, REG_LABEL_TARGET, new_label)) |
1276 | add_reg_note (insn, REG_LABEL_TARGET, new_label); |
1277 | } |
1278 | while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label)) |
1279 | != NULL_RTX) |
1280 | XEXP (note, 0) = new_label; |
1281 | } |
1282 | else |
1283 | { |
1284 | /* ?? We may play the games with moving the named labels from |
1285 | one basic block to the other in case only one computed_jump is |
1286 | available. */ |
1287 | if (computed_jump_p (insn) |
1288 | /* A return instruction can't be redirected. */ |
1289 | || returnjump_p (insn)) |
1290 | return false; |
1291 | |
1292 | if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label) |
1293 | { |
1294 | /* If the insn doesn't go where we think, we're confused. */ |
1295 | gcc_assert (JUMP_LABEL (insn) == old_label); |
1296 | |
1297 | /* If the substitution doesn't succeed, die. This can happen |
1298 | if the back end emitted unrecognizable instructions or if |
1299 | target is exit block on some arches. Or for crossing |
1300 | jumps. */ |
1301 | if (!redirect_jump (as_a <rtx_jump_insn *> (insn), |
1302 | block_label (new_bb), 0)) |
1303 | { |
1304 | gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
1305 | || CROSSING_JUMP_P (insn)); |
1306 | return false; |
1307 | } |
1308 | } |
1309 | } |
1310 | return true; |
1311 | } |
1312 | |
1313 | |
1314 | /* Redirect an edge representing a branch of an (un)conditional jump or |
1315 | tablejump; return NULL on failure. */ |
1316 | static edge |
1317 | redirect_branch_edge (edge e, basic_block target) |
1318 | { |
1319 | rtx_insn *old_label = BB_HEAD (e->dest);
1320 | basic_block src = e->src;
1321 | rtx_insn *insn = BB_END (src);
1322 | |
1323 | /* We can only redirect non-fallthru edges of a jump insn.  */
1324 | if (e->flags & EDGE_FALLTHRU) |
1325 | return NULL;
1326 | else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
1327 | return NULL;
1328 | |
1329 | if (!currently_expanding_to_rtl) |
1330 | { |
1331 | if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target)) |
1332 | return NULL;
1333 | } |
1334 | else |
1335 | /* When expanding this BB might actually contain multiple |
1336 | jumps (i.e. not yet split by find_many_sub_basic_blocks). |
1337 | Redirect all of those that match our label. */ |
1338 | FOR_BB_INSNS (src, insn)
1339 | if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
1340 | old_label, target))
1341 | return NULL;
1342 | |
1343 | if (dump_file) |
1344 | fprintf (dump_file, "Edge %i->%i redirected to %i\n", |
1345 | e->src->index, e->dest->index, target->index); |
1346 | |
1347 | if (e->dest != target) |
1348 | e = redirect_edge_succ_nodup (e, target); |
1349 | |
1350 | return e; |
1351 | } |
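 | /* Added commentary (not in the original source): a sketch of how a
 |    caller might use redirect_branch_edge directly, assuming E is a
 |    non-fallthru edge out of a block ending in a jump:
 |
 |      if (edge r = redirect_branch_edge (e, new_target))
 |        df_set_bb_dirty (r->src);  // the jump insn was patched
 |
 |    rtl_redirect_edge_and_branch below follows this pattern.  */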
1352 | |
1353 | /* Called when edge E has been redirected to a new destination, |
1354 | in order to update the region crossing flag on the edge and |
1355 | jump. */ |
1356 | |
1357 | static void |
1358 | fixup_partition_crossing (edge e) |
1359 | { |
1360 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
1361 | == EXIT_BLOCK_PTR_FOR_FN (cfun))
1362 | return; |
1363 | /* If we redirected an existing edge, it may already be marked |
1364 | crossing, even though the new src is missing a reg crossing note. |
1365 | But make sure reg crossing note doesn't already exist before |
1366 | inserting. */ |
1367 | if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
1368 | { |
1369 | e->flags |= EDGE_CROSSING; |
1370 | if (JUMP_P (BB_END (e->src)))
1371 | CROSSING_JUMP_P (BB_END (e->src)) = 1;
1372 | } |
1373 | else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
1374 | { |
1375 | e->flags &= ~EDGE_CROSSING; |
1376 | /* Remove the section crossing note from jump at end of |
1377 | src if it exists, and if no other successors are |
1378 | still crossing. */ |
1379 | if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
1380 | { |
1381 | bool has_crossing_succ = false; |
1382 | edge e2; |
1383 | edge_iterator ei; |
1384 | FOR_EACH_EDGE (e2, ei, e->src->succs)
1385 | { |
1386 | has_crossing_succ |= (e2->flags & EDGE_CROSSING); |
1387 | if (has_crossing_succ) |
1388 | break; |
1389 | } |
1390 | if (!has_crossing_succ) |
1391 | CROSSING_JUMP_P (BB_END (e->src)) = 0;
1392 | } |
1393 | } |
1394 | } |
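 | /* Added commentary: informally, the invariant maintained above is
 |      (e->flags & EDGE_CROSSING) != 0
 |        <=> BB_PARTITION (e->src) != BB_PARTITION (e->dest),
 |    with CROSSING_JUMP_P set on the source's final jump iff at least one
 |    successor edge still crosses.  */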
1395 | |
1396 | /* Called when block BB has been reassigned to the cold partition, |
1397 | because it is now dominated by another cold block, |
1398 | to ensure that the region crossing attributes are updated. */ |
1399 | |
1400 | static void |
1401 | fixup_new_cold_bb (basic_block bb) |
1402 | { |
1403 | edge e; |
1404 | edge_iterator ei; |
1405 | |
1406 | /* This is called when a hot bb is found to now be dominated |
1407 | by a cold bb and therefore needs to become cold. Therefore, |
1408 | its preds will no longer be region crossing. Any non-dominating |
1409 | preds that were previously hot would also have become cold |
1410 | in the caller for the same region. Any preds that were previously |
1411 | region-crossing will be adjusted in fixup_partition_crossing. */ |
1412 | FOR_EACH_EDGE (e, ei, bb->preds)
1413 | { |
1414 | fixup_partition_crossing (e); |
1415 | } |
1416 | |
1417 | /* Possibly need to make bb's successor edges region crossing, |
1418 | or remove stale region crossing. */ |
1419 | FOR_EACH_EDGE (e, ei, bb->succs)
1420 | { |
1421 | /* We can't have fall-through edges across partition boundaries. |
1422 | Note that force_nonfallthru will do any necessary partition |
1423 | boundary fixup by calling fixup_partition_crossing itself. */ |
1424 | if ((e->flags & EDGE_FALLTHRU) |
1425 | && BB_PARTITION (bb) != BB_PARTITION (e->dest)
1426 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1427 | force_nonfallthru (e); |
1428 | else |
1429 | fixup_partition_crossing (e); |
1430 | } |
1431 | } |
1432 | |
1433 | /* Attempt to change code to redirect edge E to TARGET.  Do not do so at the
1434 | expense of adding new instructions or reordering basic blocks.
1435 |
1436 | The function may also be called with the edge destination already equal to
1437 | TARGET.  Then it should try the simplifications and do nothing if none is
1438 | possible.
1439 |
1440 | Return the edge representing the branch if the transformation succeeded.
1441 | Return NULL on failure.  We still return NULL when E already pointed to
1442 | TARGET and we did not manage to simplify the instruction stream.  */
1443 | |
1444 | static edge |
1445 | rtl_redirect_edge_and_branch (edge e, basic_block target) |
1446 | { |
1447 | edge ret; |
1448 | basic_block src = e->src; |
1449 | basic_block dest = e->dest; |
1450 | |
1451 | if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) |
1452 | return NULL;
1453 | |
1454 | if (dest == target) |
1455 | return e; |
1456 | |
1457 | if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
1458 | { |
1459 | df_set_bb_dirty (src); |
1460 | fixup_partition_crossing (ret); |
1461 | return ret; |
1462 | } |
1463 | |
1464 | ret = redirect_branch_edge (e, target); |
1465 | if (!ret) |
1466 | return NULL;
1467 | |
1468 | df_set_bb_dirty (src); |
1469 | fixup_partition_crossing (ret); |
1470 | return ret; |
1471 | } |
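 | /* Usage sketch (added, illustrative only): callers normally go through
 |    the cfg hook redirect_edge_and_branch, which dispatches here for RTL:
 |
 |      if (edge r = redirect_edge_and_branch (e, target))
 |        gcc_assert (r->dest == target);
 |      else
 |        ...  // fall back to redirect_edge_and_branch_force
 | */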
1472 | |
1473 | /* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode. */ |
1474 | |
1475 | void |
1476 | emit_barrier_after_bb (basic_block bb) |
1477 | { |
1478 | rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
1479 | gcc_assert (current_ir_type () == IR_RTL_CFGRTL
1480 | || current_ir_type () == IR_RTL_CFGLAYOUT);
1481 | if (current_ir_type () == IR_RTL_CFGLAYOUT) |
1482 | { |
1483 | rtx_insn *insn = unlink_insn_chain (barrier, barrier); |
1484 | |
1485 | if (BB_FOOTER (bb))
1486 | {
1487 | rtx_insn *footer_tail = BB_FOOTER (bb);
1488 | |
1489 | while (NEXT_INSN (footer_tail)) |
1490 | footer_tail = NEXT_INSN (footer_tail); |
1491 | if (!BARRIER_P (footer_tail))
1492 | { |
1493 | SET_NEXT_INSN (footer_tail) = insn; |
1494 | SET_PREV_INSN (insn) = footer_tail; |
1495 | } |
1496 | } |
1497 | else |
1498 | BB_FOOTER (bb) = insn;
1499 | } |
1500 | } |
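 | /* Added note: in cfglayout mode the insn chain carries no barriers, so
 |    the barrier created above is unlinked and parked on the block's
 |    footer; the footer is re-linked after the block when the chain is
 |    linearized again by fixup_reorder_chain.  */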
1501 | |
1502 | /* Like force_nonfallthru below, but additionally performs redirection.
1503 | Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
1504 | when redirecting to the EXIT_BLOCK, it is either ret_rtx or |
1505 | simple_return_rtx, indicating which kind of returnjump to create. |
1506 | It should be NULL otherwise. */ |
1507 | |
1508 | basic_block |
1509 | force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label) |
1510 | { |
1511 | basic_block jump_block, new_bb = NULL, src = e->src;
1512 | rtx note; |
1513 | edge new_edge; |
1514 | int abnormal_edge_flags = 0; |
1515 | bool asm_goto_edge = false; |
1516 | int loc; |
1517 | |
1518 | /* If the last instruction is a conditional jump to the next
1519 | instruction, first redirect the jump itself and then continue
1520 | by creating a basic block afterwards to redirect the fallthru edge.  */
1521 | if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
1522 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1523 | && any_condjump_p (BB_END (e->src))
1524 | && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
1525 | { |
1526 | rtx note; |
1527 | edge b = unchecked_make_edge (e->src, target, 0); |
1528 | bool redirected; |
1529 | |
1530 | redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
1531 | block_label (target), 0);
1532 | gcc_assert (redirected);
1533 |
1534 | note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
1535 | if (note) |
1536 | { |
1537 | int prob = XINT (note, 0);
1538 | |
1539 | b->probability = profile_probability::from_reg_br_prob_note (prob); |
1540 | e->probability -= e->probability; |
1541 | } |
1542 | } |
1543 | |
1544 | if (e->flags & EDGE_ABNORMAL) |
1545 | { |
1546 | /* Irritating special case - fallthru edge to the same block as abnormal |
1547 | edge. |
1548 | We can't redirect abnormal edge, but we still can split the fallthru |
1549 | one and create separate abnormal edge to original destination. |
1550 | This allows bb-reorder to make such edge non-fallthru. */ |
1551 | gcc_assert (e->dest == target);
1552 | abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU; |
1553 | e->flags &= EDGE_FALLTHRU; |
1554 | } |
1555 | else |
1556 | { |
1557 | gcc_assert (e->flags & EDGE_FALLTHRU);
1558 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1559 | { |
1560 | /* We can't redirect the entry block. Create an empty block |
1561 | at the start of the function which we use to add the new |
1562 | jump. */ |
1563 | edge tmp; |
1564 | edge_iterator ei; |
1565 | bool found = false; |
1566 | |
1567 | basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
1568 | ENTRY_BLOCK_PTR_FOR_FN (cfun));
1569 | bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
1570 | |
1571 | /* Make sure new block ends up in correct hot/cold section. */ |
1572 | BB_COPY_PARTITION (bb, e->dest);
1573 | |
1574 | /* Change the existing edge's source to be the new block, and add |
1575 | a new edge from the entry block to the new block. */ |
1576 | e->src = bb; |
1577 | for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
1578 | (tmp = ei_safe_edge (ei)); ) |
1579 | { |
1580 | if (tmp == e) |
1581 | { |
1582 | ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
1583 | found = true; |
1584 | break; |
1585 | } |
1586 | else |
1587 | ei_next (&ei); |
1588 | } |
1589 | |
1590 | gcc_assert (found);
1591 | |
1592 | vec_safe_push (bb->succs, e); |
1593 | make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
1594 | EDGE_FALLTHRU); |
1595 | } |
1596 | } |
1597 | |
1598 | /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs |
1599 | don't point to the target or fallthru label. */ |
1600 | if (JUMP_P (BB_END (e->src))
1601 | && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
1602 | && (e->flags & EDGE_FALLTHRU)
1603 | && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
1604 | { |
1605 | int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
1606 | bool adjust_jump_target = false; |
1607 | |
1608 | for (i = 0; i < n; ++i) |
1609 | { |
1610 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
1611 | {
1612 | LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
1613 | XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
1614 | LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
1615 | adjust_jump_target = true;
1616 | }
1617 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
1618 | asm_goto_edge = true; |
1619 | } |
1620 | if (adjust_jump_target) |
1621 | { |
1622 | rtx_insn *insn = BB_END (e->src);
1623 | rtx note;
1624 | rtx_insn *old_label = BB_HEAD (e->dest);
1625 | rtx_insn *new_label = BB_HEAD (target);
1626 |
1627 | if (JUMP_LABEL (insn) == old_label)
1628 | {
1629 | JUMP_LABEL (insn) = new_label;
1630 | note = find_reg_note (insn, REG_LABEL_TARGET, new_label); |
1631 | if (note) |
1632 | remove_note (insn, note); |
1633 | } |
1634 | else |
1635 | { |
1636 | note = find_reg_note (insn, REG_LABEL_TARGET, old_label); |
1637 | if (note) |
1638 | remove_note (insn, note); |
1639 | if (JUMP_LABEL (insn) != new_label
1640 | && !find_reg_note (insn, REG_LABEL_TARGET, new_label)) |
1641 | add_reg_note (insn, REG_LABEL_TARGET, new_label); |
1642 | } |
1643 | while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label)) |
1644 | != NULL_RTX)
1645 | XEXP (note, 0) = new_label;
1646 | } |
1647 | } |
1648 | |
1649 | if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
1650 | { |
1651 | rtx_insn *new_head; |
1652 | profile_count count = e->count (); |
1653 | profile_probability probability = e->probability; |
1654 | /* Create the new structures. */ |
1655 | |
1656 | /* If the old block ended with a tablejump, skip its table |
1657 | by searching forward from there. Otherwise start searching |
1658 | forward from the last instruction of the old block. */ |
1659 | rtx_jump_table_data *table; |
1660 | if (tablejump_p (BB_END (e->src), NULL, &table))
1661 | new_head = table;
1662 | else
1663 | new_head = BB_END (e->src);
1664 | new_head = NEXT_INSN (new_head); |
1665 | |
1666 | jump_block = create_basic_block (new_head, NULL, e->src);
1667 | jump_block->count = count; |
1668 | |
1669 | /* Make sure new block ends up in correct hot/cold section. */ |
1670 | |
1671 | BB_COPY_PARTITION (jump_block, e->src);
1672 | |
1673 | /* Wire edge in. */ |
1674 | new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU); |
1675 | new_edge->probability = probability; |
1676 | |
1677 | /* Redirect old edge. */ |
1678 | redirect_edge_pred (e, jump_block); |
1679 | e->probability = profile_probability::always (); |
1680 | |
1681 | /* If e->src was previously region crossing, it no longer is |
1682 | and the reg crossing note should be removed. */ |
1683 | fixup_partition_crossing (new_edge); |
1684 | |
1685 | /* If asm goto has any label refs to target's label, |
1686 | add also edge from asm goto bb to target. */ |
1687 | if (asm_goto_edge) |
1688 | { |
1689 | new_edge->probability /= 2; |
1690 | jump_block->count /= 2; |
1691 | edge new_edge2 = make_edge (new_edge->src, target, |
1692 | e->flags & ~EDGE_FALLTHRU); |
1693 | new_edge2->probability = probability - new_edge->probability; |
1694 | } |
1695 | |
1696 | new_bb = jump_block; |
1697 | } |
1698 | else |
1699 | jump_block = e->src; |
1700 | |
1701 | loc = e->goto_locus; |
1702 | e->flags &= ~EDGE_FALLTHRU; |
1703 | if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
1704 | { |
1705 | if (jump_label == ret_rtx) |
1706 | emit_jump_insn_after_setloc (targetm.gen_return (), |
1707 | BB_END (jump_block), loc);
1708 | else |
1709 | { |
1710 | gcc_assert (jump_label == simple_return_rtx);
1711 | emit_jump_insn_after_setloc (targetm.gen_simple_return (),
1712 | BB_END (jump_block), loc);
1713 | } |
1714 | set_return_jump_label (BB_END (jump_block));
1715 | } |
1716 | else |
1717 | { |
1718 | rtx_code_label *label = block_label (target); |
1719 | emit_jump_insn_after_setloc (targetm.gen_jump (label), |
1720 | BB_END (jump_block), loc);
1721 | JUMP_LABEL (BB_END (jump_block)) = label;
1722 | LABEL_NUSES (label)++;
1723 | } |
1724 | |
1725 | /* We might be in cfg layout mode, and if so, the following routine will |
1726 | insert the barrier correctly. */ |
1727 | emit_barrier_after_bb (jump_block); |
1728 | redirect_edge_succ_nodup (e, target); |
1729 | |
1730 | if (abnormal_edge_flags) |
1731 | make_edge (src, target, abnormal_edge_flags); |
1732 | |
1733 | df_mark_solutions_dirty (); |
1734 | fixup_partition_crossing (e); |
1735 | return new_bb; |
1736 | } |
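 | /* Added commentary: rough shape of the transformation above.  Before,
 |    E is a fallthru edge A -> B; after, A falls through to a new
 |    jump_block J whose last insn is an explicit jump to B.  J is only
 |    created when A has other successors, an abnormal edge, or asm-goto
 |    label references; otherwise the jump is emitted at the end of A.  */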
1737 | |
1738 | /* Edge E is assumed to be a fallthru edge.  Emit the needed jump instruction
1739 | (and possibly create a new basic block) to make the edge non-fallthru.
1740 | Return the newly created BB or NULL if none.  */
1741 | |
1742 | static basic_block |
1743 | rtl_force_nonfallthru (edge e) |
1744 | { |
1745 | return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
1746 | } |
1747 | |
1748 | /* Redirect edge even at the expense of creating new jump insn or |
1749 | basic block. Return new basic block if created, NULL otherwise. |
1750 | Conversion must be possible. */ |
1751 | |
1752 | static basic_block |
1753 | rtl_redirect_edge_and_branch_force (edge e, basic_block target) |
1754 | { |
1755 | if (redirect_edge_and_branch (e, target) |
1756 | || e->dest == target) |
1757 | return NULL;
1758 | |
1759 | /* In case the edge redirection failed, try to force it to be non-fallthru |
1760 | and redirect newly created simplejump. */ |
1761 | df_set_bb_dirty (e->src); |
1762 | return force_nonfallthru_and_redirect (e, target, NULL_RTX);
1763 | } |
1764 | |
1765 | /* The given edge should potentially be a fallthru edge. If that is in |
1766 | fact true, delete the jump and barriers that are in the way. */ |
1767 | |
1768 | static void |
1769 | rtl_tidy_fallthru_edge (edge e) |
1770 | { |
1771 | rtx_insn *q; |
1772 | basic_block b = e->src, c = b->next_bb; |
1773 | |
1774 | /* ??? In a late-running flow pass, other folks may have deleted basic |
1775 | blocks by nopping out blocks, leaving multiple BARRIERs between here |
1776 | and the target label. They ought to be chastised and fixed. |
1777 | |
1778 | We can also wind up with a sequence of undeletable labels between |
1779 | one block and the next. |
1780 | |
1781 | So search through a sequence of barriers, labels, and notes for |
1782 | the head of block C and assert that we really do fall through. */ |
1783 | |
1784 | for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
1785 | if (NONDEBUG_INSN_P (q))
1786 | return; |
1787 | |
1788 | /* Remove what will soon cease being the jump insn from the source block. |
1789 | If block B consisted only of this single jump, turn it into a deleted |
1790 | note. */ |
1791 | q = BB_END (b);
1792 | if (JUMP_P (q)
1793 | && onlyjump_p (q) |
1794 | && (any_uncondjump_p (q) |
1795 | || single_succ_p (b))) |
1796 | { |
1797 | rtx_insn *label; |
1798 | rtx_jump_table_data *table; |
1799 | |
1800 | if (tablejump_p (q, &label, &table)) |
1801 | { |
1802 | /* The label is likely mentioned in some instruction before |
1803 | the tablejump and might not be DCEd, so turn it into |
1804 | a note instead and move before the tablejump that is going to |
1805 | be deleted. */ |
1806 | const char *name = LABEL_NAME (label);
1807 | PUT_CODE (label, NOTE);
1808 | NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
1809 | NOTE_DELETED_LABEL_NAME (label) = name;
1810 | reorder_insns (label, label, PREV_INSN (q)); |
1811 | delete_insn (table); |
1812 | } |
1813 | |
1814 | q = PREV_INSN (q); |
1815 | } |
1816 | /* Unconditional jumps with side-effects (i.e. which we can't just delete |
1817 | together with the barrier) should never have a fallthru edge. */ |
1818 | else if (JUMP_P (q) && any_uncondjump_p (q))
1819 | return; |
1820 | |
1821 | /* Selectively unlink the sequence. */ |
1822 | if (q != PREV_INSN (BB_HEAD (c)))
1823 | delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);
1824 | |
1825 | e->flags |= EDGE_FALLTHRU; |
1826 | } |
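 | /* Added commentary: rtl_tidy_fallthru_edge removes a jump of the form
 |      B: ... jump L;  barrier;  C: L: ...
 |    when only notes, labels, and barriers separate B from C, leaving a
 |    plain fallthru edge B -> C.  */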
1827 | |
1828 | /* Should move basic block BB after basic block AFTER. NIY. */ |
1829 | |
1830 | static bool |
1831 | rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
1832 | basic_block after ATTRIBUTE_UNUSED)
1833 | { |
1834 | return false; |
1835 | } |
1836 | |
1837 | /* Locate the last bb in the same partition as START_BB. */ |
1838 | |
1839 | static basic_block |
1840 | last_bb_in_partition (basic_block start_bb) |
1841 | { |
1842 | basic_block bb; |
1843 | FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
1844 | {
1845 | if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
1846 | return bb; |
1847 | } |
1848 | /* Return bb before the exit block. */ |
1849 | return bb->prev_bb; |
1850 | } |
1851 | |
1852 | /* Split a (typically critical) edge. Return the new block. |
1853 | The edge must not be abnormal. |
1854 | |
1855 | ??? The code generally expects to be called on critical edges. |
1856 | The case of a block ending in an unconditional jump to a |
1857 | block with multiple predecessors is not handled optimally. */ |
1858 | |
1859 | static basic_block |
1860 | rtl_split_edge (edge edge_in) |
1861 | { |
1862 | basic_block bb, new_bb; |
1863 | rtx_insn *before; |
1864 | |
1865 | /* Abnormal edges cannot be split. */ |
1866 | gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1867 | |
1868 | /* We are going to place the new block in front of edge destination. |
1869 | Avoid existence of fallthru predecessors. */ |
1870 | if ((edge_in->flags & EDGE_FALLTHRU) == 0) |
1871 | { |
1872 | edge e = find_fallthru_edge (edge_in->dest->preds); |
1873 | |
1874 | if (e) |
1875 | force_nonfallthru (e); |
1876 | } |
1877 | |
1878 | /* Create the basic block note. */ |
1879 | if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1880 | before = BB_HEAD (edge_in->dest);
1881 | else
1882 | before = NULL;
1883 | |
1884 | /* If this is a fall through edge to the exit block, the blocks might
1885 | not be adjacent, and the right place is after the source.  */
1886 | if ((edge_in->flags & EDGE_FALLTHRU) |
1887 | && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1888 | {
1889 | before = NEXT_INSN (BB_END (edge_in->src));
1890 | bb = create_basic_block (before, NULL, edge_in->src);
1891 | BB_COPY_PARTITION (bb, edge_in->src);
1892 | } |
1893 | else |
1894 | { |
1895 | if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1896 | {
1897 | bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1898 | BB_COPY_PARTITION (bb, edge_in->dest);
1899 | } |
1900 | else |
1901 | { |
1902 | basic_block after = edge_in->dest->prev_bb; |
1903 | /* If this is post-bb reordering, and the edge crosses a partition |
1904 | boundary, the new block needs to be inserted in the bb chain |
1905 | at the end of the src partition (since we put the new bb into |
1906 | that partition, see below). Otherwise we may end up creating |
1907 | an extra partition crossing in the chain, which is illegal. |
1908 | It can't go after the src, because src may have a fall-through |
1909 | to a different block. */ |
1910 | if (crtl->bb_reorder_complete
1911 | && (edge_in->flags & EDGE_CROSSING)) |
1912 | { |
1913 | after = last_bb_in_partition (edge_in->src); |
1914 | before = get_last_bb_insn (after); |
1915 | /* The instruction following the last bb in partition should |
1916 | be a barrier, since it cannot end in a fall-through. */ |
1917 | gcc_checking_assert (BARRIER_P (before));
1918 | before = NEXT_INSN (before); |
1919 | } |
1920 | bb = create_basic_block (before, NULL, after);
1921 | /* Put the split bb into the src partition, to avoid creating |
1922 | a situation where a cold bb dominates a hot bb, in the case |
1923 | where src is cold and dest is hot. The src will dominate |
1924 | the new bb (whereas it might not have dominated dest). */ |
1925 | BB_COPY_PARTITION (bb, edge_in->src);
1926 | } |
1927 | } |
1928 | |
1929 | make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU); |
1930 | |
1931 | /* Can't allow a region crossing edge to be fallthrough. */ |
1932 | if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1933 | && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1934 | { |
1935 | new_bb = force_nonfallthru (single_succ_edge (bb)); |
1936 | gcc_assert (!new_bb);
1937 | } |
1938 | |
1939 | /* For non-fallthru edges, we must adjust the predecessor's |
1940 | jump instruction to target our new block. */ |
1941 | if ((edge_in->flags & EDGE_FALLTHRU) == 0) |
1942 | { |
1943 | edge redirected = redirect_edge_and_branch (edge_in, bb); |
1944 | gcc_assert (redirected);
1945 | } |
1946 | else |
1947 | { |
1948 | if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1949 | { |
1950 | /* For asm goto even splitting of fallthru edge might |
1951 | need insn patching, as other labels might point to the |
1952 | old label. */ |
1953 | rtx_insn *last = BB_END (edge_in->src);
1954 | if (last
1955 | && JUMP_P (last)
1956 | && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1957 | && (extract_asm_operands (PATTERN (last))
1958 | || JUMP_LABEL (last) == before)
1959 | && patch_jump_insn (last, before, bb)) |
1960 | df_set_bb_dirty (edge_in->src); |
1961 | } |
1962 | redirect_edge_succ (edge_in, bb); |
1963 | } |
1964 | |
1965 | return bb; |
1966 | } |
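 | /* Usage sketch (added, illustrative): passes split a critical edge via
 |    the generic hook, which dispatches here for RTL:
 |
 |      basic_block nb = split_edge (e);  // nb falls through to e->dest
 |
 |    The new block inherits e->src's partition so that a cold block never
 |    dominates a hot one.  */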
1967 | |
1968 | /* Queue instructions for insertion on an edge between two basic blocks. |
1969 | The new instructions and basic blocks (if any) will not appear in the |
1970 | CFG until commit_edge_insertions is called. */ |
1971 | |
1972 | void |
1973 | insert_insn_on_edge (rtx pattern, edge e) |
1974 | { |
1975 | /* We cannot insert instructions on an abnormal critical edge. |
1976 | It will be easier to find the culprit if we die now. */ |
1977 | gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1978 | |
1979 | if (e->insns.r == NULL_RTX)
1980 | start_sequence (); |
1981 | else |
1982 | push_to_sequence (e->insns.r); |
1983 | |
1984 | emit_insn (pattern); |
1985 | |
1986 | e->insns.r = get_insns (); |
1987 | end_sequence (); |
1988 | } |
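 | /* Typical usage sketch (added; DST and SRC are hypothetical rtxes): a
 |    pass queues compensation code on an edge and commits it later:
 |
 |      insert_insn_on_edge (gen_move_insn (dst, src), e);
 |      ...
 |      commit_edge_insertions ();  // may split edges and create blocks
 | */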
1989 | |
1990 | /* Update the CFG for the instructions queued on edge E. */ |
1991 | |
1992 | void |
1993 | commit_one_edge_insertion (edge e) |
1994 | { |
1995 | rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1996 | basic_block bb; |
1997 | |
1998 | /* Pull the insns off the edge now since the edge might go away. */ |
1999 | insns = e->insns.r; |
2000 | e->insns.r = NULL;
2001 | |
2002 | /* Figure out where to put these insns. If the destination has |
2003 | one predecessor, insert there. Except for the exit block. */ |
2004 | if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2005 | { |
2006 | bb = e->dest; |
2007 | |
2008 | /* Get the location correct wrt a code label, and "nice" wrt |
2009 | a basic block note, and before everything else. */ |
2010 | tmp = BB_HEAD (bb);
2011 | if (LABEL_P (tmp))
2012 | tmp = NEXT_INSN (tmp);
2013 | if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2014 | tmp = NEXT_INSN (tmp);
2015 | if (tmp == BB_HEAD (bb))
2016 | before = tmp; |
2017 | else if (tmp) |
2018 | after = PREV_INSN (tmp); |
2019 | else |
2020 | after = get_last_insn (); |
2021 | } |
2022 | |
2023 | /* If the source has one successor and the edge is not abnormal, |
2024 | insert there. Except for the entry block. |
2025 | Don't do this if the predecessor ends in a jump other than |
2026 | unconditional simple jump. E.g. for asm goto that points all |
2027 | its labels at the fallthru basic block, we can't insert instructions |
2028 | before the asm goto, as the asm goto can have various side effects,
2029 | and can't emit instructions after the asm goto, as it must end |
2030 | the basic block. */ |
2031 | else if ((e->flags & EDGE_ABNORMAL) == 0 |
2032 | && single_succ_p (e->src) |
2033 | && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2034 | && (!JUMP_P (BB_END (e->src))
2035 | || simplejump_p (BB_END (e->src))))
2036 | { |
2037 | bb = e->src; |
2038 | |
2039 | /* It is possible to have a non-simple jump here. Consider a target |
2040 | where some forms of unconditional jumps clobber a register. This |
2041 | happens on the fr30 for example. |
2042 | |
2043 | We know this block has a single successor, so we can just emit |
2044 | the queued insns before the jump. */ |
2045 | if (JUMP_P (BB_END (bb)))
2046 | before = BB_END (bb);
2047 | else |
2048 | { |
2049 | /* We'd better be fallthru, or we've lost track of what's what. */ |
2050 | gcc_assert (e->flags & EDGE_FALLTHRU);
2051 | |
2052 | after = BB_END (bb);
2053 | } |
2054 | } |
2055 | |
2056 | /* Otherwise we must split the edge. */ |
2057 | else |
2058 | { |
2059 | bb = split_edge (e); |
2060 | |
2061 | /* If E crossed a partition boundary, we needed to make bb end in |
2062 | a region-crossing jump, even though it was originally fallthru. */ |
2063 | if (JUMP_P (BB_END (bb)))
2064 | before = BB_END (bb);
2065 | else
2066 | after = BB_END (bb);
2067 | } |
2068 | |
2069 | /* Now that we've found the spot, do the insertion. */ |
2070 | if (before) |
2071 | { |
2072 | emit_insn_before_noloc (insns, before, bb); |
2073 | last = prev_nonnote_insn (before); |
2074 | } |
2075 | else |
2076 | last = emit_insn_after_noloc (insns, after, bb); |
2077 | |
2078 | if (returnjump_p (last)) |
2079 | { |
2080 | /* ??? Remove all outgoing edges from BB and add one for EXIT. |
2081 | This is not currently a problem because this only happens |
2082 | for the (single) epilogue, which already has a fallthru edge |
2083 | to EXIT. */ |
2084 | |
2085 | e = single_succ_edge (bb); |
2086 | gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2087 | && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2088 | |
2089 | e->flags &= ~EDGE_FALLTHRU; |
2090 | emit_barrier_after (last); |
2091 | |
2092 | if (before) |
2093 | delete_insn (before); |
2094 | } |
2095 | else |
2096 | gcc_assert (!JUMP_P (last));
2097 | } |
2098 | |
2099 | /* Update the CFG for all queued instructions. */ |
2100 | |
2101 | void |
2102 | commit_edge_insertions (void) |
2103 | { |
2104 | basic_block bb; |
2105 | |
2106 | /* Optimization passes that invoke this routine can cause hot blocks |
2107 | previously reached by both hot and cold blocks to become dominated only |
2108 | by cold blocks. This will cause the verification below to fail, |
2109 | and lead to now cold code in the hot section. In some cases this |
2110 | may only be visible after newly unreachable blocks are deleted, |
2111 | which will be done by fixup_partitions. */ |
2112 | fixup_partitions (); |
2113 | |
2114 | if (!currently_expanding_to_rtl) |
2115 | checking_verify_flow_info (); |
2116 | |
2117 | FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2118 | EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2119 | { |
2120 | edge e; |
2121 | edge_iterator ei; |
2122 | |
2123 | FOR_EACH_EDGE (e, ei, bb->succs)
2124 | if (e->insns.r) |
2125 | { |
2126 | if (currently_expanding_to_rtl) |
2127 | rebuild_jump_labels_chain (e->insns.r); |
2128 | commit_one_edge_insertion (e); |
2129 | } |
2130 | } |
2131 | } |
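 | /* Added note: fixup_partitions runs first because the pass that queued
 |    the insns may have left hot blocks dominated only by cold ones, which
 |    the flow-info verifier would reject; each queued edge is then handled
 |    by commit_one_edge_insertion above.  */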
2132 | |
2133 | |
2134 | /* Print out RTL-specific basic block information (live information |
2135 | at start and end with TDF_DETAILS). FLAGS are the TDF_* masks |
2136 | documented in dumpfile.h. */ |
2137 | |
2138 | static void |
2139 | rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags) |
2140 | { |
2141 | char *s_indent; |
2142 | |
2143 | s_indent = (char *) alloca ((size_t) indent + 1);
2144 | memset (s_indent, ' ', (size_t) indent); |
2145 | s_indent[indent] = '\0'; |
2146 | |
2147 | if (df && (flags & TDF_DETAILS)) |
2148 | { |
2149 | df_dump_top (bb, outf); |
2150 | putc ('\n', outf); |
2151 | } |
2152 | |
2153 | if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK
2154 | && rtl_bb_info_initialized_p (bb))
2155 | {
2156 | rtx_insn *last = BB_END (bb);
2157 | if (last) |
2158 | last = NEXT_INSN (last); |
2159 | for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2160 | { |
2161 | if (flags & TDF_DETAILS) |
2162 | df_dump_insn_top (insn, outf); |
2163 | if (! (flags & TDF_SLIM)) |
2164 | print_rtl_single (outf, insn); |
2165 | else |
2166 | dump_insn_slim (outf, insn); |
2167 | if (flags & TDF_DETAILS) |
2168 | df_dump_insn_bottom (insn, outf); |
2169 | } |
2170 | } |
2171 | |
2172 | if (df && (flags & TDF_DETAILS)) |
2173 | { |
2174 | df_dump_bottom (bb, outf); |
2175 | putc ('\n', outf); |
2176 | } |
2177 | |
2178 | } |
2179 | |
2180 | /* Like dump_function_to_file, but for RTL. Print out dataflow information |
2181 | for the start of each basic block. FLAGS are the TDF_* masks documented |
2182 | in dumpfile.h. */ |
2183 | |
2184 | void |
2185 | print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags) |
2186 | { |
2187 | const rtx_insn *tmp_rtx; |
2188 | if (rtx_first == 0) |
2189 | fprintf (outf, "(nil)\n"); |
2190 | else |
2191 | { |
2192 | enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB }; |
2193 | int max_uid = get_max_uid (); |
2194 | basic_block *start = XCNEWVEC (basic_block, max_uid);
2195 | basic_block *end = XCNEWVEC (basic_block, max_uid);
2196 | enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2197 | basic_block bb; |
2198 | |
2199 | /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most |
2200 | insns, but the CFG is not maintained so the basic block info |
2201 | is not reliable. Therefore it's omitted from the dumps. */ |
2202 | if (! (cfun->curr_properties & PROP_cfg))
2203 | flags &= ~TDF_BLOCKS; |
2204 | |
2205 | if (df) |
2206 | df_dump_start (outf); |
2207 | |
2208 | if (cfun->curr_properties & PROP_cfg)
2209 | { |
2210 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2211 | { |
2212 | rtx_insn *x; |
2213 | |
2214 | start[INSN_UID (BB_HEAD (bb))] = bb;
2215 | end[INSN_UID (BB_END (bb))] = bb;
2216 | if (flags & TDF_BLOCKS) |
2217 | { |
2218 | for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2219 | { |
2220 | enum bb_state state = IN_MULTIPLE_BB; |
2221 | |
2222 | if (in_bb_p[INSN_UID (x)] == NOT_IN_BB) |
2223 | state = IN_ONE_BB; |
2224 | in_bb_p[INSN_UID (x)] = state; |
2225 | |
2226 | if (x == BB_END (bb))
2227 | break; |
2228 | } |
2229 | } |
2230 | } |
2231 | } |
2232 | |
2233 | for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2234 | { |
2235 | if (flags & TDF_BLOCKS) |
2236 | { |
2237 | bb = start[INSN_UID (tmp_rtx)]; |
2238 | if (bb != NULL)
2239 | { |
2240 | dump_bb_info (outf, bb, 0, dump_flags, true, false); |
2241 | if (df && (flags & TDF_DETAILS)) |
2242 | df_dump_top (bb, outf); |
2243 | } |
2244 | |
2245 | if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2246 | && !NOTE_P (tmp_rtx)
2247 | && !BARRIER_P (tmp_rtx))
2248 | fprintf (outf, ";; Insn is not within a basic block\n"); |
2249 | else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB) |
2250 | fprintf (outf, ";; Insn is in multiple basic blocks\n"); |
2251 | } |
2252 | |
2253 | if (flags & TDF_DETAILS) |
2254 | df_dump_insn_top (tmp_rtx, outf); |
2255 | if (! (flags & TDF_SLIM)) |
2256 | print_rtl_single (outf, tmp_rtx); |
2257 | else |
2258 | dump_insn_slim (outf, tmp_rtx); |
2259 | if (flags & TDF_DETAILS) |
2260 | df_dump_insn_bottom (tmp_rtx, outf); |
2261 | |
2262 | bb = end[INSN_UID (tmp_rtx)]; |
2263 | if (bb != NULL)
2264 | { |
2265 | if (flags & TDF_BLOCKS) |
2266 | { |
2267 | dump_bb_info (outf, bb, 0, dump_flags, false, true); |
2268 | if (df && (flags & TDF_DETAILS)) |
2269 | df_dump_bottom (bb, outf); |
2270 | putc ('\n', outf); |
2271 | } |
2272 | /* Emit a hint if the fallthrough target of the current basic block
2273 | isn't the block placed right after it.  */
2274 | else if (EDGE_COUNT (bb->succs) > 0)
2275 | { |
2276 | gcc_assert (BB_END (bb) == tmp_rtx);
2277 | const rtx_insn *ninsn = NEXT_INSN (tmp_rtx); |
2278 | /* Bypass intervening deleted-insn notes and debug insns. */ |
2279 | while (ninsn |
2280 | && !NONDEBUG_INSN_P (ninsn)
2281 | && !start[INSN_UID (ninsn)]) |
2282 | ninsn = NEXT_INSN (ninsn); |
2283 | edge e = find_fallthru_edge (bb->succs); |
2284 | if (e && ninsn) |
2285 | { |
2286 | basic_block dest = e->dest; |
2287 | if (start[INSN_UID (ninsn)] != dest) |
2288 | fprintf (outf, "%s ; pc falls through to BB %d\n", |
2289 | print_rtx_head, dest->index); |
2290 | } |
2291 | } |
2292 | } |
2293 | } |
2294 | |
2295 | free (start); |
2296 | free (end); |
2297 | free (in_bb_p); |
2298 | } |
2299 | } |
2300 | |
2301 | /* Update the branch probability of BB if a REG_BR_PROB is present. */ |
2302 | |
2303 | void |
2304 | update_br_prob_note (basic_block bb) |
2305 | { |
2306 | rtx note; |
2307 | note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2308 | if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2309 | { |
2310 | if (note) |
2311 | { |
2312 | rtx *note_link, this_rtx; |
2313 | |
2314 | note_link = &REG_NOTES (BB_END (bb));
2315 | for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2316 | if (this_rtx == note) |
2317 | { |
2318 | *note_link = XEXP (this_rtx, 1);
2319 | break; |
2320 | } |
2321 | } |
2322 | return; |
2323 | } |
2324 | if (!note |
2325 | || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2326 | return;
2327 | XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2328 | } |
2329 | |
2330 | /* Get the last insn associated with block BB (that includes barriers and |
2331 | tablejumps after BB). */ |
2332 | rtx_insn * |
2333 | get_last_bb_insn (basic_block bb) |
2334 | { |
2335 | rtx_jump_table_data *table; |
2336 | rtx_insn *tmp; |
2337 | rtx_insn *end = BB_END (bb);
2338 | |
2339 | /* Include any jump table following the basic block. */ |
2340 | if (tablejump_p (end, NULL, &table))
2341 | end = table; |
2342 | |
2343 | /* Include any barriers that may follow the basic block. */ |
2344 | tmp = next_nonnote_nondebug_insn_bb (end); |
2345 | while (tmp && BARRIER_P (tmp))
2346 | { |
2347 | end = tmp; |
2348 | tmp = next_nonnote_nondebug_insn_bb (end); |
2349 | } |
2350 | |
2351 | return end; |
2352 | } |
2353 | |
2354 | /* Add all BBs reachable from entry via hot paths into the SET. */ |
2355 | |
2356 | void |
2357 | find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set) |
2358 | { |
2359 | auto_vec<basic_block, 64> worklist; |
2360 | |
2361 | set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2362 | worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2363 | |
2364 | while (worklist.length () > 0) |
2365 | { |
2366 | basic_block bb = worklist.pop (); |
2367 | edge_iterator ei; |
2368 | edge e; |
2369 | |
2370 | FOR_EACH_EDGE (e, ei, bb->succs)
2371 | if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2372 | && !set->add (e->dest)) |
2373 | worklist.safe_push (e->dest); |
2374 | } |
2375 | } |
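 | /* Added commentary: the traversal above is a plain worklist walk over
 |    successor edges that never enters a BB_COLD_PARTITION block, so SET
 |    ends up containing exactly the blocks reachable from the entry along
 |    hot-only paths.  */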
2376 | |
2377 | /* Sanity check partition hotness to ensure that basic blocks in |
2378 | the cold partition don't dominate basic blocks in the hot partition. |
2379 | If FLAG_ONLY is true, report violations as errors. Otherwise |
2380 | re-mark the dominated blocks as cold, since this is run after |
2381 | cfg optimizations that may make hot blocks previously reached |
2382 | by both hot and cold blocks now only reachable along cold paths. */ |
2383 | |
2384 | static auto_vec<basic_block> |
2385 | find_partition_fixes (bool flag_only) |
2386 | { |
2387 | basic_block bb; |
2388 | auto_vec<basic_block> bbs_to_fix; |
2389 | hash_set<basic_block> set; |
2390 | |
2391 | /* Callers check this. */ |
2392 | gcc_checking_assert (crtl->has_bb_partition);
2393 | |
2394 | find_bbs_reachable_by_hot_paths (&set); |
2395 | |
2396 | FOR_EACH_BB_FN (bb, cfun)
2397 | if (!set.contains (bb)
2398 | && BB_PARTITION (bb) != BB_COLD_PARTITION)
2399 | { |
2400 | if (flag_only) |
2401 | error ("non-cold basic block %d reachable only " |
2402 | "by paths crossing the cold partition", bb->index); |
2403 | else |
2404 | BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2405 | bbs_to_fix.safe_push (bb); |
2406 | } |
2407 | |
2408 | return bbs_to_fix; |
2409 | } |
2410 | |
2411 | /* Perform cleanup on the hot/cold bb partitioning after optimization |
2412 | passes that modify the cfg. */ |
2413 | |
2414 | void |
2415 | fixup_partitions (void) |
2416 | { |
2417 | if (!crtl->has_bb_partition)
2418 | return; |
2419 | |
2420 | /* Delete any blocks that became unreachable and weren't |
2421 | already cleaned up, for example during edge forwarding |
2422 | and convert_jumps_to_returns. This will expose more |
2423 | opportunities for fixing the partition boundaries here. |
2424 | Also, the calculation of the dominance graph during verification |
2425 | will assert if there are unreachable nodes. */ |
2426 | delete_unreachable_blocks (); |
2427 | |
2428 | /* If there are partitions, do a sanity check on them: A basic block in |
2429 | a cold partition cannot dominate a basic block in a hot partition. |
2430 | Fixup any that now violate this requirement, as a result of edge |
2431 | forwarding and unreachable block deletion. */ |
2432 | auto_vec<basic_block> bbs_to_fix = find_partition_fixes (false); |
2433 | |
2434 | /* Do the partition fixup after all necessary blocks have been converted to |
2435 | cold, so that we only update the region crossings the minimum number of |
2436 | places, which can require forcing edges to be non fallthru. */ |
2437 | if (! bbs_to_fix.is_empty ()) |
2438 | { |
2439 | do |
2440 | { |
2441 | basic_block bb = bbs_to_fix.pop (); |
2442 | fixup_new_cold_bb (bb); |
2443 | } |
2444 | while (! bbs_to_fix.is_empty ()); |
2445 | |
2446 | /* Fix up hot cold block grouping if needed. */ |
2447 | if (crtl->bb_reorder_complete && current_ir_type () == IR_RTL_CFGRTL)
2448 | {
2449 | basic_block bb, first = NULL, second = NULL;
2450 | int current_partition = BB_UNPARTITIONED;
2451 | |
2452 | FOR_EACH_BB_FN (bb, cfun)
2453 | { |
2454 | if (current_partition != BB_UNPARTITIONED
2455 | && BB_PARTITION (bb) != current_partition)
2456 | { |
2457 | if (first == NULL)
2458 | first = bb;
2459 | else if (second == NULL)
2460 | second = bb; |
2461 | else |
2462 | { |
2463 | /* If we switch partitions for the 3rd, 5th etc. time, |
2464 | move bbs first (inclusive) .. second (exclusive) right |
2465 | before bb. */ |
2466 | basic_block prev_first = first->prev_bb; |
2467 | basic_block prev_second = second->prev_bb; |
2468 | basic_block prev_bb = bb->prev_bb; |
2469 | prev_first->next_bb = second; |
2470 | second->prev_bb = prev_first; |
2471 | prev_second->next_bb = bb; |
2472 | bb->prev_bb = prev_second; |
2473 | prev_bb->next_bb = first; |
2474 | first->prev_bb = prev_bb; |
2475 | rtx_insn *prev_first_insn = PREV_INSN (BB_HEAD (first));
2476 | rtx_insn *prev_second_insn
2477 | = PREV_INSN (BB_HEAD (second));
2478 | rtx_insn *prev_bb_insn = PREV_INSN (BB_HEAD (bb));
2479 | SET_NEXT_INSN (prev_first_insn) = BB_HEAD (second);
2480 | SET_PREV_INSN (BB_HEAD (second)) = prev_first_insn;
2481 | SET_NEXT_INSN (prev_second_insn) = BB_HEAD (bb);
2482 | SET_PREV_INSN (BB_HEAD (bb)) = prev_second_insn;
2483 | SET_NEXT_INSN (prev_bb_insn) = BB_HEAD (first);
2484 | SET_PREV_INSN (BB_HEAD (first)) = prev_bb_insn;
2485 | second = NULL;
2486 | } |
2487 | } |
2488 | current_partition = BB_PARTITION (bb);
2489 | } |
2490 | gcc_assert (!second);
2491 | } |
2492 | } |
2493 | } |
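 | /* Illustrative call site (added, hypothetical): a pass that rewires
 |    edges and may strand hot blocks behind cold ones would finish with
 |
 |      fixup_partitions ();
 |      checking_verify_flow_info ();
 |
 |    mirroring the use in commit_edge_insertions above.  */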
2494 | |
2495 | /* Verify, in the basic block chain, that there is at most one switch |
2496 | between hot/cold partitions. This condition will not be true until |
2497 | after reorder_basic_blocks is called. */ |
2498 | |
2499 | static int |
2500 | verify_hot_cold_block_grouping (void) |
2501 | { |
2502 | basic_block bb; |
2503 | int err = 0; |
2504 | bool switched_sections = false; |
2505 | int current_partition = BB_UNPARTITIONED;
2506 | |
2507 | /* Even after bb reordering is complete, we go into cfglayout mode |
2508 | again (in compgoto). Ensure we don't call this before going back |
2509 | into linearized RTL when any layout fixes would have been committed. */ |
2510 | if (!crtl->bb_reorder_complete
2511 | || current_ir_type () != IR_RTL_CFGRTL) |
2512 | return err; |
2513 | |
2514 | FOR_EACH_BB_FN (bb, cfun)
2515 | {
2516 | if (current_partition != BB_UNPARTITIONED
2517 | && BB_PARTITION (bb) != current_partition)
2518 | { |
2519 | if (switched_sections) |
2520 | { |
2521 | error ("multiple hot/cold transitions found (bb %i)", |
2522 | bb->index); |
2523 | err = 1; |
2524 | } |
2525 | else |
2526 | switched_sections = true; |
2527 | |
2528 | if (!crtl->has_bb_partition)
2529 | error ("partition found but function partition flag not set"); |
2530 | } |
2531 | current_partition = BB_PARTITION (bb);
2532 | } |
2533 | |
2534 | return err; |
2535 | } |
2536 | |
2537 | |
2538 | /* Perform several checks on the edges out of each block, such as |
2539 | the consistency of the branch probabilities, the correctness |
2540 | of hot/cold partition crossing edges, and the number of expected |
2541 | successor edges. Also verify that the dominance relationship |
2542 | between hot/cold blocks is sane. */ |
2543 | |
2544 | static int |
2545 | rtl_verify_edges (void) |
2546 | { |
2547 | int err = 0; |
2548 | basic_block bb; |
2549 | |
2550 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2551 | { |
2552 | int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0; |
2553 | int n_eh = 0, n_abnormal = 0; |
2554 | edge e, fallthru = NULLnullptr; |
2555 | edge_iterator ei; |
2556 | rtx note; |
2557 | bool has_crossing_edge = false; |
2558 | |
2559 | if (JUMP_P (BB_END (bb))
2560 | && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2561 | && EDGE_COUNT (bb->succs) >= 2
2562 | && any_condjump_p (BB_END (bb)))
2563 | {
2564 | if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2565 | {
2566 | if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2567 | {
2568 | error ("verify_flow_info: "
2569 | "REG_BR_PROB is set but cfg probability is not");
2570 | err = 1;
2571 | }
2572 | }
2573 | else if (XINT (note, 0)
2574 | != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2575 | && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2576 | {
2577 | error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2578 | XINT (note, 0),
2579 | BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2580 | err = 1;
2581 | }
2582 | }
2583 | 
2584 | FOR_EACH_EDGE (e, ei, bb->succs)
2585 | {
2586 | bool is_crossing;
2587 | 
2588 | if (e->flags & EDGE_FALLTHRU)
2589 | n_fallthru++, fallthru = e;
2590 | 
2591 | is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2592 | && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2593 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2594 | has_crossing_edge |= is_crossing; |
2595 | if (e->flags & EDGE_CROSSING) |
2596 | { |
2597 | if (!is_crossing) |
2598 | { |
2599 | error ("EDGE_CROSSING incorrectly set across same section"); |
2600 | err = 1; |
2601 | } |
2602 | if (e->flags & EDGE_FALLTHRU) |
2603 | { |
2604 | error ("fallthru edge crosses section boundary in bb %i", |
2605 | e->src->index); |
2606 | err = 1; |
2607 | } |
2608 | if (e->flags & EDGE_EH) |
2609 | { |
2610 | error ("EH edge crosses section boundary in bb %i", |
2611 | e->src->index); |
2612 | err = 1; |
2613 | } |
2614 | if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2615 | { |
2616 | error ("No region crossing jump at section boundary in bb %i", |
2617 | bb->index); |
2618 | err = 1; |
2619 | } |
2620 | } |
2621 | else if (is_crossing) |
2622 | { |
2623 | error ("EDGE_CROSSING missing across section boundary"); |
2624 | err = 1; |
2625 | } |
2626 | |
2627 | if ((e->flags & ~(EDGE_DFS_BACK |
2628 | | EDGE_CAN_FALLTHRU |
2629 | | EDGE_IRREDUCIBLE_LOOP |
2630 | | EDGE_LOOP_EXIT |
2631 | | EDGE_CROSSING |
2632 | | EDGE_PRESERVE)) == 0) |
2633 | n_branch++; |
2634 | |
2635 | if (e->flags & EDGE_ABNORMAL_CALL) |
2636 | n_abnormal_call++; |
2637 | |
2638 | if (e->flags & EDGE_SIBCALL) |
2639 | n_sibcall++; |
2640 | |
2641 | if (e->flags & EDGE_EH) |
2642 | n_eh++; |
2643 | |
2644 | if (e->flags & EDGE_ABNORMAL) |
2645 | n_abnormal++; |
2646 | } |
2647 | |
2648 | if (!has_crossing_edge |
2649 | && JUMP_P (BB_END (bb))
2650 | && CROSSING_JUMP_P (BB_END (bb)))
2651 | {
2652 | print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2653 | error ("Region crossing jump across same section in bb %i",
2654 | bb->index);
2655 | err = 1;
2656 | }
2657 | 
2658 | if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2659 | { |
2660 | error ("missing REG_EH_REGION note at the end of bb %i", bb->index); |
2661 | err = 1; |
2662 | } |
2663 | if (n_eh > 1) |
2664 | { |
2665 | error ("too many exception handling edges in bb %i", bb->index); |
2666 | err = 1; |
2667 | } |
2668 | if (n_branch |
2669 | && (!JUMP_P (BB_END (bb))
2670 | || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2671 | || any_condjump_p (BB_END (bb))))))
2672 | {
2673 | error ("too many outgoing branch edges from bb %i", bb->index);
2674 | err = 1;
2675 | }
2676 | if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2677 | {
2678 | error ("fallthru edge after unconditional jump in bb %i", bb->index);
2679 | err = 1;
2680 | }
2681 | if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2682 | {
2683 | error ("wrong number of branch edges after unconditional jump"
2684 | " in bb %i", bb->index);
2685 | err = 1;
2686 | }
2687 | if (n_branch != 1 && any_condjump_p (BB_END (bb))
2688 | && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2689 | { |
2690 | error ("wrong amount of branch edges after conditional jump" |
2691 | " in bb %i", bb->index); |
2692 | err = 1; |
2693 | } |
2694 | if (n_abnormal_call && !CALL_P (BB_END (bb)))
2695 | {
2696 | error ("abnormal call edges for non-call insn in bb %i", bb->index);
2697 | err = 1;
2698 | }
2699 | if (n_sibcall && !CALL_P (BB_END (bb)))
2700 | {
2701 | error ("sibcall edges for non-call insn in bb %i", bb->index);
2702 | err = 1;
2703 | }
2704 | if (n_abnormal > n_eh
2705 | && !(CALL_P (BB_END (bb))
2706 | && n_abnormal == n_abnormal_call + n_sibcall)
2707 | && (!JUMP_P (BB_END (bb))
2708 | || any_condjump_p (BB_END (bb))
2709 | || any_uncondjump_p (BB_END (bb))))
2710 | {
2711 | error ("abnormal edges for no purpose in bb %i", bb->index);
2712 | err = 1;
2713 | }
2714 | 
2715 | int has_eh = -1;
2716 | FOR_EACH_EDGE (e, ei, bb->preds)
2717 | { |
2718 | if (has_eh == -1) |
2719 | has_eh = (e->flags & EDGE_EH); |
2720 | if ((e->flags & EDGE_EH) == has_eh) |
2721 | continue; |
2722 | error ("EH incoming edge mixed with non-EH incoming edges " |
2723 | "in bb %i", bb->index); |
2724 | err = 1; |
2725 | break; |
2726 | } |
2727 | } |
2728 | |
2729 | /* If there are partitions, do a sanity check on them: A basic block in |
2730 | a cold partition cannot dominate a basic block in a hot partition. */ |
2731 | if (crtl->has_bb_partition && !err
2732 | && current_ir_type () == IR_RTL_CFGLAYOUT) |
2733 | { |
2734 | auto_vec<basic_block> bbs_to_fix = find_partition_fixes (true); |
2735 | err = !bbs_to_fix.is_empty (); |
2736 | } |
2737 | |
2738 | /* Clean up. */ |
2739 | return err; |
2740 | } |
2741 | |
2742 | /* Checks on the instructions within blocks. Currently checks that each |
2743 | block starts with a basic block note, and that basic block notes and |
2744 | control flow jumps are not found in the middle of the block. */ |
2745 | |
2746 | static int |
2747 | rtl_verify_bb_insns (void) |
2748 | { |
2749 | rtx_insn *x; |
2750 | int err = 0; |
2751 | basic_block bb; |
2752 | |
2753 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2754 | { |
2755 | /* Now check the header of the basic
2756 | block. It ought to contain an optional CODE_LABEL followed
2757 | by NOTE_BASIC_BLOCK. */
2758 | x = BB_HEAD (bb);
2759 | if (LABEL_P (x))
2760 | {
2761 | if (BB_END (bb) == x)
2762 | { |
2763 | error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", |
2764 | bb->index); |
2765 | err = 1; |
2766 | } |
2767 | |
2768 | x = NEXT_INSN (x); |
2769 | } |
2770 | |
2771 | if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2772 | { |
2773 | error ("NOTE_INSN_BASIC_BLOCK is missing for block %d", |
2774 | bb->index); |
2775 | err = 1; |
2776 | } |
2777 | |
2778 | if (BB_END (bb) == x)
2779 | /* Do checks for empty blocks here. */ |
2780 | ; |
2781 | else |
2782 | for (x = NEXT_INSN (x); x; x = NEXT_INSN (x)) |
2783 | { |
2784 | if (NOTE_INSN_BASIC_BLOCK_P (x))
2785 | { |
2786 | error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d", |
2787 | INSN_UID (x), bb->index); |
2788 | err = 1; |
2789 | } |
2790 | |
2791 | if (x == BB_END (bb))
2792 | break; |
2793 | |
2794 | if (control_flow_insn_p (x)) |
2795 | { |
2796 | error ("in basic block %d:", bb->index); |
2797 | fatal_insn ("flow control insn inside a basic block", x);
2798 | } |
2799 | } |
2800 | } |
2801 | |
2802 | /* Clean up. */ |
2803 | return err; |
2804 | } |
2805 | |
2806 | /* Verify that block pointers for instructions in basic blocks, headers and |
2807 | footers are set appropriately. */ |
2808 | |
2809 | static int |
2810 | rtl_verify_bb_pointers (void) |
2811 | { |
2812 | int err = 0; |
2813 | basic_block bb; |
2814 | |
2815 | /* Check the general integrity of the basic blocks. */ |
2816 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2817 | { |
2818 | rtx_insn *insn; |
2819 | |
2820 | if (!(bb->flags & BB_RTL)) |
2821 | { |
2822 | error ("BB_RTL flag not set for block %d", bb->index); |
2823 | err = 1; |
2824 | } |
2825 | |
2826 | FOR_BB_INSNS (bb, insn)
2827 | if (BLOCK_FOR_INSN (insn) != bb) |
2828 | { |
2829 | error ("insn %d basic block pointer is %d, should be %d", |
2830 | INSN_UID (insn), |
2831 | BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0, |
2832 | bb->index); |
2833 | err = 1; |
2834 | } |
2835 | |
2836 | for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2837 | if (!BARRIER_P (insn)
2838 | && BLOCK_FOR_INSN (insn) != NULL)
2839 | { |
2840 | error ("insn %d in header of bb %d has non-NULL basic block", |
2841 | INSN_UID (insn), bb->index); |
2842 | err = 1; |
2843 | } |
2844 | for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2845 | if (!BARRIER_P (insn)
2846 | && BLOCK_FOR_INSN (insn) != NULL)
2847 | { |
2848 | error ("insn %d in footer of bb %d has non-NULL basic block", |
2849 | INSN_UID (insn), bb->index); |
2850 | err = 1; |
2851 | } |
2852 | } |
2853 | |
2854 | /* Clean up. */ |
2855 | return err; |
2856 | } |
2857 | |
2858 | /* Verify the CFG and RTL consistency common for both underlying RTL and |
2859 | cfglayout RTL. |
2860 | |
2861 | Currently it does the following checks:
2862 | |
2863 | - overlapping of basic blocks |
2864 | - insns with wrong BLOCK_FOR_INSN pointers |
2865 | - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note) |
2866 | - tails of basic blocks (ensure that boundary is necessary) |
2867 | - scans body of the basic block for JUMP_INSN, CODE_LABEL |
2868 | and NOTE_INSN_BASIC_BLOCK |
2869 | - verify that no fall_thru edge crosses hot/cold partition boundaries |
2870 | - verify that there are no pending RTL branch predictions |
2871 | - verify that hot blocks are not dominated by cold blocks |
2872 | |
2873 | In the future it can be extended to check a lot of other stuff as well
2874 | (reachability of basic blocks, life information, etc.). */
2875 | |
2876 | static int |
2877 | rtl_verify_flow_info_1 (void) |
2878 | { |
2879 | int err = 0; |
2880 | |
2881 | err |= rtl_verify_bb_pointers (); |
2882 | |
2883 | err |= rtl_verify_bb_insns (); |
2884 | |
2885 | err |= rtl_verify_edges (); |
2886 | |
2887 | return err; |
2888 | } |
2889 | |
2890 | /* Walk the instruction chain and verify that bb head/end pointers |
2891 | are correct, and that instructions are in exactly one bb and have |
2892 | correct block pointers. */ |
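/* Illustration (not part of the original source): because blocks occur
   in the insn stream in layout order, the verifier below makes a single
   backward walk, roughly

     last_head = get_last_insn ();
     for each bb, from last to first:
       scan back from last_head to BB_END (bb), then on to BB_HEAD (bb);
       last_head = PREV_INSN (BB_HEAD (bb));

   giving one O(n) pass over the whole chain.  */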
2893 | |
2894 | static int |
2895 | rtl_verify_bb_insn_chain (void) |
2896 | { |
2897 | basic_block bb; |
2898 | int err = 0; |
2899 | rtx_insn *x; |
2900 | rtx_insn *last_head = get_last_insn (); |
2901 | basic_block *bb_info; |
2902 | const int max_uid = get_max_uid (); |
2903 | |
2904 | bb_info = XCNEWVEC (basic_block, max_uid);
2905 | 
2906 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2907 | {
2908 | rtx_insn *head = BB_HEAD (bb);
2909 | rtx_insn *end = BB_END (bb);
2910 | 
2911 | for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2912 | { |
2913 | /* Verify the end of the basic block is in the INSN chain. */ |
2914 | if (x == end) |
2915 | break; |
2916 | |
2917 | /* And that the code outside of basic blocks has NULL bb field. */ |
2918 | if (!BARRIER_P (x)
2919 | && BLOCK_FOR_INSN (x) != NULL)
2920 | { |
2921 | error ("insn %d outside of basic blocks has non-NULL bb field", |
2922 | INSN_UID (x)); |
2923 | err = 1; |
2924 | } |
2925 | } |
2926 | |
2927 | if (!x) |
2928 | { |
2929 | error ("end insn %d for block %d not found in the insn stream", |
2930 | INSN_UID (end), bb->index); |
2931 | err = 1; |
2932 | } |
2933 | |
2934 | /* Work backwards from the end to the head of the basic block |
2935 | to verify the head is in the RTL chain. */ |
2936 | for (; x != NULL_RTX; x = PREV_INSN (x))
2937 | { |
2938 | /* While walking over the insn chain, verify insns appear |
2939 | in only one basic block. */ |
2940 | if (bb_info[INSN_UID (x)] != NULL)
2941 | { |
2942 | error ("insn %d is in multiple basic blocks (%d and %d)", |
2943 | INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index); |
2944 | err = 1; |
2945 | } |
2946 | |
2947 | bb_info[INSN_UID (x)] = bb; |
2948 | |
2949 | if (x == head) |
2950 | break; |
2951 | } |
2952 | if (!x) |
2953 | { |
2954 | error ("head insn %d for block %d not found in the insn stream", |
2955 | INSN_UID (head), bb->index); |
2956 | err = 1; |
2957 | } |
2958 | |
2959 | last_head = PREV_INSN (x); |
2960 | } |
2961 | |
2962 | for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2963 | { |
2964 | /* Check that the code before the first basic block has NULL |
2965 | bb field. */ |
2966 | if (!BARRIER_P (x)
2967 | && BLOCK_FOR_INSN (x) != NULL)
2968 | { |
2969 | error ("insn %d outside of basic blocks has non-NULL bb field", |
2970 | INSN_UID (x)); |
2971 | err = 1; |
2972 | } |
2973 | } |
2974 | free (bb_info); |
2975 | |
2976 | return err; |
2977 | } |
2978 | |
2979 | /* Verify that fallthru edges point to adjacent blocks in layout order and |
2980 | that barriers exist after non-fallthru blocks. */ |
2981 | |
2982 | static int |
2983 | rtl_verify_fallthru (void) |
2984 | { |
2985 | basic_block bb; |
2986 | int err = 0; |
2987 | |
2988 | FOR_EACH_BB_REVERSE_FN (bb, cfun)
2989 | { |
2990 | edge e; |
2991 | |
2992 | e = find_fallthru_edge (bb->succs); |
2993 | if (!e) |
2994 | { |
2995 | rtx_insn *insn; |
2996 | |
2997 | /* Ensure existence of barrier in BB with no fallthru edges. */ |
2998 | for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
2999 | {
3000 | if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
3001 | { |
3002 | error ("missing barrier after block %i", bb->index); |
3003 | err = 1; |
3004 | break; |
3005 | } |
3006 | if (BARRIER_P (insn))
3007 | break; |
3008 | } |
3009 | } |
3010 | else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
3011 | && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3012 | { |
3013 | rtx_insn *insn; |
3014 | |
3015 | if (e->src->next_bb != e->dest) |
3016 | { |
3017 | error |
3018 | ("verify_flow_info: Incorrect blocks for fallthru %i->%i", |
3019 | e->src->index, e->dest->index); |
3020 | err = 1; |
3021 | } |
3022 | else |
3023 | for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
3024 | insn = NEXT_INSN (insn))
3025 | if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
3026 | { |
3027 | error ("verify_flow_info: Incorrect fallthru %i->%i", |
3028 | e->src->index, e->dest->index); |
3029 | error ("wrong insn in the fallthru edge"); |
3030 | debug_rtx (insn); |
3031 | err = 1; |
3032 | } |
3033 | } |
3034 | } |
3035 | |
3036 | return err; |
3037 | } |
3038 | |
3039 | /* Verify that blocks are laid out in consecutive order. While walking the |
3040 | instructions, verify that all expected instructions are inside the basic |
3041 | blocks, and that all returns are followed by barriers. */ |
3042 | |
3043 | static int |
3044 | rtl_verify_bb_layout (void) |
3045 | { |
3046 | basic_block bb; |
3047 | int err = 0; |
3048 | rtx_insn *x, *y; |
3049 | int num_bb_notes; |
3050 | rtx_insn * const rtx_first = get_insns (); |
3051 | basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
3052 | |
3053 | num_bb_notes = 0; |
3054 | |
3055 | for (x = rtx_first; x; x = NEXT_INSN (x)) |
3056 | { |
3057 | if (NOTE_INSN_BASIC_BLOCK_P (x))
3058 | {
3059 | bb = NOTE_BASIC_BLOCK (x);
3060 | |
3061 | num_bb_notes++; |
3062 | if (bb != last_bb_seen->next_bb) |
3063 | internal_error ("basic blocks not laid down consecutively"); |
3064 | |
3065 | curr_bb = last_bb_seen = bb; |
3066 | } |
3067 | |
3068 | if (!curr_bb) |
3069 | { |
3070 | switch (GET_CODE (x))
3071 | { |
3072 | case BARRIER: |
3073 | case NOTE: |
3074 | break; |
3075 | |
3076 | case CODE_LABEL: |
3077 | /* An ADDR_VEC is placed outside any basic block. */ |
3078 | if (NEXT_INSN (x)
3079 | && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3080 | x = NEXT_INSN (x); |
3081 | |
3082 | /* But in any case, non-deletable labels can appear anywhere. */ |
3083 | break; |
3084 | |
3085 | default: |
3086 | fatal_insn ("insn outside basic block", x);
3087 | } |
3088 | } |
3089 | |
3090 | if (JUMP_P (x)
3091 | && returnjump_p (x) && ! condjump_p (x)
3092 | && ! ((y = next_nonnote_nondebug_insn (x))
3093 | && BARRIER_P (y)))
3094 | fatal_insn ("return not followed by barrier", x);
3095 | 
3096 | if (curr_bb && x == BB_END (curr_bb))
3097 | curr_bb = NULL;
3098 | } |
3099 | |
3100 | if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3101 | internal_error
3102 | ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3103 | num_bb_notes, n_basic_blocks_for_fn (cfun));
3104 | |
3105 | return err; |
3106 | } |
3107 | |
3108 | /* Verify the CFG and RTL consistency common for both underlying RTL and |
3109 | cfglayout RTL, plus consistency checks specific to linearized RTL mode. |
3110 | |
3111 | Currently it does the following checks:
3112 | - all checks of rtl_verify_flow_info_1 |
3113 | - test head/end pointers |
3114 | - check that blocks are laid out in consecutive order |
3115 | - check that all insns are in the basic blocks |
3116 | (except the switch handling code, barriers and notes) |
3117 | - check that all returns are followed by barriers |
3118 | - check that each fallthru edge points to the adjacent block
3119 | - verify that there is a single hot/cold partition boundary after bbro */ |
3120 | |
3121 | static int |
3122 | rtl_verify_flow_info (void) |
3123 | { |
3124 | int err = 0; |
3125 | |
3126 | err |= rtl_verify_flow_info_1 (); |
3127 | |
3128 | err |= rtl_verify_bb_insn_chain (); |
3129 | |
3130 | err |= rtl_verify_fallthru (); |
3131 | |
3132 | err |= rtl_verify_bb_layout (); |
3133 | |
3134 | err |= verify_hot_cold_block_grouping (); |
3135 | |
3136 | return err; |
3137 | } |
3138 | |
3139 | /* Assume that the preceding pass has possibly eliminated jump instructions |
3140 | or converted the unconditional jumps. Eliminate the edges from the CFG.
3141 | Return true if any edges are eliminated. */ |
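/* Illustrative usage (a sketch, not part of the original source): after
   a pass rewrites the insn ending BB, e.g. folding a conditional branch
   into an unconditional one, it would typically do

     if (purge_dead_edges (bb))
       ...                      // pass-specific follow-up, hypothetical

   so that edges which no longer match the new form of the jump are
   removed before any further CFG queries.  */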
3142 | |
3143 | bool |
3144 | purge_dead_edges (basic_block bb) |
3145 | { |
3146 | edge e; |
3147 | rtx_insn *insn = BB_END (bb);
3148 | rtx note; |
3149 | bool purged = false; |
3150 | bool found; |
3151 | edge_iterator ei; |
3152 | |
3153 | if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3154 | do
3155 | insn = PREV_INSN (insn);
3156 | while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3157 | |
3158 | /* If this instruction cannot trap, remove REG_EH_REGION notes. */ |
3159 | if (NONJUMP_INSN_P (insn)
3160 | && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3161 | { |
3162 | rtx eqnote; |
3163 | |
3164 | if (! may_trap_p (PATTERN (insn)) |
3165 | || ((eqnote = find_reg_equal_equiv_note (insn)) |
3166 | && ! may_trap_p (XEXP (eqnote, 0))))
3167 | remove_note (insn, note); |
3168 | } |
3169 | |
3170 | /* Cleanup abnormal edges caused by exceptions or non-local gotos. */ |
3171 | for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3172 | { |
3173 | bool remove = false; |
3174 | |
3175 | /* There are three types of edges we need to handle correctly here: EH |
3176 | edges, abnormal call EH edges, and abnormal call non-EH edges. The |
3177 | latter can appear when nonlocal gotos are used. */ |
3178 | if (e->flags & EDGE_ABNORMAL_CALL) |
3179 | { |
3180 | if (!CALL_P (insn))
3181 | remove = true; |
3182 | else if (can_nonlocal_goto (insn)) |
3183 | ; |
3184 | else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) |
3185 | ; |
3186 | else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3187 | ; |
3188 | else |
3189 | remove = true; |
3190 | } |
3191 | else if (e->flags & EDGE_EH) |
3192 | remove = !can_throw_internal (insn); |
3193 | |
3194 | if (remove) |
3195 | { |
3196 | remove_edge (e); |
3197 | df_set_bb_dirty (bb); |
3198 | purged = true; |
3199 | } |
3200 | else |
3201 | ei_next (&ei); |
3202 | } |
3203 | |
3204 | if (JUMP_P (insn))
3205 | { |
3206 | rtx note; |
3207 | edge b,f; |
3208 | edge_iterator ei; |
3209 | |
3210 | /* We only care about conditional jumps and simplejumps. */
3211 | if (!any_condjump_p (insn) |
3212 | && !returnjump_p (insn) |
3213 | && !simplejump_p (insn)) |
3214 | return purged; |
3215 | |
3216 | /* Branch probability/prediction notes are defined only for |
3217 | condjumps. We've possibly turned condjump into simplejump. */ |
3218 | if (simplejump_p (insn)) |
3219 | { |
3220 | note = find_reg_note (insn, REG_BR_PROB, NULL);
3221 | if (note)
3222 | remove_note (insn, note);
3223 | while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3224 | remove_note (insn, note);
3225 | }
3226 | 
3227 | for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3228 | {
3229 | /* Avoid letting abnormal flags leak from computed jumps turned
3230 | into simplejumps. */
3231 | |
3232 | e->flags &= ~EDGE_ABNORMAL; |
3233 | |
3234 | /* See if this edge is one we should keep. */ |
3235 | if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn)) |
3236 | /* A conditional jump can fall through into the next |
3237 | block, so we should keep the edge. */ |
3238 | { |
3239 | ei_next (&ei); |
3240 | continue; |
3241 | } |
3242 | else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3243 | && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3244 | /* If the destination block is the target of the jump, |
3245 | keep the edge. */ |
3246 | { |
3247 | ei_next (&ei); |
3248 | continue; |
3249 | } |
3250 | else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3251 | && returnjump_p (insn)) |
3252 | /* If the destination block is the exit block, and this |
3253 | instruction is a return, then keep the edge. */ |
3254 | { |
3255 | ei_next (&ei); |
3256 | continue; |
3257 | } |
3258 | else if ((e->flags & EDGE_EH) && can_throw_internal (insn)) |
3259 | /* Keep the edges that correspond to exceptions thrown by |
3260 | this instruction and rematerialize the EDGE_ABNORMAL |
3261 | flag we just cleared above. */ |
3262 | { |
3263 | e->flags |= EDGE_ABNORMAL; |
3264 | ei_next (&ei); |
3265 | continue; |
3266 | } |
3267 | |
3268 | /* We do not need this edge. */ |
3269 | df_set_bb_dirty (bb); |
3270 | purged = true; |
3271 | remove_edge (e); |
3272 | } |
3273 | |
3274 | if (EDGE_COUNT (bb->succs) == 0 || !purged)
3275 | return purged; |
3276 | |
3277 | if (dump_file) |
3278 | fprintf (dump_file, "Purged edges from bb %i\n", bb->index); |
3279 | |
3280 | if (!optimize)
3281 | return purged; |
3282 | |
3283 | /* Redistribute probabilities. */ |
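/* Illustration (not part of the original source): with two successors
   left, the REG_BR_PROB note fixes the taken edge and the fallthru edge
   receives the complement; e.g. a note encoding 30% gives
   b->probability = 30% and f->probability = b->probability.invert ()
   = 70%.  */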
3284 | if (single_succ_p (bb)) |
3285 | { |
3286 | single_succ_edge (bb)->probability = profile_probability::always (); |
3287 | } |
3288 | else |
3289 | { |
3290 | note = find_reg_note (insn, REG_BR_PROB, NULL);
3291 | if (!note)
3292 | return purged;
3293 | 
3294 | b = BRANCH_EDGE (bb);
3295 | f = FALLTHRU_EDGE (bb);
3296 | b->probability = profile_probability::from_reg_br_prob_note
3297 | (XINT (note, 0));
3298 | f->probability = b->probability.invert (); |
3299 | } |
3300 | |
3301 | return purged; |
3302 | } |
3303 | else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3304 | {
3305 | /* First, there should not be any EH or ABCALL edges resulting
3306 | from non-local gotos and the like. If there were, we shouldn't
3307 | have created the sibcall in the first place. Second, there
3308 | should of course never have been a fallthru edge. */
3309 | gcc_assert (single_succ_p (bb));
3310 | gcc_assert (single_succ_edge (bb)->flags
3311 | == (EDGE_SIBCALL | EDGE_ABNORMAL));
3312 | |
3313 | return 0; |
3314 | } |
3315 | |
3316 | /* If we don't see a jump insn, we don't know exactly why the block would |
3317 | have been broken at this point. Look for a simple, non-fallthru edge, |
3318 | as these are only created by conditional branches. If we find such an |
3319 | edge we know that there used to be a jump here and can then safely |
3320 | remove all non-fallthru edges. */ |
3321 | found = false; |
3322 | FOR_EACH_EDGE (e, ei, bb->succs)
3323 | if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3324 | { |
3325 | found = true; |
3326 | break; |
3327 | } |
3328 | |
3329 | if (!found) |
3330 | return purged; |
3331 | |
3332 | /* Remove all but the fake and fallthru edges. The fake edge may be |
3333 | the only successor for this block in the case of noreturn |
3334 | calls. */ |
3335 | for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3336 | { |
3337 | if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE))) |
3338 | { |
3339 | df_set_bb_dirty (bb); |
3340 | remove_edge (e); |
3341 | purged = true; |
3342 | } |
3343 | else |
3344 | ei_next (&ei); |
3345 | } |
3346 | |
3347 | gcc_assert (single_succ_p (bb));
3348 | |
3349 | single_succ_edge (bb)->probability = profile_probability::always (); |
3350 | |
3351 | if (dump_file) |
3352 | fprintf (dump_file, "Purged non-fallthru edges from bb %i\n", |
3353 | bb->index); |
3354 | return purged; |
3355 | } |
3356 | |
3357 | /* Search all basic blocks for potentially dead edges and purge them. Return |
3358 | true if some edge has been eliminated. */ |
3359 | |
3360 | bool |
3361 | purge_all_dead_edges (void) |
3362 | { |
3363 | int purged = false; |
3364 | basic_block bb; |
3365 | |
3366 | FOR_EACH_BB_FN (bb, cfun)
3367 | { |
3368 | bool purged_here = purge_dead_edges (bb); |
3369 | |
3370 | purged |= purged_here; |
3371 | } |
3372 | |
3373 | return purged; |
3374 | } |
3375 | |
3376 | /* This is used by a few passes that emit some instructions after abnormal |
3377 | calls, moving the basic block's end, while they in fact do want to emit |
3378 | them on the fallthru edge. Look for abnormal call edges, find backward |
3379 | the call in the block and insert the instructions on the edge instead. |
3380 | |
3381 | Similarly, handle instructions throwing exceptions internally. |
3382 | |
3383 | Return true when instructions have been found and inserted on edges. */ |
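/* Illustration (not part of the original source): the shape handled here
   is

     bb:  ...
          call_insn        (source of the abnormal edge)
          insn A           (emitted after the call by some pass)
          insn B

   A and B are cut out of BB, whose end is moved back to the call, and
   are queued with insert_insn_on_edge on the fallthru edge, to be
   emitted for real by a later commit_edge_insertions.  */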
3384 | |
3385 | bool |
3386 | fixup_abnormal_edges (void) |
3387 | { |
3388 | bool inserted = false; |
3389 | basic_block bb; |
3390 | |
3391 | FOR_EACH_BB_FN (bb, cfun)
3392 | { |
3393 | edge e; |
3394 | edge_iterator ei; |
3395 | |
3396 | /* Look for cases we are interested in - calls or instructions causing |
3397 | exceptions. */ |
3398 | FOR_EACH_EDGE (e, ei, bb->succs)
3399 | if ((e->flags & EDGE_ABNORMAL_CALL) |
3400 | || ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) |
3401 | == (EDGE_ABNORMAL | EDGE_EH))) |
3402 | break; |
3403 | |
3404 | if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3405 | { |
3406 | rtx_insn *insn; |
3407 | |
3408 | /* Get past the new insns generated. Allow notes, as the insns |
3409 | may be already deleted. */ |
3410 | insn = BB_END (bb);
3411 | while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3412 | && !can_throw_internal (insn)
3413 | && insn != BB_HEAD (bb))
3414 | insn = PREV_INSN (insn); |
3415 | |
3416 | if (CALL_P (insn) || can_throw_internal (insn))
3417 | { |
3418 | rtx_insn *stop, *next; |
3419 | |
3420 | e = find_fallthru_edge (bb->succs); |
3421 | |
3422 | stop = NEXT_INSN (BB_END (bb));
3423 | BB_END (bb) = insn;
3424 | |
3425 | for (insn = NEXT_INSN (insn); insn != stop; insn = next) |
3426 | { |
3427 | next = NEXT_INSN (insn); |
3428 | if (INSN_P (insn))
3429 | { |
3430 | delete_insn (insn); |
3431 | |
3432 | /* Sometimes there's still the return value USE. |
3433 | If it's placed after a trapping call (i.e. that |
3434 | call is the last insn anyway), we have no fallthru |
3435 | edge. Simply delete this use and don't try to insert |
3436 | on the non-existent edge. |
3437 | Similarly, sometimes a call that can throw is |
3438 | followed in the source with __builtin_unreachable (), |
3439 | meaning that there is UB if the call returns rather |
3440 | than throws. If there weren't any instructions |
3441 | following such calls before, supposedly even the ones |
3442 | we've deleted aren't significant and can be |
3443 | removed. */ |
3444 | if (e) |
3445 | { |
3446 | /* We're not deleting it, we're moving it. */ |
3447 | insn->set_undeleted (); |
3448 | SET_PREV_INSN (insn) = NULL_RTX;
3449 | SET_NEXT_INSN (insn) = NULL_RTX;
3450 | |
3451 | insert_insn_on_edge (insn, e); |
3452 | inserted = true; |
3453 | } |
3454 | } |
3455 | else if (!BARRIER_P (insn))
3456 | set_block_for_insn (insn, NULL);
3457 | } |
3458 | } |
3459 | |
3460 | /* It may be that we don't find any trapping insn. In this |
3461 | case we discovered quite late that the insn that had been |
3462 | marked as can_throw_internal in fact couldn't trap at all. |
3463 | So we should in fact delete the EH edges out of the block. */ |
3464 | else |
3465 | purge_dead_edges (bb); |
3466 | } |
3467 | } |
3468 | |
3469 | return inserted; |
3470 | } |
3471 | |
3472 | /* Delete the unconditional jump INSN and adjust the CFG correspondingly. |
3473 | Note that the INSN should be deleted *after* removing dead edges, so |
3474 | that the kept edge is the fallthrough edge for a (set (pc) (pc)) |
3475 | but not for a (set (pc) (label_ref FOO)). */ |
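/* Illustration (not part of the original source): for (set (pc) (pc))
   the jump target *is* the fallthrough location, so purge_dead_edges
   keeps the fallthru edge and the insn can simply go away; for
   (set (pc) (label_ref FOO)) the kept edge is the taken edge, and in
   linearized mode a barrier may have to be added, as below.  */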
3476 | |
3477 | void |
3478 | update_cfg_for_uncondjump (rtx_insn *insn) |
3479 | { |
3480 | basic_block bb = BLOCK_FOR_INSN (insn); |
3481 | gcc_assert (BB_END (bb) == insn);
3482 | |
3483 | purge_dead_edges (bb); |
3484 | |
3485 | if (current_ir_type () != IR_RTL_CFGLAYOUT) |
3486 | { |
3487 | if (!find_fallthru_edge (bb->succs)) |
3488 | { |
3489 | auto barrier = next_nonnote_nondebug_insn (insn); |
3490 | if (!barrier || !BARRIER_P (barrier))
3491 | emit_barrier_after (insn); |
3492 | } |
3493 | return; |
3494 | } |
3495 | |
3496 | delete_insn (insn); |
3497 | if (EDGE_COUNT (bb->succs) == 1)
3498 | { |
3499 | rtx_insn *insn; |
3500 | |
3501 | single_succ_edge (bb)->flags |= EDGE_FALLTHRU; |
3502 | |
3503 | /* Remove barriers from the footer if there are any. */ |
3504 | for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
3505 | if (BARRIER_P (insn))
3506 | { |
3507 | if (PREV_INSN (insn)) |
3508 | SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn); |
3509 | else |
3510 | BB_FOOTER (bb) = NEXT_INSN (insn);
3511 | if (NEXT_INSN (insn)) |
3512 | SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn); |
3513 | } |
3514 | else if (LABEL_P (insn))
3515 | break; |
3516 | } |
3517 | } |
3518 | |
3519 | /* Cut the insns from FIRST to LAST out of the insns stream. */ |
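/* Illustration (not part of the original source): for a stream
   p <-> first <-> ... <-> last <-> n, the function below links p
   directly to n, NULL-terminates both ends of the removed subchain, and
   updates the global first/last insn pointers when the cut touches
   them.  */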
3520 | |
3521 | rtx_insn * |
3522 | unlink_insn_chain (rtx_insn *first, rtx_insn *last) |
3523 | { |
3524 | rtx_insn *prevfirst = PREV_INSN (first); |
3525 | rtx_insn *nextlast = NEXT_INSN (last); |
3526 | |
3527 | SET_PREV_INSN (first) = NULL;
3528 | SET_NEXT_INSN (last) = NULL;
3529 | if (prevfirst) |
3530 | SET_NEXT_INSN (prevfirst) = nextlast; |
3531 | if (nextlast) |
3532 | SET_PREV_INSN (nextlast) = prevfirst; |
3533 | else |
3534 | set_last_insn (prevfirst); |
3535 | if (!prevfirst) |
3536 | set_first_insn (nextlast); |
3537 | return first; |
3538 | } |
3539 | |
3540 | /* Skip over inter-block insns occurring after BB which are typically |
3541 | associated with BB (e.g., barriers). If there are any such insns, |
3542 | we return the last one. Otherwise, we return the end of BB. */ |
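/* Illustration (not part of the original source): a typical trailing
   sequence associated with BB is

     BB_END (bb):  jump_insn (a tablejump)
                   barrier
                   code_label L1
                   jump_table_data      (dispatch table for the switch)

   all of which this function skips past, returning the jump_table_data
   as the effective end.  */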
3543 | |
3544 | static rtx_insn * |
3545 | skip_insns_after_block (basic_block bb) |
3546 | { |
3547 | rtx_insn *insn, *last_insn, *next_head, *prev; |
3548 | |
3549 | next_head = NULL;
3550 | if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3551 | next_head = BB_HEAD (bb->next_bb);
3552 | 
3553 | for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3554 | { |
3555 | if (insn == next_head) |
3556 | break; |
3557 | |
3558 | switch (GET_CODE (insn))
3559 | { |
3560 | case BARRIER: |
3561 | last_insn = insn; |
3562 | continue; |
3563 | |
3564 | case NOTE: |
3565 | gcc_assert (NOTE_KIND (insn) != NOTE_INSN_BLOCK_END);
3566 | continue; |
3567 | |
3568 | case CODE_LABEL: |
3569 | if (NEXT_INSN (insn)
3570 | && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3571 | { |
3572 | insn = NEXT_INSN (insn); |
3573 | last_insn = insn; |
3574 | continue; |
3575 | } |
3576 | break; |
3577 | |
3578 | default: |
3579 | break; |
3580 | } |
3581 | |
3582 | break; |
3583 | } |
3584 | |
3585 | /* It is possible to hit a contradictory sequence. For instance:
3586 | |
3587 | jump_insn |
3588 | NOTE_INSN_BLOCK_BEG |
3589 | barrier |
3590 | |
3591 | Where the barrier belongs to jump_insn, but the note does not. This can be
3592 | created by removing the basic block originally following
3593 | NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3594 | |
3595 | for (insn = last_insn; insn != BB_END (bb); insn = prev)
3596 | {
3597 | prev = PREV_INSN (insn);
3598 | if (NOTE_P (insn))
3599 | switch (NOTE_KIND (insn))
3600 | { |
3601 | case NOTE_INSN_BLOCK_END: |
3602 | gcc_unreachable ();
3603 | break; |
3604 | case NOTE_INSN_DELETED: |
3605 | case NOTE_INSN_DELETED_LABEL: |
3606 | case NOTE_INSN_DELETED_DEBUG_LABEL: |
3607 | continue; |
3608 | default: |
3609 | reorder_insns (insn, insn, last_insn); |
3610 | } |
3611 | } |
3612 | |
3613 | return last_insn; |
3614 | } |
3615 | |
3616 | /* Locate or create a label for a given basic block. */ |
3617 | |
3618 | static rtx_insn * |
3619 | label_for_bb (basic_block bb) |
3620 | { |
3621 | rtx_insn *label = BB_HEAD (bb);
3622 | |
3623 | if (!LABEL_P (label))
3624 | { |
3625 | if (dump_file) |
3626 | fprintf (dump_file, "Emitting label for block %d\n", bb->index); |
3627 | |
3628 | label = block_label (bb); |
3629 | } |
3630 | |
3631 | return label; |
3632 | } |
3633 | |
3634 | /* Locate the effective beginning and end of the insn chain for each |
3635 | block, as defined by skip_insns_after_block above. */ |
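/* Illustration (not part of the original source): after this pass over
   the function, inter-block insns are parked off the main chain:

     notes before the first block     -> cfg_layout_function_header
     insns just before BB_HEAD (bb)   -> BB_HEADER (bb)
     barriers/jump tables after bb    -> BB_FOOTER (bb)
     insns after the last block       -> cfg_layout_function_footer

   so fixup_reorder_chain can later rebuild the chain in any block
   order.  */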
3636 | |
3637 | static void |
3638 | record_effective_endpoints (void) |
3639 | { |
3640 | rtx_insn *next_insn; |
3641 | basic_block bb; |
3642 | rtx_insn *insn; |
3643 | |
3644 | for (insn = get_insns (); |
3645 | insn |
3646 | && NOTE_P (insn)
3647 | && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3648 | insn = NEXT_INSN (insn)) |
3649 | continue; |
3650 | /* No basic blocks at all? */ |
3651 | gcc_assert (insn);
3652 | |
3653 | if (PREV_INSN (insn)) |
3654 | cfg_layout_function_header = |
3655 | unlink_insn_chain (get_insns (), PREV_INSN (insn)); |
3656 | else |
3657 | cfg_layout_function_header = NULL;
3658 | |
3659 | next_insn = get_insns (); |
3660 | FOR_EACH_BB_FN (bb, cfun)
3661 | { |
3662 | rtx_insn *end; |
3663 | |
3664 | if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3665 | BB_HEADER (bb) = unlink_insn_chain (next_insn,
3666 | PREV_INSN (BB_HEAD (bb)));
3667 | end = skip_insns_after_block (bb);
3668 | if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3669 | BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3670 | next_insn = NEXT_INSN (BB_END (bb));
3671 | } |
3672 | |
3673 | cfg_layout_function_footer = next_insn; |
3674 | if (cfg_layout_function_footer) |
3675 | cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ()); |
3676 | } |
3677 | |
3678 | namespace { |
3679 | |
3680 | const pass_data pass_data_into_cfg_layout_mode = |
3681 | { |
3682 | RTL_PASS, /* type */ |
3683 | "into_cfglayout", /* name */ |
3684 | OPTGROUP_NONE, /* optinfo_flags */ |
3685 | TV_CFG, /* tv_id */ |
3686 | 0, /* properties_required */ |
3687 | PROP_cfglayout, /* properties_provided */
3688 | 0, /* properties_destroyed */ |
3689 | 0, /* todo_flags_start */ |
3690 | 0, /* todo_flags_finish */ |
3691 | }; |
3692 | |
3693 | class pass_into_cfg_layout_mode : public rtl_opt_pass |
3694 | { |
3695 | public: |
3696 | pass_into_cfg_layout_mode (gcc::context *ctxt) |
3697 | : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt) |
3698 | {} |
3699 | |
3700 | /* opt_pass methods: */ |
3701 | unsigned int execute (function *) final override |
3702 | { |
3703 | cfg_layout_initialize (0); |
3704 | return 0; |
3705 | } |
3706 | |
3707 | }; // class pass_into_cfg_layout_mode |
3708 | |
3709 | } // anon namespace |
3710 | |
3711 | rtl_opt_pass * |
3712 | make_pass_into_cfg_layout_mode (gcc::context *ctxt) |
3713 | { |
3714 | return new pass_into_cfg_layout_mode (ctxt); |
3715 | } |
3716 | |
3717 | namespace { |
3718 | |
3719 | const pass_data pass_data_outof_cfg_layout_mode = |
3720 | { |
3721 | RTL_PASS, /* type */ |
3722 | "outof_cfglayout", /* name */ |
3723 | OPTGROUP_NONE, /* optinfo_flags */ |
3724 | TV_CFG, /* tv_id */ |
3725 | 0, /* properties_required */ |
3726 | 0, /* properties_provided */ |
3727 | PROP_cfglayout, /* properties_destroyed */
3728 | 0, /* todo_flags_start */ |
3729 | 0, /* todo_flags_finish */ |
3730 | }; |
3731 | |
3732 | class pass_outof_cfg_layout_mode : public rtl_opt_pass |
3733 | { |
3734 | public: |
3735 | pass_outof_cfg_layout_mode (gcc::context *ctxt) |
3736 | : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt) |
3737 | {} |
3738 | |
3739 | /* opt_pass methods: */ |
3740 | unsigned int execute (function *) final override; |
3741 | |
3742 | }; // class pass_outof_cfg_layout_mode |
3743 | |
3744 | unsigned int |
3745 | pass_outof_cfg_layout_mode::execute (function *fun) |
3746 | { |
3747 | basic_block bb; |
3748 | |
3749 | FOR_EACH_BB_FN (bb, fun)
3750 | if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3751 | bb->aux = bb->next_bb; |
3752 | |
3753 | cfg_layout_finalize (); |
3754 | |
3755 | return 0; |
3756 | } |
3757 | |
3758 | } // anon namespace |
3759 | |
3760 | rtl_opt_pass * |
3761 | make_pass_outof_cfg_layout_mode (gcc::context *ctxt) |
3762 | { |
3763 | return new pass_outof_cfg_layout_mode (ctxt); |
3764 | } |
3765 | |
3766 | |
3767 | /* Link the basic blocks in the correct order, compacting the basic |
3768 | block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this |
3769 | function also clears the basic block header and footer fields. |
3770 | |
3771 | This function is usually called after a pass (e.g. tracer) finishes |
3772 | some transformations while in cfglayout mode. The required sequence |
3773 | of the basic blocks is in a linked list along the bb->aux field. |
3774 | This function re-links the basic block prev_bb and next_bb pointers
3775 | accordingly, and it compacts and renumbers the blocks. |
3776 | |
3777 | FIXME: This currently works only for RTL, but the only RTL-specific |
3778 | bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved |
3779 | to GIMPLE a long time ago, but it doesn't relink the basic block |
3780 | chain. It could do that (to give better initial RTL) if this function |
3781 | is made IR-agnostic (and moved to cfganal.cc or cfg.cc while at it). */ |
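/* Illustrative usage (a sketch, not part of the original source):
   callers describe the desired order through bb->aux, as
   pass_outof_cfg_layout_mode::execute above does for the identity
   order:

     FOR_EACH_BB_FN (bb, fun)
       if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
         bb->aux = bb->next_bb;

   cfg_layout_finalize then reaches relink_block_chain via
   fixup_reorder_chain.  */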
3782 | |
3783 | void |
3784 | relink_block_chain (bool stay_in_cfglayout_mode) |
3785 | { |
3786 | basic_block bb, prev_bb; |
3787 | int index; |
3788 | |
3789 | /* Maybe dump the re-ordered sequence. */ |
3790 | if (dump_file) |
3791 | { |
3792 | fprintf (dump_file, "Reordered sequence:\n"); |
3793 | for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3794 | NUM_FIXED_BLOCKS;
3795 | bb; |
3796 | bb = (basic_block) bb->aux, index++) |
3797 | { |
3798 | fprintf (dump_file, " %i ", index); |
3799 | if (get_bb_original (bb)) |
3800 | fprintf (dump_file, "duplicate of %i\n", |
3801 | get_bb_original (bb)->index); |
3802 | else if (forwarder_block_p (bb) |
3803 | && !LABEL_P (BB_HEAD (bb)))
3804 | fprintf (dump_file, "compensation\n"); |
3805 | else |
3806 | fprintf (dump_file, "bb %i\n", bb->index); |
3807 | } |
3808 | } |
3809 | |
3810 | /* Now reorder the blocks. */ |
3811 | prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3812 | bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3813 | for (; bb; prev_bb = bb, bb = (basic_block) bb->aux) |
3814 | { |
3815 | bb->prev_bb = prev_bb; |
3816 | prev_bb->next_bb = bb; |
3817 | } |
3818 | prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3819 | EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3820 | |
3821 | /* Then, clean up the aux fields. */ |
3822 | FOR_ALL_BB_FN (bb, cfun)
3823 | {
3824 | bb->aux = NULL;
3825 | if (!stay_in_cfglayout_mode)
3826 | BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3827 | } |
3828 | |
3829 | /* Maybe reset the original copy tables, they are not valid anymore |
3830 | when we renumber the basic blocks in compact_blocks. If we are |
3831 | going out of cfglayout mode, don't re-allocate the tables. */
3832 | if (original_copy_tables_initialized_p ()) |
3833 | free_original_copy_tables (); |
3834 | if (stay_in_cfglayout_mode) |
3835 | initialize_original_copy_tables (); |
3836 | |
3837 | /* Finally, put basic_block_info in the new order. */ |
3838 | compact_blocks (); |
3839 | } |
3840 | |
3841 | |
3842 | /* Given a reorder chain, rearrange the code to match. */ |
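/* Illustration (not part of the original source): the function works in
   two passes.  Pass one splices BB_HEADER (bb), the block body, and
   BB_FOOTER (bb) back into one linear chain following bb->aux order;
   pass two walks the blocks again and inserts, inverts or deletes jumps
   so that every EDGE_FALLTHRU really reaches the next block in the new
   layout.  */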
3843 | |
3844 | static void |
3845 | fixup_reorder_chain (void) |
3846 | { |
3847 | basic_block bb; |
3848 | rtx_insn *insn = NULL;
3849 | |
3850 | if (cfg_layout_function_header) |
3851 | { |
3852 | set_first_insn (cfg_layout_function_header); |
3853 | insn = cfg_layout_function_header; |
3854 | while (NEXT_INSN (insn)) |
3855 | insn = NEXT_INSN (insn); |
3856 | } |
3857 | |
3858 | /* First do the bulk reordering -- rechain the blocks without regard to |
3859 | the needed changes to jumps and labels. */ |
3860 | |
3861 | for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3862 | bb->aux)
3863 | {
3864 | if (BB_HEADER (bb))
3865 | {
3866 | if (insn)
3867 | SET_NEXT_INSN (insn) = BB_HEADER (bb);
3868 | else
3869 | set_first_insn (BB_HEADER (bb));
3870 | SET_PREV_INSN (BB_HEADER (bb)) = insn;
3871 | insn = BB_HEADER (bb);
3872 | while (NEXT_INSN (insn))
3873 | insn = NEXT_INSN (insn);
3874 | }
3875 | if (insn)
3876 | SET_NEXT_INSN (insn) = BB_HEAD (bb);
3877 | else
3878 | set_first_insn (BB_HEAD (bb));
3879 | SET_PREV_INSN (BB_HEAD (bb)) = insn;
3880 | insn = BB_END (bb);
3881 | if (BB_FOOTER (bb))
3882 | {
3883 | SET_NEXT_INSN (insn) = BB_FOOTER (bb);
3884 | SET_PREV_INSN (BB_FOOTER (bb)) = insn;
3885 | while (NEXT_INSN (insn)) |
3886 | insn = NEXT_INSN (insn); |
3887 | } |
3888 | } |
3889 | |
3890 | SET_NEXT_INSN (insn) = cfg_layout_function_footer; |
3891 | if (cfg_layout_function_footer) |
3892 | SET_PREV_INSN (cfg_layout_function_footer) = insn; |
3893 | |
3894 | while (NEXT_INSN (insn)) |
3895 | insn = NEXT_INSN (insn); |
3896 | |
3897 | set_last_insn (insn); |
3898 | if (flag_checking)
3899 | verify_insn_chain (); |
3900 | |
3901 | /* Now add jumps and labels as needed to match the blocks' new
3902 | outgoing edges. */ |
3903 | |
3904 | bool remove_unreachable_blocks = false; |
3905 | for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb; bb = (basic_block)
3906 | bb->aux) |
3907 | { |
3908 | edge e_fall, e_taken, e; |
3909 | rtx_insn *bb_end_insn; |
3910 | rtx ret_label = NULL_RTX;
3911 | basic_block nb; |
3912 | edge_iterator ei; |
3913 | bool asm_goto = false; |
3914 | |
3915 | if (EDGE_COUNT (bb->succs) == 0)
3916 | continue; |
3917 | |
3918 | /* Find the old fallthru edge, and another non-EH edge for |
3919 | a taken jump. */ |
3920 | e_taken = e_fall = NULL;
3921 | 
3922 | FOR_EACH_EDGE (e, ei, bb->succs)
3923 | if (e->flags & EDGE_FALLTHRU) |
3924 | e_fall = e; |
3925 | else if (! (e->flags & EDGE_EH)) |
3926 | e_taken = e; |
3927 | |
3928 | bb_end_insn = BB_END (bb);
3929 | if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn)) |
3930 | { |
3931 | ret_label = JUMP_LABEL (bb_end_jump);
3932 | if (any_condjump_p (bb_end_jump)) |
3933 | { |
3934 | /* This might happen if the conditional jump has side |
3935 | effects and could therefore not be optimized away. |
3936 | Make the basic block end with a barrier in order
3937 | to prevent rtl_verify_flow_info from complaining. */ |
3938 | if (!e_fall) |
3939 | { |
3940 | gcc_assert (!onlyjump_p (bb_end_jump)
3941 | || returnjump_p (bb_end_jump)
3942 | || (e_taken->flags & EDGE_CROSSING));
3943 | emit_barrier_after (bb_end_jump); |
3944 | continue; |
3945 | } |
3946 | |
3947 | /* If the old fallthru is still next, nothing to do. */ |
3948 | if (bb->aux == e_fall->dest |
3949 | || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
3950 | continue; |
3951 | |
3952 | /* The degenerate case of a conditional jump jumping to the next
3953 | instruction can happen for jumps with side effects. We need |
3954 | to construct a forwarder block and this will be done just |
3955 | fine by force_nonfallthru below. */ |
3956 | if (!e_taken) |
3957 | ; |
3958 | |
3959 | /* There is another special case: if *neither* block is next, |
3960 | such as happens at the very end of a function, then we'll |
3961 | need to add a new unconditional jump. Choose the taken |
3962 | edge based on known or assumed probability. */ |
3963 | else if (bb->aux != e_taken->dest) |
3964 | { |
3965 | rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0); |
3966 | |
3967 | if (note |
3968 | && profile_probability::from_reg_br_prob_note |
3969 | (XINT (note, 0)) < profile_probability::even ()
3970 | && invert_jump (bb_end_jump,
3971 | (e_fall->dest
3972 | == EXIT_BLOCK_PTR_FOR_FN (cfun)
3973 | ? NULL_RTX
3974 | : label_for_bb (e_fall->dest)), 0)) |
3975 | { |
3976 | e_fall->flags &= ~EDGE_FALLTHRU; |
3977 | gcc_checking_assert (could_fall_through
3978 | (e_taken->src, e_taken->dest));
3979 | e_taken->flags |= EDGE_FALLTHRU; |
3980 | update_br_prob_note (bb); |
3981 | e = e_fall, e_fall = e_taken, e_taken = e; |
Although the value stored to 'e_taken' is used in the enclosing expression, the value is never actually read from 'e_taken' | |
3982 | } |
3983 | } |
3984 | |
3985 |               /* If the "jumping" edge is a crossing edge, and the fall
3986 |                  through edge is non-crossing, leave things as they are.  */
3987 |               else if ((e_taken->flags & EDGE_CROSSING)
3988 |                        && !(e_fall->flags & EDGE_CROSSING))
3989 |                 continue;
3990 | 
3991 |               /* Otherwise we can try to invert the jump.  This will
3992 |                  basically never fail, however, keep up the pretense.  */
3993 |               else if (invert_jump (bb_end_jump,
3994 |                                     (e_fall->dest
3995 |                                      == EXIT_BLOCK_PTR_FOR_FN (cfun)
3996 |                                      ? NULL_RTX
3997 |                                      : label_for_bb (e_fall->dest)), 0))
3998 |                 {
3999 |                   e_fall->flags &= ~EDGE_FALLTHRU;
4000 |                   gcc_checking_assert (could_fall_through
4001 |                                        (e_taken->src, e_taken->dest));
4002 |                   e_taken->flags |= EDGE_FALLTHRU;
4003 |                   update_br_prob_note (bb);
4004 |                   if (LABEL_NUSES (ret_label) == 0
4005 |                       && single_pred_p (e_taken->dest))
4006 |                     delete_insn (as_a<rtx_insn *> (ret_label));
4007 |                   continue;
4008 |                 }
4009 |             }
4010 |           else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
4011 |             {
4012 |               /* If the old fallthru is still next, or if the
4013 |                  asm goto doesn't have a fallthru (e.g. when followed by
4014 |                  __builtin_unreachable ()), nothing to do.  */
4015 |               if (! e_fall
4016 |                   || bb->aux == e_fall->dest
4017 |                   || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4018 |                 continue;
4019 | 
4020 |               /* Otherwise we'll have to use the fallthru fixup below.
4021 |                  But avoid redirecting asm goto to EXIT.  */
4022 |               asm_goto = true;
4023 |             }
4024 |           else
4025 |             {
4026 |               /* Otherwise we have some return, switch or computed
4027 |                  jump.  In the 99% case, there should not have been a
4028 |                  fallthru edge.  */
4029 |               gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
4030 |               continue;
4031 |             }
4032 |         }
4033 |       else
4034 |         {
4035 |           /* No fallthru implies a noreturn function with EH edges, or
4036 |              something similarly bizarre.  In any case, we don't need to
4037 |              do anything.  */
4038 |           if (! e_fall)
4039 |             continue;
4040 | 
4041 |           /* If the fallthru block is still next, nothing to do.  */
4042 |           if (bb->aux == e_fall->dest)
4043 |             continue;
4044 | 
4045 |           /* A fallthru to the exit block.  */
4046 |           if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4047 |             continue;
4048 |         }
4049 | |
4050 |       /* If E_FALL->dest is just a return block, then we can emit a
4051 |          return rather than a jump to the return block.  */
4052 |       rtx_insn *ret, *use;
4053 |       basic_block dest;
4054 |       if (!asm_goto
4055 |           && bb_is_just_return (e_fall->dest, &ret, &use)
4056 |           && ((PATTERN (ret) == simple_return_rtx && targetm.have_simple_return ())
4057 |               || (PATTERN (ret) == ret_rtx && targetm.have_return ())))
4058 |         {
4059 |           ret_label = PATTERN (ret);
4060 |           dest = EXIT_BLOCK_PTR_FOR_FN (cfun);
4061 | 
4062 |           e_fall->flags &= ~EDGE_CROSSING;
4063 |           /* E_FALL->dest might become unreachable as a result of
4064 |              replacing the jump with a return.  So arrange to remove
4065 |              unreachable blocks.  */
4066 |           remove_unreachable_blocks = true;
4067 |         }
4068 |       else
4069 |         {
4070 |           dest = e_fall->dest;
4071 |         }
4072 | 
4073 |       /* We got here if we need to add a new jump insn.
4074 |          Note force_nonfallthru can delete E_FALL and thus we have to
4075 |          save E_FALL->src prior to the call to force_nonfallthru.  */
4076 |       nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);
4077 |       if (nb)
4078 |         {
4079 |           nb->aux = bb->aux;
4080 |           bb->aux = nb;
4081 |           /* Don't process this new block.  */
4082 |           bb = nb;
4083 |         }
4084 |     }
4085 | |
4086 |   relink_block_chain (/*stay_in_cfglayout_mode=*/false);
4087 | 
4088 |   /* Annoying special case - jump around dead jumptables left in the code.  */
4089 |   FOR_EACH_BB_FN (bb, cfun)
4090 |     {
4091 |       edge e = find_fallthru_edge (bb->succs);
4092 | 
4093 |       if (e && !can_fallthru (e->src, e->dest))
4094 |         force_nonfallthru (e);
4095 |     }
4096 | 
4097 |   /* Ensure goto_locus from edges has some instructions with that locus in RTL
4098 |      when not optimizing.  */
4099 |   if (!optimize && !DECL_IGNORED_P (current_function_decl))
4100 |     FOR_EACH_BB_FN (bb, cfun)
4101 |       {
4102 |         edge e;
4103 |         edge_iterator ei;
4104 | 
4105 |         FOR_EACH_EDGE (e, ei, bb->succs)
4106 |           if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
4107 |               && !(e->flags & EDGE_ABNORMAL))
4108 |             {
4109 |               edge e2;
4110 |               edge_iterator ei2;
4111 |               basic_block dest, nb;
4112 |               rtx_insn *end;
4113 | 
4114 |               insn = BB_END (e->src);
4115 |               end = PREV_INSN (BB_HEAD (e->src));
4116 |               while (insn != end
4117 |                      && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
4118 |                 insn = PREV_INSN (insn);
4119 |               if (insn != end
4120 |                   && loc_equal (INSN_LOCATION (insn), e->goto_locus))
4121 |                 continue;
4122 |               if (simplejump_p (BB_END (e->src))
4123 |                   && !INSN_HAS_LOCATION (BB_END (e->src)))
4124 |                 {
4125 |                   INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
4126 |                   continue;
4127 |                 }
4128 |               dest = e->dest;
4129 |               if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4130 |                 {
4131 |                   /* Non-fallthru edges to the exit block cannot be split.  */
4132 |                   if (!(e->flags & EDGE_FALLTHRU))
4133 |                     continue;
4134 |                 }
4135 |               else
4136 |                 {
4137 |                   insn = BB_HEAD (dest);
4138 |                   end = NEXT_INSN (BB_END (dest));
4139 |                   while (insn != end && !NONDEBUG_INSN_P (insn))
4140 |                     insn = NEXT_INSN (insn);
4141 |                   if (insn != end && INSN_HAS_LOCATION (insn)
4142 |                       && loc_equal (INSN_LOCATION (insn), e->goto_locus))
4143 |                     continue;
4144 |                 }
4145 |               nb = split_edge (e);
4146 |               if (!INSN_P (BB_END (nb)))
4147 |                 BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
4148 |                                                      nb);
4149 |               INSN_LOCATION (BB_END (nb)) = e->goto_locus;
4150 | 
4151 |               /* If there are other incoming edges to the destination block
4152 |                  with the same goto locus, redirect them to the new block as
4153 |                  well; this can prevent other such blocks from being created
4154 |                  in subsequent iterations of the loop.  */
4155 |               for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
4156 |                 if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
4157 |                     && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
4158 |                     && e->goto_locus == e2->goto_locus)
4159 |                   redirect_edge_and_branch (e2, nb);
4160 |                 else
4161 |                   ei_next (&ei2);
4162 |             }
4163 |       }
4164 | 
4165 |   /* Replacing a jump with a return may have exposed unreachable
4166 |      blocks.  Remove them if any such transformation was
4167 |      made.  */
4168 |   if (remove_unreachable_blocks)
4169 |     delete_unreachable_blocks ();
4170 | }
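/* Editorial sketch, not part of cfgrtl.cc: fixup_reorder_chain consumes the
   desired block order that a layout pass records in the bb->aux chain.  A
   minimal illustration with hypothetical blocks BB1..BB3:  */
#if 0
  bb1->aux = bb2;   /* BB2 should follow BB1 in the final insn stream.  */
  bb2->aux = bb3;
  bb3->aux = NULL;  /* End of the chain that relink_block_chain walks.  */
#endif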
4171 | |
4172 | /* Perform sanity checks on the insn chain.
4173 |    1. Check that next/prev pointers are consistent in both the forward and
4174 |       reverse directions.
4175 |    2. Count insns in the chain, going both directions, and check if equal.
4176 |    3. Check that get_last_insn () returns the actual end of chain.  */
4177 | 
4178 | DEBUG_FUNCTION void
4179 | verify_insn_chain (void)
4180 | {
4181 |   rtx_insn *x, *prevx, *nextx;
4182 |   int insn_cnt1, insn_cnt2;
4183 | 
4184 |   for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
4185 |        x != 0;
4186 |        prevx = x, insn_cnt1++, x = NEXT_INSN (x))
4187 |     gcc_assert (PREV_INSN (x) == prevx);
4188 | 
4189 |   gcc_assert (prevx == get_last_insn ());
4190 | 
4191 |   for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
4192 |        x != 0;
4193 |        nextx = x, insn_cnt2++, x = PREV_INSN (x))
4194 |     gcc_assert (NEXT_INSN (x) == nextx);
4195 | 
4196 |   gcc_assert (insn_cnt1 == insn_cnt2);
4197 | }
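/* Editorial sketch, not part of cfgrtl.cc: the invariant checked above,
   spelled out for a single insn X somewhere in the chain.  */
#if 0
  if (NEXT_INSN (x))
    gcc_assert (PREV_INSN (NEXT_INSN (x)) == x);  /* Links agree pairwise.  */
  else
    gcc_assert (x == get_last_insn ());  /* Chain ends at the last insn.  */
#endif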
4198 | |
4199 | /* If we have assembler epilogues, the block falling through to exit must
4200 |    be the last one in the reordered chain when we reach final.  Ensure
4201 |    that this condition is met.  */
4202 | static void
4203 | fixup_fallthru_exit_predecessor (void)
4204 | {
4205 |   edge e;
4206 |   basic_block bb = NULL;
4207 | 
4208 |   /* This transformation is not valid before reload, because we might
4209 |      separate a call from the instruction that copies the return
4210 |      value.  */
4211 |   gcc_assert (reload_completed);
4212 | 
4213 |   e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4214 |   if (e)
4215 |     bb = e->src;
4216 | 
4217 |   if (bb && bb->aux)
4218 |     {
4219 |       basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
4220 | 
4221 |       /* If the very first block is the one with the fall-through exit
4222 |          edge, we have to split that block.  */
4223 |       if (c == bb)
4224 |         {
4225 |           bb = split_block_after_labels (bb)->dest;
4226 |           bb->aux = c->aux;
4227 |           c->aux = bb;
4228 |           BB_FOOTER (bb) = BB_FOOTER (c);
4229 |           BB_FOOTER (c) = NULL;
4230 |         }
4231 | 
4232 |       while (c->aux != bb)
4233 |         c = (basic_block) c->aux;
4234 | 
4235 |       c->aux = bb->aux;
4236 |       while (c->aux)
4237 |         c = (basic_block) c->aux;
4238 | 
4239 |       c->aux = bb;
4240 |       bb->aux = NULL;
4241 |     }
4242 | }
4243 | |
4244 | /* In case there is more than one fallthru predecessor of exit, force
4245 |    there to be only one.  */
4246 | 
4247 | static void
4248 | force_one_exit_fallthru (void)
4249 | {
4250 |   edge e, predecessor = NULL;
4251 |   bool more = false;
4252 |   edge_iterator ei;
4253 |   basic_block forwarder, bb;
4254 | 
4255 |   FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
4256 |     if (e->flags & EDGE_FALLTHRU)
4257 |       {
4258 |         if (predecessor == NULL)
4259 |           predecessor = e;
4260 |         else
4261 |           {
4262 |             more = true;
4263 |             break;
4264 |           }
4265 |       }
4266 | 
4267 |   if (!more)
4268 |     return;
4269 | 
4270 |   /* Exit has several fallthru predecessors.  Create a forwarder block for
4271 |      them.  */
4272 |   forwarder = split_edge (predecessor);
4273 |   for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
4274 |        (e = ei_safe_edge (ei)); )
4275 |     {
4276 |       if (e->src == forwarder
4277 |           || !(e->flags & EDGE_FALLTHRU))
4278 |         ei_next (&ei);
4279 |       else
4280 |         redirect_edge_and_branch_force (e, forwarder);
4281 |     }
4282 | 
4283 |   /* Fix up the chain of blocks -- make FORWARDER immediately precede the
4284 |      exit block.  */
4285 |   FOR_EACH_BB_FN (bb, cfun)
4286 |     {
4287 |       if (bb->aux == NULL && bb != forwarder)
4288 |         {
4289 |           bb->aux = forwarder;
4290 |           break;
4291 |         }
4292 |     }
4293 | }
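/* Editorial sketch, not part of cfgrtl.cc: the effect of the transformation
   above when two blocks B1 and B2 both fall through to EXIT.

     before:  B1 --fallthru--> EXIT      after:  B1 --fallthru--> FWD
              B2 --fallthru--> EXIT              B2 ----jump----> FWD
                                                 FWD --fallthru--> EXIT  */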
4294 | |
4295 | /* Return true if it is possible to duplicate the basic block BB.  */
4296 | 
4297 | static bool
4298 | cfg_layout_can_duplicate_bb_p (const_basic_block bb)
4299 | {
4300 |   /* Do not attempt to duplicate tablejumps, as we need to unshare
4301 |      the dispatch table.  This is difficult to do, as the instructions
4302 |      computing the jump destination may be hoisted outside the basic block.  */
4303 |   if (tablejump_p (BB_END (bb), NULL, NULL))
4304 |     return false;
4305 | 
4306 |   /* Do not duplicate blocks containing insns that can't be copied.  */
4307 |   if (targetm.cannot_copy_insn_p)
4308 |     {
4309 |       rtx_insn *insn = BB_HEAD (bb);
4310 |       while (1)
4311 |         {
4312 |           if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
4313 |             return false;
4314 |           if (insn == BB_END (bb))
4315 |             break;
4316 |           insn = NEXT_INSN (insn);
4317 |         }
4318 |     }
4319 | 
4320 |   return true;
4321 | }
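/* Editorial sketch, not part of cfgrtl.cc: passes normally reach this
   predicate and cfg_layout_duplicate_bb below through the generic cfghooks
   wrappers; a typical guarded use looks like this.  */
#if 0
  if (can_duplicate_block_p (bb))
    {
      basic_block copy = duplicate_block (bb, NULL, NULL);
      /* The caller is responsible for wiring up COPY's outgoing edges.  */
    }
#endif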
4322 | |
4323 | rtx_insn *
4324 | duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
4325 |                       class loop *loop, copy_bb_data *id)
4326 | {
4327 |   rtx_insn *insn, *next, *copy;
4328 |   rtx_note *last;
4329 | 
4330 |   /* Avoid updating of boundaries of previous basic block.  The
4331 |      note will get removed from the insn stream in fixup.  */
4332 |   last = emit_note (NOTE_INSN_DELETED);
4333 | 
4334 |   /* Create the copy at the end of the insn chain.  The chain will
4335 |      be reordered later.  */
4336 |   for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
4337 |     {
4338 |       switch (GET_CODE (insn))
4339 |         {
4340 |         case DEBUG_INSN:
4341 |           /* Don't duplicate label debug insns.  */
4342 |           if (DEBUG_BIND_INSN_P (insn)
4343 |               && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
4344 |             break;
4345 |           /* FALLTHRU */
4346 |         case INSN:
4347 |         case CALL_INSN:
4348 |         case JUMP_INSN:
4349 |           copy = emit_copy_of_insn_after (insn, get_last_insn ());
4350 |           if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
4351 |               && ANY_RETURN_P (JUMP_LABEL (insn)))
4352 |             JUMP_LABEL (copy) = JUMP_LABEL (insn);
4353 |           maybe_copy_prologue_epilogue_insn (insn, copy);
4354 |           /* If requested, remap dependence info of cliques brought in
4355 |              via inlining.  */
4356 |           if (id)
4357 |             {
4358 |               subrtx_iterator::array_type array;
4359 |               FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
4360 |                 if (MEM_P (*iter) && MEM_EXPR (*iter))
4361 |                   {
4362 |                     tree op = MEM_EXPR (*iter);
4363 |                     if (TREE_CODE (op) == WITH_SIZE_EXPR)
4364 |                       op = TREE_OPERAND (op, 0);
4365 |                     while (handled_component_p (op))
4366 |                       op = TREE_OPERAND (op, 0);
4367 |                     if ((TREE_CODE (op) == MEM_REF
4368 |                          || TREE_CODE (op) == TARGET_MEM_REF)
4369 |                         && MR_DEPENDENCE_CLIQUE (op) > 1
4370 |                         && (!loop
4371 |                             || (MR_DEPENDENCE_CLIQUE (op)
4372 |                                 != loop->owned_clique)))
4373 |                       {
4374 |                         if (!id->dependence_map)
4375 |                           id->dependence_map = new hash_map<dependence_hash,
4376 |                                                             unsigned short>;
4377 |                         bool existed;
4378 |                         unsigned short &newc = id->dependence_map->get_or_insert
4379 |                           (MR_DEPENDENCE_CLIQUE (op), &existed);
4380 |                         if (!existed)
4381 |                           {
4382 |                             gcc_assert
4383 |                               (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
4384 |                             newc = ++cfun->last_clique;
4385 |                           }
4386 |                         /* We cannot adjust MR_DEPENDENCE_CLIQUE in place,
4387 |                            since MEM_EXPR is shared, so make a copy and
4388 |                            walk to the subtree again.  */
4389 |                         tree new_expr = unshare_expr (MEM_EXPR (*iter));
4390 |                         if (TREE_CODE (new_expr) == WITH_SIZE_EXPR)
4391 |                           new_expr = TREE_OPERAND (new_expr, 0);
4392 |                         while (handled_component_p (new_expr))
4393 |                           new_expr = TREE_OPERAND (new_expr, 0);
4394 |                         MR_DEPENDENCE_CLIQUE (new_expr) = newc;
4395 |                         set_mem_expr (const_cast <rtx> (*iter), new_expr);
4396 |                       }
4397 |                   }
4398 |             }
4399 |           break;
4400 | 
4401 |         case JUMP_TABLE_DATA:
4402 |           /* Avoid copying of dispatch tables.  We never duplicate
4403 |              tablejumps, so this can only trigger in case the table got
4404 |              moved far from the original jump.
4405 |              Avoid copying the following barrier as well, if any
4406 |              (and debug insns in between).  */
4407 |           for (next = NEXT_INSN (insn);
4408 |                next != NEXT_INSN (to);
4409 |                next = NEXT_INSN (next))
4410 |             if (!DEBUG_INSN_P (next))
4411 |               break;
4412 |           if (next != NEXT_INSN (to) && BARRIER_P (next))
4413 |             insn = next;
4414 |           break;
4415 | 
4416 |         case CODE_LABEL:
4417 |           break;
4418 | 
4419 |         case BARRIER:
4420 |           emit_barrier ();
4421 |           break;
4422 | 
4423 |         case NOTE:
4424 |           switch (NOTE_KIND (insn))
4425 |             {
4426 |               /* In case the prologue is empty and the function contains a
4427 |                  label in its first BB, we may want to copy the block.  */
4428 |             case NOTE_INSN_PROLOGUE_END:
4429 | 
4430 |             case NOTE_INSN_DELETED:
4431 |             case NOTE_INSN_DELETED_LABEL:
4432 |             case NOTE_INSN_DELETED_DEBUG_LABEL:
4433 |               /* No problem to strip these.  */
4434 |             case NOTE_INSN_FUNCTION_BEG:
4435 |               /* There is always just a single entry to the function.  */
4436 |             case NOTE_INSN_BASIC_BLOCK:
4437 |               /* We should only switch text sections once.  */
4438 |             case NOTE_INSN_SWITCH_TEXT_SECTIONS:
4439 |               break;
4440 | 
4441 |             case NOTE_INSN_EPILOGUE_BEG:
4442 |             case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
4443 |               emit_note_copy (as_a <rtx_note *> (insn));
4444 |               break;
4445 | 
4446 |             default:
4447 |               /* All other notes should have already been eliminated.  */
4448 |               gcc_unreachable ();
4449 |             }
4450 |           break;
4451 |         default:
4452 |           gcc_unreachable ();
4453 |         }
4454 |     }
4455 |   insn = NEXT_INSN (last);
4456 |   delete_insn (last);
4457 |   return insn;
4458 | }
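/* Editorial sketch, not part of cfgrtl.cc: the copy is emitted at the end
   of the current insn chain, so it is delimited by the returned insn and
   get_last_insn ().  */
#if 0
  rtx_insn *first = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb),
                                          NULL, NULL);
  rtx_insn *last = get_last_insn ();  /* FIRST..LAST is the fresh copy.  */
#endif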
4459 | |
4460 | /* Create a duplicate of the basic block BB.  */
4461 | 
4462 | static basic_block
4463 | cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *id)
4464 | {
4465 |   rtx_insn *insn;
4466 |   basic_block new_bb;
4467 | 
4468 |   class loop *loop = (id && current_loops) ? bb->loop_father : NULL;
4469 | 
4470 |   insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb), loop, id);
4471 |   new_bb = create_basic_block (insn,
4472 |                                insn ? get_last_insn () : NULL,
4473 |                                EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
4474 | 
4475 |   BB_COPY_PARTITION (new_bb, bb);
4476 |   if (BB_HEADER (bb))
4477 |     {
4478 |       insn = BB_HEADER (bb);
4479 |       while (NEXT_INSN (insn))
4480 |         insn = NEXT_INSN (insn);
4481 |       insn = duplicate_insn_chain (BB_HEADER (bb), insn, loop, id);
4482 |       if (insn)
4483 |         BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4484 |     }
4485 | 
4486 |   if (BB_FOOTER (bb))
4487 |     {
4488 |       insn = BB_FOOTER (bb);
4489 |       while (NEXT_INSN (insn))
4490 |         insn = NEXT_INSN (insn);
4491 |       insn = duplicate_insn_chain (BB_FOOTER (bb), insn, loop, id);
4492 |       if (insn)
4493 |         BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
4494 |     }
4495 | 
4496 |   return new_bb;
4497 | }
4498 | |
4499 | |
4500 | /* Main entry point to this module - initialize the datastructures for
4501 |    CFG layout changes.  It keeps LOOPS up-to-date if not null.
4502 | 
4503 |    FLAGS is a set of additional flags to pass to cleanup_cfg ().  */
4504 | 
4505 | void
4506 | cfg_layout_initialize (int flags)
4507 | {
4508 |   rtx_insn_list *x;
4509 |   basic_block bb;
4510 | 
4511 |   /* Once bb partitioning is complete, cfg layout mode should not be
4512 |      re-entered.  Entering cfg layout mode may require fixups.  For
4513 |      example, edge forwarding performed when optimizing the cfg layout
4514 |      could require moving a block from the hot to the cold
4515 |      section.  This would create an illegal partitioning unless some
4516 |      manual fixup was performed.  */
4517 |   gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);
4518 | 
4519 |   initialize_original_copy_tables ();
4520 | 
4521 |   cfg_layout_rtl_register_cfg_hooks ();
4522 | 
4523 |   record_effective_endpoints ();
4524 | 
4525 |   /* Make sure that the targets of non-local gotos are marked.  */
4526 |   for (x = nonlocal_goto_handler_labels; x; x = x->next ())
4527 |     {
4528 |       bb = BLOCK_FOR_INSN (x->insn ());
4529 |       bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
4530 |     }
4531 | 
4532 |   cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
4533 | }
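/* Editorial sketch, not part of cfgrtl.cc: the canonical bracketing of a
   layout transformation; cfg_layout_finalize below undoes this setup.  */
#if 0
  cfg_layout_initialize (0);
  /* ... edit the CFG through the cfg hooks; the insn stream is only
     physically reordered by cfg_layout_finalize ...  */
  cfg_layout_finalize ();
#endif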
4534 | |
4535 | /* Splits superblocks.  */
4536 | void
4537 | break_superblocks (void)
4538 | {
4539 |   bool need = false;
4540 |   basic_block bb;
4541 | 
4542 |   auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
4543 |   bitmap_clear (superblocks);
4544 | 
4545 |   FOR_EACH_BB_FN (bb, cfun)
4546 |     if (bb->flags & BB_SUPERBLOCK)
4547 |       {
4548 |         bb->flags &= ~BB_SUPERBLOCK;
4549 |         bitmap_set_bit (superblocks, bb->index);
4550 |         need = true;
4551 |       }
4552 | 
4553 |   if (need)
4554 |     {
4555 |       rebuild_jump_labels (get_insns ());
4556 |       find_many_sub_basic_blocks (superblocks);
4557 |     }
4558 | }
4559 | |
4560 | /* Finalize the changes: reorder the insn list according to the sequence
4561 |    specified by aux pointers, enter compensation code, rebuild scope forest.  */
4562 | 
4563 | void
4564 | cfg_layout_finalize (void)
4565 | {
4566 |   free_dominance_info (CDI_DOMINATORS);
4567 |   force_one_exit_fallthru ();
4568 |   rtl_register_cfg_hooks ();
4569 |   if (reload_completed && !targetm.have_epilogue ())
4570 |     fixup_fallthru_exit_predecessor ();
4571 |   fixup_reorder_chain ();
4572 | 
4573 |   rebuild_jump_labels (get_insns ());
4574 |   delete_dead_jumptables ();
4575 | 
4576 |   if (flag_checking)
4577 |     verify_insn_chain ();
4578 |   checking_verify_flow_info ();
4579 | }
4580 | |
4581 | |
4582 | /* Same as split_block but update cfg_layout structures.  */
4583 | 
4584 | static basic_block
4585 | cfg_layout_split_block (basic_block bb, void *insnp)
4586 | {
4587 |   rtx insn = (rtx) insnp;
4588 |   basic_block new_bb = rtl_split_block (bb, insn);
4589 | 
4590 |   BB_FOOTER (new_bb) = BB_FOOTER (bb);
4591 |   BB_FOOTER (bb) = NULL;
4592 | 
4593 |   return new_bb;
4594 | }
4595 | |
4596 | /* Redirect edge E to DEST.  */
4597 | static edge
4598 | cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
4599 | {
4600 |   basic_block src = e->src;
4601 |   edge ret;
4602 | 
4603 |   if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
4604 |     return NULL;
4605 | 
4606 |   if (e->dest == dest)
4607 |     return e;
4608 | 
4609 |   if (e->flags & EDGE_CROSSING
4610 |       && BB_PARTITION (e->src) == BB_PARTITION (dest)
4611 |       && simplejump_p (BB_END (src)))
4612 |     {
4613 |       if (dump_file)
4614 |         fprintf (dump_file,
4615 |                  "Removing crossing jump while redirecting edge from %i to %i\n",
4616 |                  e->src->index, dest->index);
4617 |       delete_insn (BB_END (src));
4618 |       remove_barriers_from_footer (src);
4619 |       e->flags |= EDGE_FALLTHRU;
4620 |     }
4621 | 
4622 |   if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4623 |       && (ret = try_redirect_by_replacing_jump (e, dest, true)))
4624 |     {
4625 |       df_set_bb_dirty (src);
4626 |       return ret;
4627 |     }
4628 | 
4629 |   if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4630 |       && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
4631 |     {
4632 |       if (dump_file)
4633 |         fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
4634 |                  e->src->index, dest->index);
4635 | 
4636 |       df_set_bb_dirty (e->src);
4637 |       redirect_edge_succ (e, dest);
4638 |       return e;
4639 |     }
4640 | 
4641 |   /* Redirect_edge_and_branch may decide to turn a branch into a fallthru
4642 |      edge in case the basic blocks appear to be in sequence.  Avoid this
4643 |      transformation.  */
4644 | 
4645 |   if (e->flags & EDGE_FALLTHRU)
4646 |     {
4647 |       /* Redirect any branch edges unified with the fallthru one.  */
4648 |       if (JUMP_P (BB_END (src))
4649 |           && label_is_jump_target_p (BB_HEAD (e->dest),
4650 |                                      BB_END (src)))
4651 |         {
4652 |           edge redirected;
4653 | 
4654 |           if (dump_file)
4655 |             fprintf (dump_file, "Fallthru edge unified with branch "
4656 |                      "%i->%i redirected to %i\n",
4657 |                      e->src->index, e->dest->index, dest->index);
4658 |           e->flags &= ~EDGE_FALLTHRU;
4659 |           redirected = redirect_branch_edge (e, dest);
4660 |           gcc_assert (redirected);
4661 |           redirected->flags |= EDGE_FALLTHRU;
4662 |           df_set_bb_dirty (redirected->src);
4663 |           return redirected;
4664 |         }
4665 |       /* In case we are redirecting the fallthru edge to the branch edge
4666 |          of a conditional jump, remove it.  */
4667 |       if (EDGE_COUNT (src->succs) == 2)
4668 |         {
4669 |           /* Find the edge that is different from E.  */
4670 |           edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);
4671 | 
4672 |           if (s->dest == dest
4673 |               && any_condjump_p (BB_END (src))
4674 |               && onlyjump_p (BB_END (src)))
4675 |             delete_insn (BB_END (src));
4676 |         }
4677 |       if (dump_file)
4678 |         fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
4679 |                  e->src->index, e->dest->index, dest->index);
4680 |       ret = redirect_edge_succ_nodup (e, dest);
4681 |     }
4682 |   else
4683 |     ret = redirect_branch_edge (e, dest);
4684 | 
4685 |   if (!ret)
4686 |     return NULL;
4687 | 
4688 |   fixup_partition_crossing (ret);
4689 |   /* We don't want simplejumps in the insn stream during cfglayout.  */
4690 |   gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));
4691 | 
4692 |   df_set_bb_dirty (src);
4693 |   return ret;
4694 | }
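/* Editorial sketch, not part of cfgrtl.cc: callers use the generic hook,
   which dispatches to the function above while in cfglayout mode.  */
#if 0
  edge e2 = redirect_edge_and_branch (e, new_dest);
  if (!e2)
    {
      /* Redirection can fail, e.g. for the EDGE_ABNORMAL_CALL or EDGE_EH
         edges rejected at the top of the function above.  */
    }
#endif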
4695 | |
4696 | /* Simple wrapper as we always can redirect fallthru edges.  */
4697 | static basic_block
4698 | cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
4699 | {
4700 |   edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);
4701 | 
4702 |   gcc_assert (redirected);
4703 |   return NULL;
4704 | }
4705 | |
4706 | /* Same as delete_basic_block but update cfg_layout structures.  */
4707 | 
4708 | static void
4709 | cfg_layout_delete_block (basic_block bb)
4710 | {
4711 |   rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remains;
4712 |   rtx_insn **to;
4713 | 
4714 |   if (BB_HEADER (bb))
4715 |     {
4716 |       next = BB_HEAD (bb);
4717 |       if (prev)
4718 |         SET_NEXT_INSN (prev) = BB_HEADER (bb);
4719 |       else
4720 |         set_first_insn (BB_HEADER (bb));
4721 |       SET_PREV_INSN (BB_HEADER (bb)) = prev;
4722 |       insn = BB_HEADER (bb);
4723 |       while (NEXT_INSN (insn))
4724 |         insn = NEXT_INSN (insn);
4725 |       SET_NEXT_INSN (insn) = next;
4726 |       SET_PREV_INSN (next) = insn;
4727 |     }
4728 |   next = NEXT_INSN (BB_END (bb));
4729 |   if (BB_FOOTER (bb))
4730 |     {
4731 |       insn = BB_FOOTER (bb);
4732 |       while (insn)
4733 |         {
4734 |           if (BARRIER_P (insn))
4735 |             {
4736 |               if (PREV_INSN (insn))
4737 |                 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
4738 |               else
4739 |                 BB_FOOTER (bb) = NEXT_INSN (insn);
4740 |               if (NEXT_INSN (insn))
4741 |                 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
4742 |             }
4743 |           if (LABEL_P (insn))
4744 |             break;
4745 |           insn = NEXT_INSN (insn);
4746 |         }
4747 |       if (BB_FOOTER (bb))
4748 |         {
4749 |           insn = BB_END (bb);
4750 |           SET_NEXT_INSN (insn) = BB_FOOTER (bb);
4751 |           SET_PREV_INSN (BB_FOOTER (bb)) = insn;
4752 |           while (NEXT_INSN (insn))
4753 |             insn = NEXT_INSN (insn);
4754 |           SET_NEXT_INSN (insn) = next;
4755 |           if (next)
4756 |             SET_PREV_INSN (next) = insn;
4757 |           else
4758 |             set_last_insn (insn);
4759 |         }
4760 |     }
4761 |   if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
4762 |     to = &BB_HEADER (bb->next_bb);
4763 |   else
4764 |     to = &cfg_layout_function_footer;
4765 | 
4766 |   rtl_delete_block (bb);
4767 | 
4768 |   if (prev)
4769 |     prev = NEXT_INSN (prev);
4770 |   else
4771 |     prev = get_insns ();
4772 |   if (next)
4773 |     next = PREV_INSN (next);
4774 |   else
4775 |     next = get_last_insn ();
4776 | 
4777 |   if (next && NEXT_INSN (next) != prev)
4778 |     {
4779 |       remains = unlink_insn_chain (prev, next);
4780 |       insn = remains;
4781 |       while (NEXT_INSN (insn))
4782 |         insn = NEXT_INSN (insn);
4783 |       SET_NEXT_INSN (insn) = *to;
4784 |       if (*to)
4785 |         SET_PREV_INSN (*to) = insn;
4786 |       *to = remains;
4787 |     }
4788 | }
4789 | |
4790 | /* Return true when blocks A and B can be safely merged.  */
4791 | 
4792 | static bool
4793 | cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
4794 | {
4795 |   /* If we are partitioning hot/cold basic blocks, we don't want to
4796 |      mess up unconditional or indirect jumps that cross between hot
4797 |      and cold sections.
4798 | 
4799 |      Basic block partitioning may result in some jumps that appear to
4800 |      be optimizable (or blocks that appear to be mergeable), but which really
4801 |      must be left untouched (they are required to make it safely across
4802 |      partition boundaries).  See the comments at the top of
4803 |      bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */
4804 | 
4805 |   if (BB_PARTITION (a) != BB_PARTITION (b))
4806 |     return false;
4807 | 
4808 |   /* Protect the loop latches.  */
4809 |   if (current_loops && b->loop_father->latch == b)
4810 |     return false;
4811 | 
4812 |   /* If we would end up moving B's instructions, make sure it doesn't fall
4813 |      through into the exit block, since we cannot recover from a fallthrough
4814 |      edge into the exit block occurring in the middle of a function.  */
4815 |   if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4816 |     {
4817 |       edge e = find_fallthru_edge (b->succs);
4818 |       if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4819 |         return false;
4820 |     }
4821 | 
4822 |   /* There must be exactly one edge in between the blocks.  */
4823 |   return (single_succ_p (a)
4824 |           && single_succ (a) == b
4825 |           && single_pred_p (b)
4826 |           && a != b
4827 |           /* Must be a simple edge.  */
4828 |           && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
4829 |           && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4830 |           && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
4831 |           /* If the jump insn has side effects, we can't kill the edge.
4832 |              When not optimizing, try_redirect_by_replacing_jump will
4833 |              not allow us to redirect an edge by replacing a table jump.  */
4834 |           && (!JUMP_P (BB_END (a))
4835 |               || ((!optimize || reload_completed)
4836 |                   ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
4837 | }
4838 | |
4839 | /* Merge blocks A and B.  The blocks must be mergeable.  */
4840 | 
4841 | static void
4842 | cfg_layout_merge_blocks (basic_block a, basic_block b)
4843 | {
4844 |   /* If B is a forwarder block whose outgoing edge has no location, we'll
4845 |      propagate the locus of the edge between A and B onto it.  */
4846 |   const bool forward_edge_locus
4847 |     = (b->flags & BB_FORWARDER_BLOCK) != 0
4848 |       && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
4849 |   rtx_insn *insn;
4850 | 
4851 |   gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));
4852 | 
4853 |   if (dump_file)
4854 |     fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
4855 |              a->index);
4856 | 
4857 |   /* If there was a CODE_LABEL beginning B, delete it.  */
4858 |   if (LABEL_P (BB_HEAD (b)))
4859 |     {
4860 |       delete_insn (BB_HEAD (b));
4861 |     }
4862 | 
4863 |   /* We should have a fallthru edge in A, or we can do dummy redirection to
4864 |      get it cleaned up.  */
4865 |   if (JUMP_P (BB_END (a)))
4866 |     try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
4867 |   gcc_assert (!JUMP_P (BB_END (a)));
4868 | 
4869 |   /* If not optimizing, preserve the locus of the single edge between
4870 |      blocks A and B if necessary by emitting a nop.  */
4871 |   if (!optimize
4872 |       && !forward_edge_locus
4873 |       && !DECL_IGNORED_P (current_function_decl))
4874 |     emit_nop_for_unique_locus_between (a, b);
4875 | 
4876 |   /* Move things from b->footer after a->footer.  */
4877 |   if (BB_FOOTER (b))
4878 |     {
4879 |       if (!BB_FOOTER (a))
4880 |         BB_FOOTER (a) = BB_FOOTER (b);
4881 |       else
4882 |         {
4883 |           rtx_insn *last = BB_FOOTER (a);
4884 | 
4885 |           while (NEXT_INSN (last))
4886 |             last = NEXT_INSN (last);
4887 |           SET_NEXT_INSN (last) = BB_FOOTER (b);
4888 |           SET_PREV_INSN (BB_FOOTER (b)) = last;
4889 |         }
4890 |       BB_FOOTER (b) = NULL;
4891 |     }
4892 | 
4893 |   /* Move things from b->header before a->footer.
4894 |      Note that this may include dead tablejump data, but we don't clean
4895 |      those up until we go out of cfglayout mode.  */
4896 |   if (BB_HEADER (b))
4897 |     {
4898 |       if (! BB_FOOTER (a))
4899 |         BB_FOOTER (a) = BB_HEADER (b);
4900 |       else
4901 |         {
4902 |           rtx_insn *last = BB_HEADER (b);
4903 | 
4904 |           while (NEXT_INSN (last))
4905 |             last = NEXT_INSN (last);
4906 |           SET_NEXT_INSN (last) = BB_FOOTER (a);
4907 |           SET_PREV_INSN (BB_FOOTER (a)) = last;
4908 |           BB_FOOTER (a) = BB_HEADER (b);
4909 |         }
4910 |       BB_HEADER (b) = NULL;
4911 |     }
4912 | 
4913 |   /* In case the basic blocks are not adjacent, move them around.  */
4914 |   if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
4915 |     {
4916 |       insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));
4917 | 
4918 |       emit_insn_after_noloc (insn, BB_END (a), a);
4919 |     }
4920 |   /* Otherwise just re-associate the instructions.  */
4921 |   else
4922 |     {
4923 |       insn = BB_HEAD (b);
4924 |       BB_END (a) = BB_END (b);
4925 |     }
4926 | 
4927 |   /* emit_insn_after_noloc doesn't call df_insn_change_bb.
4928 |      We need to call it explicitly.  */
4929 |   update_bb_for_insn_chain (insn, BB_END (b), a);
4930 | 
4931 |   /* Skip a possible DELETED_LABEL insn.  */
4932 |   if (!NOTE_INSN_BASIC_BLOCK_P (insn))
4933 |     insn = NEXT_INSN (insn);
4934 |   gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
4935 |   BB_HEAD (b) = BB_END (b) = NULL;
4936 |   delete_insn (insn);
4937 | 
4938 |   df_bb_delete (b->index);
4939 | 
4940 |   if (forward_edge_locus)
4941 |     EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;
4942 | 
4943 |   if (dump_file)
4944 |     fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
4945 | }
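/* Editorial sketch, not part of cfgrtl.cc: merging is always guarded by the
   predicate above; the generic cfghooks wrapper does this for its callers.  */
#if 0
  if (can_merge_blocks_p (a, b))
    merge_blocks (a, b);  /* B's insns are folded into A and B is deleted.  */
#endif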
4946 | |
4947 | /* Split edge E.  */
4948 | 
4949 | static basic_block
4950 | cfg_layout_split_edge (edge e)
4951 | {
4952 |   basic_block new_bb =
4953 |     create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
4954 |                         ? NEXT_INSN (BB_END (e->src)) : get_insns (),
4955 |                         NULL_RTX, e->src);
4956 | 
4957 |   if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
4958 |     BB_COPY_PARTITION (new_bb, e->src);
4959 |   else
4960 |     BB_COPY_PARTITION (new_bb, e->dest);
4961 |   make_edge (new_bb, e->dest, EDGE_FALLTHRU);
4962 |   redirect_edge_and_branch_force (e, new_bb);
4963 | 
4964 |   return new_bb;
4965 | }
4966 | 
4967 | /* Do postprocessing after making a forwarder block joined by edge FALLTHRU.  */
4968 | 
4969 | static void
4970 | rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
4971 | {
4972 | }
4973 | |
4974 | /* Return true if BB contains only labels or non-executable
4975 |    instructions.  */
4976 | 
4977 | static bool
4978 | rtl_block_empty_p (basic_block bb)
4979 | {
4980 |   rtx_insn *insn;
4981 | 
4982 |   if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
4983 |       || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
4984 |     return true;
4985 | 
4986 |   FOR_BB_INSNS (bb, insn)
4987 |     if (NONDEBUG_INSN_P (insn)
4988 |         && (!any_uncondjump_p (insn) || !onlyjump_p (insn)))
4989 |       return false;
4990 | 
4991 |   return true;
4992 | }
4993 | |
4994 | /* Split a basic block if it ends with a conditional branch and if
4995 |    the other part of the block is not empty.  */
4996 | 
4997 | static basic_block
4998 | rtl_split_block_before_cond_jump (basic_block bb)
4999 | {
5000 |   rtx_insn *insn;
5001 |   rtx_insn *split_point = NULL;
5002 |   rtx_insn *last = NULL;
5003 |   bool found_code = false;
5004 | 
5005 |   FOR_BB_INSNS (bb, insn)
5006 |     {
5007 |       if (any_condjump_p (insn))
5008 |         split_point = last;
5009 |       else if (NONDEBUG_INSN_P (insn))
5010 |         found_code = true;
5011 |       last = insn;
5012 |     }
5013 | 
5014 |   /* Split only if we found both a conditional jump and other real insns.  */
5015 |   if (found_code && split_point)
5016 |     return split_block (bb, split_point)->dest;
5017 |   else
5018 |     return NULL;
5019 | }
5020 | |
5021 | /* Return true if BB ends with a call, possibly followed by some
5022 |    instructions that must stay with the call, false otherwise.  */
5023 | 
5024 | static bool
5025 | rtl_block_ends_with_call_p (basic_block bb)
5026 | {
5027 |   rtx_insn *insn = BB_END (bb);
5028 | 
5029 |   while (!CALL_P (insn)
5030 |          && insn != BB_HEAD (bb)
5031 |          && (keep_with_call_p (insn)
5032 |              || NOTE_P (insn)
5033 |              || DEBUG_INSN_P (insn)))
5034 |     insn = PREV_INSN (insn);
5035 |   return (CALL_P (insn));
5036 | }
5037 | 
5038 | /* Return true if BB ends with a conditional branch, false otherwise.  */
5039 | 
5040 | static bool
5041 | rtl_block_ends_with_condjump_p (const_basic_block bb)
5042 | {
5043 |   return any_condjump_p (BB_END (bb));
5044 | }
5045 | |
5046 | /* Return true if we need to add a fake edge to the exit block.
5047 |    Helper function for rtl_flow_call_edges_add.  */
5048 | 
5049 | static bool
5050 | need_fake_edge_p (const rtx_insn *insn)
5051 | {
5052 |   if (!INSN_P (insn))
5053 |     return false;
5054 | 
5055 |   if ((CALL_P (insn)
5056 |        && !SIBLING_CALL_P (insn)
5057 |        && !find_reg_note (insn, REG_NORETURN, NULL)
5058 |        && !(RTL_CONST_OR_PURE_CALL_P (insn))))
5059 |     return true;
5060 | 
5061 |   return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
5062 |            && MEM_VOLATILE_P (PATTERN (insn)))
5063 |           || (GET_CODE (PATTERN (insn)) == PARALLEL
5064 |               && asm_noperands (insn) != -1
5065 |               && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
5066 |           || GET_CODE (PATTERN (insn)) == ASM_INPUT);
5067 | }
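/* Editorial sketch, not part of cfgrtl.cc: source-level constructs whose RTL
   makes the predicate above return true (illustrative C; foo is a
   hypothetical external function).  */
#if 0
  foo ();                                /* Ordinary call: may never return.  */
  __asm__ volatile ("" : : : "memory");  /* Volatile inline assembly.  */
#endif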
5068 | |
5069 | /* Add fake edges to the function exit for any non constant and non noreturn |
5070 | calls, volatile inline assembly in the bitmap of blocks specified by |
5071 | BLOCKS or to the whole CFG if BLOCKS is zero. Return the number of blocks |
5072 | that were split. |
5073 | |
5074 | The goal is to expose cases in which entering a basic block does not imply |
5075 | that all subsequent instructions must be executed. */ |
5076 | |
5077 | static int |
5078 | rtl_flow_call_edges_add (sbitmap blocks) |
5079 | { |
5080 | int i; |
5081 | int blocks_split = 0; |
5082 | int last_bb = last_basic_block_for_fn (cfun)(((cfun + 0))->cfg->x_last_basic_block); |
5083 | bool check_last_block = false; |
5084 | |
5085 | if (n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks) == NUM_FIXED_BLOCKS(2)) |
5086 | return 0; |
5087 | |
5088 | if (! blocks) |
5089 | check_last_block = true; |
5090 | else |
5091 | check_last_block = bitmap_bit_p (blocks, |
5092 | EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)->prev_bb->index); |
5093 | |
5094 | /* In the last basic block, before epilogue generation, there will be |
5095 | a fallthru edge to EXIT. Special care is required if the last insn |
5096 | of the last basic block is a call because make_edge folds duplicate |
5097 | edges, which would result in the fallthru edge also being marked |
5098 | fake, which would result in the fallthru edge being removed by |
5099 | remove_fake_edges, which would result in an invalid CFG. |
5100 | |
5101 | Moreover, we can't elide the outgoing fake edge, since the block |
5102 | profiler needs to take this into account in order to solve the minimal |
5103 | spanning tree in the case that the call doesn't return. |
5104 | |
5105 | Handle this by adding a dummy instruction in a new last basic block. */ |
5106 | if (check_last_block) |
5107 | { |
5108 | basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)->prev_bb; |
5109 | rtx_insn *insn = BB_END (bb)(bb)->il.x.rtl->end_; |
5110 | |
5111 | /* Back up past insns that must be kept in the same block as a call. */ |
5112 | while (insn != BB_HEAD (bb)(bb)->il.x.head_ |
5113 | && keep_with_call_p (insn)) |
5114 | insn = PREV_INSN (insn); |
5115 | |
5116 | if (need_fake_edge_p (insn)) |
5117 | { |
5118 | edge e; |
5119 | |
5120 | e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)); |
5121 | if (e) |
5122 | { |
5123 | insert_insn_on_edge (gen_use (const0_rtx(const_int_rtx[64])), e); |
5124 | commit_edge_insertions (); |
5125 | } |
5126 | } |
5127 | } |
5128 | |
5129 | /* Now add fake edges to the function exit for any non constant |
5130 | calls since there is no way that we can determine if they will |
5131 | return or not... */ |
5132 | |
5133 | for (i = NUM_FIXED_BLOCKS(2); i < last_bb; i++) |
5134 | { |
5135 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i)((*(((cfun + 0))->cfg->x_basic_block_info))[(i)]); |
5136 | rtx_insn *insn; |
5137 | rtx_insn *prev_insn; |
5138 | |
5139 | if (!bb) |
5140 | continue; |
5141 | |
5142 | if (blocks && !bitmap_bit_p (blocks, i)) |
5143 | continue; |
5144 | |
5145 | for (insn = BB_END (bb)(bb)->il.x.rtl->end_; ; insn = prev_insn) |
5146 | { |
5147 | prev_insn = PREV_INSN (insn); |
5148 | if (need_fake_edge_p (insn)) |
5149 | { |
5150 | edge e; |
5151 | rtx_insn *split_at_insn = insn; |
5152 | |
5153 | /* Don't split the block between a call and an insn that should |
5154 | remain in the same block as the call. */ |
5155 | if (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN)) |
5156 | while (split_at_insn != BB_END (bb)(bb)->il.x.rtl->end_ |
5157 | && keep_with_call_p (NEXT_INSN (split_at_insn))) |
5158 | split_at_insn = NEXT_INSN (split_at_insn); |
5159 | |
5160 | /* The handling above of the final block before the epilogue |
5161 | should be enough to verify that there is no edge to the exit |
5162 | block in CFG already. Calling make_edge in such case would |
5163 | cause us to mark that edge as fake and remove it later. */ |
5164 | |
5165 | if (flag_checkingglobal_options.x_flag_checking && split_at_insn == BB_END (bb)(bb)->il.x.rtl->end_) |
5166 | { |
5167 | e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)); |
5168 | gcc_assert (e == NULL)((void)(!(e == nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgrtl.cc" , 5168, __FUNCTION__), 0 : 0)); |
5169 | } |
5170 | |
5171 | /* Note that the following may create a new basic block |
5172 | and renumber the existing basic blocks. */ |
5173 | if (split_at_insn != BB_END (bb)(bb)->il.x.rtl->end_) |
5174 | { |
5175 | e = split_block (bb, split_at_insn); |
5176 | if (e) |
5177 | blocks_split++; |
5178 | } |
5179 | |
5180 | edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE); |
5181 | ne->probability = profile_probability::guessed_never (); |
5182 | } |
5183 | |
5184 | if (insn == BB_HEAD (bb)) |
5185 | break; |
5186 | } |
5187 | } |
5188 | |
5189 | if (blocks_split) |
5190 | verify_flow_info (); |
5191 | |
5192 | return blocks_split; |
5193 | } |
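The function above relies on two facts worth seeing in miniature: make_edge folds duplicate edges, and a FAKE edge to the exit block is what lets a spanning-tree based profiler account for a call that never returns. A minimal standalone sketch under those assumptions (Block, Edge, toy_make_edge and toy_add_fake_exit_edges are invented names, not GCC's types):

#include <cstdio>
#include <vector>

/* Toy stand-ins for basic_block/edge; every name here is hypothetical. */
struct Block;
struct Edge { Block *src; Block *dst; unsigned flags; };
enum { TOY_EDGE_FAKE = 1, TOY_EDGE_FALLTHRU = 2 };

struct Block {
  int index;
  bool ends_in_call;            /* plays the role of need_fake_edge_p */
  std::vector<Edge *> succs;
};

/* Like make_edge, fold duplicate edges instead of creating a second
   one; this is exactly the folding the comment above warns about. */
static Edge *toy_make_edge (std::vector<Edge> &pool, Block *src, Block *dst,
                            unsigned flags)
{
  for (Edge *e : src->succs)
    if (e->dst == dst)
      return nullptr;           /* duplicate: folded away */
  pool.push_back (Edge {src, dst, flags});
  src->succs.push_back (&pool.back ());
  return &pool.back ();
}

/* Give every call-ending block a FAKE edge to EXIT_BB so a spanning-tree
   based profiler still sees the path that never returns. */
static int toy_add_fake_exit_edges (const std::vector<Block *> &bbs,
                                    Block *exit_bb, std::vector<Edge> &pool)
{
  int added = 0;
  for (Block *bb : bbs)
    if (bb->ends_in_call
        && toy_make_edge (pool, bb, exit_bb, TOY_EDGE_FAKE))
      added++;
  return added;
}

int main ()
{
  std::vector<Edge> pool;
  pool.reserve (8);             /* keep Edge pointers stable in this toy */
  Block b0 {0, true, {}}, b1 {1, true, {}}, exit_bb {-1, false, {}};
  std::vector<Block *> bbs = {&b0, &b1};
  toy_make_edge (pool, &b0, &b1, TOY_EDGE_FALLTHRU);
  toy_make_edge (pool, &b1, &exit_bb, TOY_EDGE_FALLTHRU);
  /* b1 already reaches EXIT_BB, so its fake edge is folded; only b0's
     is new.  This folding is why the real code inserts a dummy insn
     rather than letting an existing fallthru edge be marked fake. */
  std::printf ("fake edges added: %d\n",
               toy_add_fake_exit_edges (bbs, &exit_bb, pool));
  return 0;
}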
5194 | |
5195 | /* Add COMP_RTX as a condition at the end of COND_BB. FIRST_HEAD is |
5196 | the conditional branch target. SECOND_HEAD should be the fall-thru |
5197 | target; there is no need to handle it here, since the loop versioning |
5198 | code takes care of that. SECOND_HEAD is a parameter only because the |
5199 | tree-level condition hook needs it, and both hooks must share a type. */ |
5200 | static void |
5201 | rtl_lv_add_condition_to_bb (basic_block first_head, |
5202 | basic_block second_head ATTRIBUTE_UNUSED, |
5203 | basic_block cond_bb, void *comp_rtx) |
5204 | { |
5205 | rtx_code_label *label; |
5206 | rtx_insn *seq, *jump; |
5207 | rtx op0 = XEXP ((rtx)comp_rtx, 0); |
5208 | rtx op1 = XEXP ((rtx)comp_rtx, 1); |
5209 | enum rtx_code comp = GET_CODE ((rtx)comp_rtx); |
5210 | machine_mode mode; |
5211 | |
5212 | |
5213 | label = block_label (first_head); |
5214 | mode = GET_MODE (op0); |
5215 | if (mode == VOIDmode) |
5216 | mode = GET_MODE (op1); |
5217 | |
5218 | start_sequence (); |
5219 | op0 = force_operand (op0, NULL_RTX); |
5220 | op1 = force_operand (op1, NULL_RTX); |
5221 | do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label, |
5222 | profile_probability::uninitialized ()); |
5223 | jump = get_last_insn (); |
5224 | JUMP_LABEL (jump) = label; |
5225 | LABEL_NUSES (label)++; |
5226 | seq = get_insns (); |
5227 | end_sequence (); |
5228 | |
5229 | /* Add the new cond insn at the end of COND_BB. */ |
5230 | emit_insn_after (seq, BB_END (cond_bb)); |
5231 | } |
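The body follows a standard RTL emission idiom: open a detached sequence, emit the compare-and-jump onto it, close the sequence, then splice the finished chain in after BB_END of the conditional block. A rough standalone analogue of that build-then-splice pattern (SeqBuilder and the string "insns" are invented for illustration; only the shape mirrors the code above):

#include <iostream>
#include <iterator>
#include <list>
#include <string>

/* A toy insn chain: each "insn" is just a string. */
using InsnChain = std::list<std::string>;

/* Plays the role of start_sequence ()/get_insns ()/end_sequence ():
   insns emitted on the builder accumulate on a detached side chain. */
struct SeqBuilder {
  InsnChain seq;
  void emit (const std::string &insn) { seq.push_back (insn); }
};

/* Plays the role of emit_insn_after (seq, after): splice the whole
   side chain into CHAIN immediately after AFTER, in one operation. */
static void emit_seq_after (InsnChain &chain, InsnChain::iterator after,
                            SeqBuilder &b)
{
  chain.splice (std::next (after), b.seq);
}

int main ()
{
  InsnChain cond_bb = {"insn 1", "insn 2 (BB_END)"};
  SeqBuilder b;
  b.emit ("force op0, op1 into operands");
  b.emit ("compare op0, op1");          /* do_compare_rtx_and_jump ... */
  b.emit ("jump-if cond to label");     /* ... ends with the condjump  */
  emit_seq_after (cond_bb, std::prev (cond_bb.end ()), b);
  for (const std::string &insn : cond_bb)
    std::cout << insn << '\n';
  return 0;
}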
5232 | |
5233 | |
5234 | /* Given a block B with a conditional branch at its end, store the |
5235 | branch edge in BRANCH_EDGE and the fall-thru edge in |
5236 | FALLTHRU_EDGE. */ |
5237 | static void |
5238 | rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge, |
5239 | edge *fallthru_edge) |
5240 | { |
5241 | edge e = EDGE_SUCC (b, 0); |
5242 | |
5243 | if (e->flags & EDGE_FALLTHRU) |
5244 | { |
5245 | *fallthru_edge = e; |
5246 | *branch_edge = EDGE_SUCC (b, 1); |
5247 | } |
5248 | else |
5249 | { |
5250 | *branch_edge = e; |
5251 | *fallthru_edge = EDGE_SUCC (b, 1); |
5252 | } |
5253 | } |
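The invariant that makes this safe is easy to state: a block ending in a conditional jump has exactly two successors, and exactly one of them carries EDGE_FALLTHRU, so testing edge 0 alone classifies both. A small self-contained check of that logic with toy types (ToyEdge and TOY_EDGE_FALLTHRU are hypothetical stand-ins):

#include <cassert>
#include <utility>

struct ToyEdge { unsigned flags; };
constexpr unsigned TOY_EDGE_FALLTHRU = 1;

/* Return {branch, fallthru}.  As in rtl_extract_cond_bb_edges, only
   edge 0 is tested: with two successors of a condjump, exactly one
   carries the fallthru flag. */
static std::pair<ToyEdge *, ToyEdge *>
extract_cond_edges (ToyEdge *e0, ToyEdge *e1)
{
  if (e0->flags & TOY_EDGE_FALLTHRU)
    return {e1, e0};
  return {e0, e1};
}

int main ()
{
  ToyEdge taken {0}, fall {TOY_EDGE_FALLTHRU};
  auto [branch, fallthru] = extract_cond_edges (&fall, &taken);
  assert (branch == &taken && fallthru == &fall);
  return 0;
}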
5254 | |
5255 | void |
5256 | init_rtl_bb_info (basic_block bb) |
5257 | { |
5258 | gcc_assert (!bb->il.x.rtl); |
5259 | bb->il.x.head_ = NULL; |
5260 | bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> (); |
5261 | } |
5262 | |
5263 | static bool |
5264 | rtl_bb_info_initialized_p (basic_block bb) |
5265 | { |
5266 | return bb->il.x.rtl; |
5267 | } |
5268 | |
5269 | /* Returns true if it is possible to remove edge E by redirecting |
5270 | it to the destination of the other edge from E->src. */ |
5271 | |
5272 | static bool |
5273 | rtl_can_remove_branch_p (const_edge e) |
5274 | { |
5275 | const_basic_block src = e->src; |
5276 | const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest; |
5277 | const rtx_insn *insn = BB_END (src); |
5278 | rtx set; |
5279 | |
5280 | /* The conditions are taken from try_redirect_by_replacing_jump. */ |
5281 | if (target == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
5282 | return false; |
5283 | |
5284 | if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) |
5285 | return false; |
5286 | |
5287 | if (BB_PARTITION (src) != BB_PARTITION (target)) |
5288 | return false; |
5289 | |
5290 | if (!onlyjump_p (insn) |
5291 | || tablejump_p (insn, NULL, NULL)) |
5292 | return false; |
5293 | |
5294 | set = single_set (insn); |
5295 | if (!set || side_effects_p (set)) |
5296 | return false; |
5297 | |
5298 | return true; |
5299 | } |
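The predicate is a chain of independent vetoes over the edge and its source block; seeing them in isolation makes the structure plain. A hedged toy rendering (every type and field below is invented; only the veto order mirrors the function above):

#include <cassert>

struct ToyInsn { bool only_jump; bool has_side_effects; };
struct ToyBlock { int partition; bool is_exit; ToyInsn last; };
struct ToyBranch { ToyBlock *src; ToyBlock *other_dest; bool abnormal; };

/* Same veto chain as rtl_can_remove_branch_p: any failed check means
   "this branch must stay". */
static bool toy_can_remove_branch (const ToyBranch &e)
{
  if (e.other_dest->is_exit)
    return false;   /* redirect target would be the exit block */
  if (e.abnormal)
    return false;   /* EH or abnormal-call edges are untouchable */
  if (e.src->partition != e.other_dest->partition)
    return false;   /* never cross the hot/cold partition boundary */
  if (!e.src->last.only_jump || e.src->last.has_side_effects)
    return false;   /* the jump does more than just jump */
  return true;
}

int main ()
{
  ToyBlock a {0, false, {true, false}}, b {0, false, {true, false}};
  ToyBranch ok {&a, &b, false};
  assert (toy_can_remove_branch (ok));
  a.last.has_side_effects = true;   /* one veto fires ... */
  assert (!toy_can_remove_branch (ok));
  return 0;
}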
5300 | |
5301 | static basic_block |
5302 | rtl_duplicate_bb (basic_block bb, copy_bb_data *id) |
5303 | { |
5304 | bb = cfg_layout_duplicate_bb (bb, id); |
5305 | bb->aux = NULL; |
5306 | return bb; |
5307 | } |
5308 | |
5309 | /* Do book-keeping of basic block BB for the profile consistency checker. |
5310 | Store the counts in RECORD. */ |
5311 | static void |
5312 | rtl_account_profile_record (basic_block bb, struct profile_record *record) |
5313 | { |
5314 | rtx_insn *insn; |
5315 | FOR_BB_INSNS (bb, insn) |
5316 | if (INSN_P (insn)) |
5317 | { |
5318 | record->size += insn_cost (insn, false); |
5319 | if (profile_info) |
5320 | { |
5321 | if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().initialized_p () |
5322 | && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().nonzero_p () |
5323 | && bb->count.ipa ().initialized_p ()) |
5324 | record->time |
5325 | += insn_cost (insn, true) * bb->count.ipa ().to_gcov_type (); |
5326 | } |
5327 | else if (bb->count.initialized_p () |
5328 | && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ()) |
5329 | record->time |
5330 | += insn_cost (insn, true) |
5331 | * bb->count.to_sreal_scale |
5332 | (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count).to_double (); |
5333 | else |
5334 | record->time += insn_cost (insn, true); |
5335 | } |
5336 | } |
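Stripped of the profile_count plumbing, the accounting is: size is an unweighted sum of insn costs, and time is the same sum scaled by how often the block runs per function invocation (the role of the to_sreal_scale ratio above). A numeric sketch under that reading (ToyRecord and toy_account_block are invented names):

#include <cstdio>
#include <vector>

/* A stand-in for profile_record: SIZE sums static insn costs, TIME sums
   the same costs weighted by estimated execution frequency. */
struct ToyRecord { long size = 0; double time = 0.0; };

/* COSTS are per-insn costs for one block.  BB_COUNT and ENTRY_COUNT
   play the role of bb->count and the entry block's count: their ratio
   is "executions of this block per function call". */
static void toy_account_block (ToyRecord &rec, const std::vector<int> &costs,
                               double bb_count, double entry_count)
{
  double scale = entry_count > 0.0 ? bb_count / entry_count : 1.0;
  for (int c : costs)
    {
      rec.size += c;            /* size: unweighted */
      rec.time += c * scale;    /* time: frequency-weighted */
    }
}

int main ()
{
  ToyRecord rec;
  /* A block that executes on half of all function invocations. */
  toy_account_block (rec, {4, 1, 2}, 50.0, 100.0);
  std::printf ("size=%ld time=%.1f\n", rec.size, rec.time);  /* 7, 3.5 */
  return 0;
}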
5337 | |
5338 | /* Implementation of CFG manipulation for linearized RTL. */ |
5339 | struct cfg_hooks rtl_cfg_hooks = { |
5340 | "rtl", |
5341 | rtl_verify_flow_info, |
5342 | rtl_dump_bb, |
5343 | rtl_dump_bb_for_graph, |
5344 | rtl_create_basic_block, |
5345 | rtl_redirect_edge_and_branch, |
5346 | rtl_redirect_edge_and_branch_force, |
5347 | rtl_can_remove_branch_p, |
5348 | rtl_delete_block, |
5349 | rtl_split_block, |
5350 | rtl_move_block_after, |
5351 | rtl_can_merge_blocks, /* can_merge_blocks_p */ |
5352 | rtl_merge_blocks, |
5353 | rtl_predict_edge, |
5354 | rtl_predicted_by_p, |
5355 | cfg_layout_can_duplicate_bb_p, |
5356 | rtl_duplicate_bb, |
5357 | rtl_split_edge, |
5358 | rtl_make_forwarder_block, |
5359 | rtl_tidy_fallthru_edge, |
5360 | rtl_force_nonfallthru, |
5361 | rtl_block_ends_with_call_p, |
5362 | rtl_block_ends_with_condjump_p, |
5363 | rtl_flow_call_edges_add, |
5364 | NULL, /* execute_on_growing_pred */ |
5365 | NULL, /* execute_on_shrinking_pred */ |
5366 | NULL, /* duplicate loop for trees */ |
5367 | NULL, /* lv_add_condition_to_bb */ |
5368 | NULL, /* lv_adjust_loop_header_phi */ |
5369 | NULL, /* extract_cond_bb_edges */ |
5370 | NULL, /* flush_pending_stmts */ |
5371 | rtl_block_empty_p, /* block_empty_p */ |
5372 | rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */ |
5373 | rtl_account_profile_record, |
5374 | }; |
5375 | |
5376 | /* Implementation of CFG manipulation for cfg layout RTL, where |
5377 | basic blocks connected via fallthru edges do not have to be adjacent. |
5378 | This representation will hopefully become the default one in a future |
5379 | version of the compiler. */ |
5380 | |
5381 | struct cfg_hooks cfg_layout_rtl_cfg_hooks = { |
5382 | "cfglayout mode", |
5383 | rtl_verify_flow_info_1, |
5384 | rtl_dump_bb, |
5385 | rtl_dump_bb_for_graph, |
5386 | cfg_layout_create_basic_block, |
5387 | cfg_layout_redirect_edge_and_branch, |
5388 | cfg_layout_redirect_edge_and_branch_force, |
5389 | rtl_can_remove_branch_p, |
5390 | cfg_layout_delete_block, |
5391 | cfg_layout_split_block, |
5392 | rtl_move_block_after, |
5393 | cfg_layout_can_merge_blocks_p, |
5394 | cfg_layout_merge_blocks, |
5395 | rtl_predict_edge, |
5396 | rtl_predicted_by_p, |
5397 | cfg_layout_can_duplicate_bb_p, |
5398 | cfg_layout_duplicate_bb, |
5399 | cfg_layout_split_edge, |
5400 | rtl_make_forwarder_block, |
5401 | NULL, /* tidy_fallthru_edge */ |
5402 | rtl_force_nonfallthru, |
5403 | rtl_block_ends_with_call_p, |
5404 | rtl_block_ends_with_condjump_p, |
5405 | rtl_flow_call_edges_add, |
5406 | NULL, /* execute_on_growing_pred */ |
5407 | NULL, /* execute_on_shrinking_pred */ |
5408 | duplicate_loop_body_to_header_edge, /* duplicate loop for trees */ |
5409 | rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */ |
5410 | NULL, /* lv_adjust_loop_header_phi */ |
5411 | rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */ |
5412 | NULL, /* flush_pending_stmts */ |
5413 | rtl_block_empty_p, /* block_empty_p */ |
5414 | rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */ |
5415 | rtl_account_profile_record, |
5416 | }; |
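Both tables are instances of one pattern: a struct of function pointers selected at runtime, so generic CFG code is written once against cfg_hooks and works over plain RTL and cfglayout RTL alike. A stripped-down sketch of that dispatch with toy types (only rtl_register_cfg_hooks and cfg_layout_rtl_register_cfg_hooks are real GCC names here; everything else is invented):

#include <cstdio>

struct toy_bb { int index; };

/* A miniature cfg_hooks table.  In GCC the active table is installed
   by rtl_register_cfg_hooks / cfg_layout_rtl_register_cfg_hooks. */
struct toy_cfg_hooks {
  const char *name;
  bool (*can_merge_blocks) (toy_bb *, toy_bb *);
};

/* Plain-RTL flavour: blocks must be physically adjacent to merge. */
static bool merge_if_adjacent (toy_bb *a, toy_bb *b)
{
  return b->index == a->index + 1;
}

/* cfglayout flavour: fallthru neighbours need not be adjacent. */
static bool merge_regardless (toy_bb *, toy_bb *)
{
  return true;
}

static const toy_cfg_hooks toy_rtl_hooks = {"rtl", merge_if_adjacent};
static const toy_cfg_hooks toy_layout_hooks = {"cfglayout mode", merge_regardless};

/* Like the global cfg_hooks pointer: exactly one table is active. */
static const toy_cfg_hooks *current = &toy_rtl_hooks;

int main ()
{
  toy_bb a {1}, b {3};
  std::printf ("%s: merge? %d\n", current->name,
               current->can_merge_blocks (&a, &b));   /* rtl: 0 */
  current = &toy_layout_hooks;                        /* switch modes */
  std::printf ("%s: merge? %d\n", current->name,
               current->can_merge_blocks (&a, &b));   /* layout: 1 */
  return 0;
}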
5417 | |
5418 | #include "gt-cfgrtl.h" |
5419 | |
5420 | #if __GNUC__ >= 10 |
5421 | # pragma GCC diagnostic pop |
5422 | #endif |