File: | build/gcc/cfgloopmanip.cc |
Warning: | line 567, column 4 Called C++ object pointer is null |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* Loop manipulation code for GNU compiler. | |||
2 | Copyright (C) 2002-2023 Free Software Foundation, Inc. | |||
3 | ||||
4 | This file is part of GCC. | |||
5 | ||||
6 | GCC is free software; you can redistribute it and/or modify it under | |||
7 | the terms of the GNU General Public License as published by the Free | |||
8 | Software Foundation; either version 3, or (at your option) any later | |||
9 | version. | |||
10 | ||||
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY | |||
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or | |||
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |||
14 | for more details. | |||
15 | ||||
16 | You should have received a copy of the GNU General Public License | |||
17 | along with GCC; see the file COPYING3. If not see | |||
18 | <http://www.gnu.org/licenses/>. */ | |||
19 | ||||
20 | #include "config.h" | |||
21 | #include "system.h" | |||
22 | #include "coretypes.h" | |||
23 | #include "backend.h" | |||
24 | #include "rtl.h" | |||
25 | #include "tree.h" | |||
26 | #include "gimple.h" | |||
27 | #include "cfghooks.h" | |||
28 | #include "cfganal.h" | |||
29 | #include "cfgloop.h" | |||
30 | #include "gimple-iterator.h" | |||
31 | #include "gimplify-me.h" | |||
32 | #include "tree-ssa-loop-manip.h" | |||
33 | #include "dumpfile.h" | |||
34 | ||||
/* Forward declarations of helpers local to this file.  */
static void copy_loops_to (class loop **, int,
			   class loop *);
static void loop_redirect_edge (edge, basic_block);
static void remove_bbs (basic_block *, int);
static bool rpe_enum_p (const_basic_block, const void *);
static int find_path (edge, basic_block **);
static void fix_loop_placements (class loop *, bool *);
static bool fix_bb_placement (basic_block);
static void fix_bb_placements (basic_block, bool *, bitmap);
44 | ||||
45 | /* Checks whether basic block BB is dominated by DATA. */ | |||
46 | static bool | |||
47 | rpe_enum_p (const_basic_block bb, const void *data) | |||
48 | { | |||
49 | return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data); | |||
50 | } | |||
51 | ||||
52 | /* Remove basic blocks BBS. NBBS is the number of the basic blocks. */ | |||
53 | ||||
54 | static void | |||
55 | remove_bbs (basic_block *bbs, int nbbs) | |||
56 | { | |||
57 | int i; | |||
58 | ||||
59 | for (i = 0; i < nbbs; i++) | |||
60 | delete_basic_block (bbs[i]); | |||
61 | } | |||
62 | ||||
/* Find path -- i.e. the basic blocks dominated by edge E and put them
   into array BBS, that will be allocated large enough to contain them.
   E->dest must have exactly one predecessor for this to work (it is
   easy to achieve and we do not put it here because we do not want to
   alter anything by this function).  The number of basic blocks in the
   path is returned.  The caller is responsible for freeing *BBS.  */
static int
find_path (edge e, basic_block **bbs)
{
  gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);

  /* Find bbs in the path.  Since E->dest has a single predecessor, the
     blocks dominated by E are exactly those dominated by E->dest.  */
  *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
			     n_basic_blocks_for_fn (cfun), e->dest);
}
79 | ||||
80 | /* Fix placement of basic block BB inside loop hierarchy -- | |||
81 | Let L be a loop to that BB belongs. Then every successor of BB must either | |||
82 | 1) belong to some superloop of loop L, or | |||
83 | 2) be a header of loop K such that K->outer is superloop of L | |||
84 | Returns true if we had to move BB into other loop to enforce this condition, | |||
85 | false if the placement of BB was already correct (provided that placements | |||
86 | of its successors are correct). */ | |||
87 | static bool | |||
88 | fix_bb_placement (basic_block bb) | |||
89 | { | |||
90 | edge e; | |||
91 | edge_iterator ei; | |||
92 | class loop *loop = current_loops((cfun + 0)->x_current_loops)->tree_root, *act; | |||
93 | ||||
94 | FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei) , &(e)); ei_next (&(ei))) | |||
95 | { | |||
96 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)) | |||
97 | continue; | |||
98 | ||||
99 | act = e->dest->loop_father; | |||
100 | if (act->header == e->dest) | |||
101 | act = loop_outer (act); | |||
102 | ||||
103 | if (flow_loop_nested_p (loop, act)) | |||
104 | loop = act; | |||
105 | } | |||
106 | ||||
107 | if (loop == bb->loop_father) | |||
108 | return false; | |||
109 | ||||
110 | remove_bb_from_loops (bb); | |||
111 | add_bb_to_loop (bb, loop); | |||
112 | ||||
113 | return true; | |||
114 | } | |||
115 | ||||
/* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
   of LOOP to that leads at least one exit edge of LOOP, and set it
   as the immediate superloop of LOOP.  Return true if the immediate superloop
   of LOOP changed.

   IRRED_INVALIDATED is set to true if a change in the loop structures might
   invalidate the information about irreducible regions.  */

static bool
fix_loop_placement (class loop *loop, bool *irred_invalidated)
{
  unsigned i;
  edge e;
  auto_vec<edge> exits = get_loop_exit_edges (loop);
  class loop *father = current_loops->tree_root, *act;
  bool ret = false;

  /* FATHER becomes the innermost loop containing the destination of some
     exit edge of LOOP; start from the root and narrow down.  */
  FOR_EACH_VEC_ELT (exits, i, e)
    {
      act = find_common_loop (loop, e->dest->loop_father);
      if (flow_loop_nested_p (father, act))
	father = act;
    }

  if (father != loop_outer (loop))
    {
      /* The superloops strictly between LOOP's old parent and FATHER no
	 longer contain LOOP's blocks; adjust their block counts before
	 re-linking LOOP under FATHER.  */
      for (act = loop_outer (loop); act != father; act = loop_outer (act))
	act->num_nodes -= loop->num_nodes;
      flow_loop_tree_node_remove (loop);
      flow_loop_tree_node_add (father, loop);

      /* The exit edges of LOOP no longer exits its original immediate
	 superloops; remove them from the appropriate exit lists.  */
      FOR_EACH_VEC_ELT (exits, i, e)
	{
	  /* We may need to recompute irreducible loops.  */
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	  rescan_loop_exit (e, false, false);
	}

      ret = true;
    }

  return ret;
}
162 | ||||
/* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
   enforce condition stated in description of fix_bb_placement.  We
   start from basic block FROM that had some of its successors removed, so that
   his placement no longer has to be correct, and iteratively fix placement of
   its predecessors that may change if placement of FROM changed.  Also fix
   placement of subloops of FROM->loop_father, that might also be altered due
   to this change; the condition for them is similar, except that instead of
   successors we consider edges coming out of the loops.

   If the changes may invalidate the information about irreducible regions,
   IRRED_INVALIDATED is set to true.

   If LOOP_CLOSED_SSA_INVALIDATED is non-zero then all basic blocks with
   changed loop_father are collected there.  */

static void
fix_bb_placements (basic_block from,
		   bool *irred_invalidated,
		   bitmap loop_closed_ssa_invalidated)
{
  basic_block *queue, *qtop, *qbeg, *qend;
  class loop *base_loop, *target_loop;
  edge e;

  /* We pass through blocks back-reachable from FROM, testing whether some
     of their successors moved to outer loop.  It may be necessary to
     iterate several times, but it is finite, as we stop unless we move
     the basic block up the loop structure.  The whole story is a bit
     more complicated due to presence of subloops, those are moved using
     fix_loop_placement.  */

  base_loop = from->loop_father;
  /* If we are already in the outermost loop, the basic blocks cannot be moved
     outside of it.  If FROM is the header of the base loop, it cannot be moved
     outside of it, either.  In both cases, we can end now.  */
  if (base_loop == current_loops->tree_root
      || from == base_loop->header)
    return;

  auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
  bitmap_clear (in_queue);
  bitmap_set_bit (in_queue, from->index);
  /* Prevent us from going out of the base_loop.  */
  bitmap_set_bit (in_queue, base_loop->header->index);

  /* QUEUE is a circular buffer of at most num_nodes + 1 entries; QBEG and
     QEND wrap around at QTOP.  */
  queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
  qtop = queue + base_loop->num_nodes + 1;
  qbeg = queue;
  qend = queue + 1;
  *qbeg = from;

  while (qbeg != qend)
    {
      edge_iterator ei;
      from = *qbeg;
      qbeg++;
      if (qbeg == qtop)
	qbeg = queue;
      bitmap_clear_bit (in_queue, from->index);

      if (from->loop_father->header == from)
	{
	  /* Subloop header, maybe move the loop upward.  */
	  if (!fix_loop_placement (from->loop_father, irred_invalidated))
	    continue;
	  target_loop = loop_outer (from->loop_father);
	  if (loop_closed_ssa_invalidated)
	    {
	      /* The whole moved loop changed loop_father; record all of
		 its blocks.  */
	      basic_block *bbs = get_loop_body (from->loop_father);
	      for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
		bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
	      free (bbs);
	    }
	}
      else
	{
	  /* Ordinary basic block.  */
	  if (!fix_bb_placement (from))
	    continue;
	  target_loop = from->loop_father;
	  if (loop_closed_ssa_invalidated)
	    bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
	}

      FOR_EACH_EDGE (e, ei, from->succs)
	{
	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;
	}

      /* Something has changed, insert predecessors into queue.  */
      FOR_EACH_EDGE (e, ei, from->preds)
	{
	  basic_block pred = e->src;
	  class loop *nca;

	  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
	    *irred_invalidated = true;

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* If it is subloop, then it either was not moved, or
	     the path up the loop tree from base_loop do not contain
	     it.  */
	  nca = find_common_loop (pred->loop_father, base_loop);
	  if (pred->loop_father != base_loop
	      && (nca == base_loop
		  || nca != pred->loop_father))
	    pred = pred->loop_father->header;
	  else if (!flow_loop_nested_p (target_loop, pred->loop_father))
	    {
	      /* If PRED is already higher in the loop hierarchy than the
		 TARGET_LOOP to that we moved FROM, the change of the position
		 of FROM does not affect the position of PRED, so there is no
		 point in processing it.  */
	      continue;
	    }

	  if (bitmap_bit_p (in_queue, pred->index))
	    continue;

	  /* Schedule the basic block.  */
	  *qend = pred;
	  qend++;
	  if (qend == qtop)
	    qend = queue;
	  bitmap_set_bit (in_queue, pred->index);
	}
    }
  free (queue);
}
295 | ||||
/* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
   and update loop structures and dominators.  Return true if we were able
   to remove the path, false otherwise (and nothing is affected then).  */
bool
remove_path (edge e, bool *irred_invalidated,
	     bitmap loop_closed_ssa_invalidated)
{
  edge ae;
  basic_block *rem_bbs, *bord_bbs, from, bb;
  vec<basic_block> dom_bbs;
  int i, nrem, n_bord_bbs;
  bool local_irred_invalidated = false;
  edge_iterator ei;
  class loop *l, *f;

  /* Callers that do not care about irreducibility still need a valid
     location for the flag to accumulate into.  */
  if (! irred_invalidated)
    irred_invalidated = &local_irred_invalidated;

  if (!can_remove_branch_p (e))
    return false;

  /* Keep track of whether we need to update information about irreducible
     regions.  This is the case if the removed area is a part of the
     irreducible region, or if the set of basic blocks that belong to a loop
     that is inside an irreducible region is changed, or if such a loop is
     removed.  */
  if (e->flags & EDGE_IRREDUCIBLE_LOOP)
    *irred_invalidated = true;

  /* We need to check whether basic blocks are dominated by the edge
     e, but we only have basic block dominators.  This is easy to
     fix -- when e->dest has exactly one predecessor, this corresponds
     to blocks dominated by e->dest, if not, split the edge.  */
  if (!single_pred_p (e->dest))
    e = single_pred_edge (split_edge (e));

  /* It may happen that by removing path we remove one or more loops
     we belong to.  In this case first unloop the loops, then proceed
     normally.   We may assume that e->dest is not a header of any loop,
     as it now has exactly one predecessor.  */
  for (l = e->src->loop_father; loop_outer (l); l = f)
    {
      f = loop_outer (l);
      if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
	unloop (l, irred_invalidated, loop_closed_ssa_invalidated);
    }

  /* Identify the path.  */
  nrem = find_path (e, &rem_bbs);

  n_bord_bbs = 0;
  bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  auto_sbitmap seen (last_basic_block_for_fn (cfun));
  bitmap_clear (seen);

  /* Find "border" blocks -- i.e. those with a predecessor in the removed
     path.  */
  for (i = 0; i < nrem; i++)
    bitmap_set_bit (seen, rem_bbs[i]->index);
  if (!*irred_invalidated)
    FOR_EACH_EDGE (ae, ei, e->src->succs)
      if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  && !bitmap_bit_p (seen, ae->dest->index)
	  && ae->flags & EDGE_IRREDUCIBLE_LOOP)
	{
	  *irred_invalidated = true;
	  break;
	}

  for (i = 0; i < nrem; i++)
    {
      FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
	if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
	    && !bitmap_bit_p (seen, ae->dest->index))
	  {
	    bitmap_set_bit (seen, ae->dest->index);
	    bord_bbs[n_bord_bbs++] = ae->dest;

	    if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
	      *irred_invalidated = true;
	  }
    }

  /* Remove the path.  */
  from = e->src;
  remove_branch (e);
  dom_bbs.create (0);

  /* Cancel loops contained in the path.  */
  for (i = 0; i < nrem; i++)
    if (rem_bbs[i]->loop_father->header == rem_bbs[i])
      cancel_loop_tree (rem_bbs[i]->loop_father);

  remove_bbs (rem_bbs, nrem);
  free (rem_bbs);

  /* Find blocks whose dominators may be affected.  Only the dominator
     sons of the border blocks' immediate dominators that are not
     dominated by FROM can have changed.  */
  bitmap_clear (seen);
  for (i = 0; i < n_bord_bbs; i++)
    {
      basic_block ldom;

      bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
      if (bitmap_bit_p (seen, bb->index))
	continue;
      bitmap_set_bit (seen, bb->index);

      for (ldom = first_dom_son (CDI_DOMINATORS, bb);
	   ldom;
	   ldom = next_dom_son (CDI_DOMINATORS, ldom))
	if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
	  dom_bbs.safe_push (ldom);
    }

  /* Recount dominators.  */
  iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
  dom_bbs.release ();
  free (bord_bbs);

  /* Fix placements of basic blocks inside loops and the placement of
     loops in the loop tree.  */
  fix_bb_placements (from, irred_invalidated, loop_closed_ssa_invalidated);
  fix_loop_placements (from->loop_father, irred_invalidated);

  if (local_irred_invalidated
      && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
    mark_irreducible_loops ();

  return true;
}
425 | ||||
426 | /* Creates place for a new LOOP in loops structure of FN. */ | |||
427 | ||||
428 | void | |||
429 | place_new_loop (struct function *fn, class loop *loop) | |||
430 | { | |||
431 | loop->num = number_of_loops (fn); | |||
432 | vec_safe_push (loops_for_fn (fn)->larray, loop); | |||
433 | } | |||
434 | ||||
/* Given LOOP structure with filled header and latch, find the body of the
   corresponding loop and add it to loops tree.  Insert the LOOP as a son of
   outer.  */

void
add_loop (class loop *loop, class loop *outer)
{
  basic_block *bbs;
  int i, n;
  class loop *subloop;
  edge e;
  edge_iterator ei;

  /* Add it to loop structure.  */
  place_new_loop (cfun, loop);
  flow_loop_tree_node_add (outer, loop);

  /* Find its nodes.  */
  bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));

  for (i = 0; i < n; i++)
    {
      /* Blocks that belonged directly to OUTER are re-parented to LOOP.  */
      if (bbs[i]->loop_father == outer)
	{
	  remove_bb_from_loops (bbs[i]);
	  add_bb_to_loop (bbs[i], loop);
	  continue;
	}

      /* Block belongs to some subloop; only the node count of LOOP
	 itself needs adjusting.  */
      loop->num_nodes++;

      /* If we find a direct subloop of OUTER, move it to LOOP.  */
      subloop = bbs[i]->loop_father;
      if (loop_outer (subloop) == outer
	  && subloop->header == bbs[i])
	{
	  flow_loop_tree_node_remove (subloop);
	  flow_loop_tree_node_add (loop, subloop);
	}
    }

  /* Update the information about loop exit edges.  */
  for (i = 0; i < n; i++)
    {
      FOR_EACH_EDGE (e, ei, bbs[i]->succs)
	{
	  rescan_loop_exit (e, false, false);
	}
    }

  free (bbs);
}
488 | ||||
489 | /* Scale profile of loop by P. */ | |||
490 | ||||
491 | void | |||
492 | scale_loop_frequencies (class loop *loop, profile_probability p) | |||
493 | { | |||
494 | basic_block *bbs; | |||
495 | ||||
496 | bbs = get_loop_body (loop); | |||
497 | scale_bbs_frequencies (bbs, loop->num_nodes, p); | |||
498 | free (bbs); | |||
499 | } | |||
500 | ||||
501 | /* Scale profile in LOOP by P. | |||
502 | If ITERATION_BOUND is non-zero, scale even further if loop is predicted | |||
503 | to iterate too many times. | |||
504 | Before caling this function, preheader block profile should be already | |||
505 | scaled to final count. This is necessary because loop iterations are | |||
506 | determined by comparing header edge count to latch ege count and thus | |||
507 | they need to be scaled synchronously. */ | |||
508 | ||||
509 | void | |||
510 | scale_loop_profile (class loop *loop, profile_probability p, | |||
511 | gcov_type iteration_bound) | |||
512 | { | |||
513 | edge e, preheader_e; | |||
514 | edge_iterator ei; | |||
515 | ||||
516 | if (dump_file && (dump_flags & TDF_DETAILS)) | |||
| ||||
517 | { | |||
518 | fprintf (dump_file, ";; Scaling loop %i with scale ", | |||
519 | loop->num); | |||
520 | p.dump (dump_file); | |||
521 | fprintf (dump_file, " bounding iterations to %i\n", | |||
522 | (int)iteration_bound); | |||
523 | } | |||
524 | ||||
525 | /* Scale the probabilities. */ | |||
526 | scale_loop_frequencies (loop, p); | |||
527 | ||||
528 | if (iteration_bound == 0) | |||
529 | return; | |||
530 | ||||
531 | gcov_type iterations = expected_loop_iterations_unbounded (loop, NULLnullptr, true); | |||
532 | ||||
533 | if (dump_file && (dump_flags & TDF_DETAILS)) | |||
534 | { | |||
535 | fprintf (dump_file, ";; guessed iterations after scaling %i\n", | |||
536 | (int)iterations); | |||
537 | } | |||
538 | ||||
539 | /* See if loop is predicted to iterate too many times. */ | |||
540 | if (iterations <= iteration_bound) | |||
541 | return; | |||
542 | ||||
543 | preheader_e = loop_preheader_edge (loop); | |||
544 | ||||
545 | /* We could handle also loops without preheaders, but bounding is | |||
546 | currently used only by optimizers that have preheaders constructed. */ | |||
547 | gcc_checking_assert (preheader_e)((void)(!(preheader_e) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 547, __FUNCTION__), 0 : 0)); | |||
548 | profile_count count_in = preheader_e->count (); | |||
549 | ||||
550 | if (count_in > profile_count::zero () | |||
551 | && loop->header->count.initialized_p ()) | |||
552 | { | |||
553 | profile_count count_delta = profile_count::zero (); | |||
554 | ||||
555 | e = single_exit (loop); | |||
556 | if (e) | |||
557 | { | |||
558 | edge other_e; | |||
559 | FOR_EACH_EDGE (other_e, ei, e->src->succs)for ((ei) = ei_start_1 (&((e->src->succs))); ei_cond ((ei), &(other_e)); ei_next (&(ei))) | |||
560 | if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE)) | |||
561 | && e != other_e) | |||
562 | break; | |||
563 | ||||
564 | /* Probability of exit must be 1/iterations. */ | |||
565 | count_delta = e->count (); | |||
566 | e->probability = profile_probability::always () / iteration_bound; | |||
567 | other_e->probability = e->probability.invert (); | |||
| ||||
568 | ||||
569 | /* In code below we only handle the following two updates. */ | |||
570 | if (other_e->dest != loop->header | |||
571 | && other_e->dest != loop->latch | |||
572 | && (dump_file && (dump_flags & TDF_DETAILS))) | |||
573 | { | |||
574 | fprintf (dump_file, ";; giving up on update of paths from " | |||
575 | "exit condition to latch\n"); | |||
576 | } | |||
577 | } | |||
578 | else | |||
579 | if (dump_file && (dump_flags & TDF_DETAILS)) | |||
580 | fprintf (dump_file, ";; Loop has multiple exit edges; " | |||
581 | "giving up on exit condition update\n"); | |||
582 | ||||
583 | /* Roughly speaking we want to reduce the loop body profile by the | |||
584 | difference of loop iterations. We however can do better if | |||
585 | we look at the actual profile, if it is available. */ | |||
586 | p = profile_probability::always (); | |||
587 | ||||
588 | count_in *= iteration_bound; | |||
589 | p = count_in.probability_in (loop->header->count); | |||
590 | if (!(p > profile_probability::never ())) | |||
591 | p = profile_probability::very_unlikely (); | |||
592 | ||||
593 | if (p == profile_probability::always () | |||
594 | || !p.initialized_p ()) | |||
595 | return; | |||
596 | ||||
597 | /* If latch exists, change its count, since we changed | |||
598 | probability of exit. Theoretically we should update everything from | |||
599 | source of exit edge to latch, but for vectorizer this is enough. */ | |||
600 | if (loop->latch && loop->latch != e->src) | |||
601 | loop->latch->count += count_delta; | |||
602 | ||||
603 | /* Scale the probabilities. */ | |||
604 | scale_loop_frequencies (loop, p); | |||
605 | ||||
606 | /* Change latch's count back. */ | |||
607 | if (loop->latch && loop->latch != e->src) | |||
608 | loop->latch->count -= count_delta; | |||
609 | ||||
610 | if (dump_file && (dump_flags & TDF_DETAILS)) | |||
611 | fprintf (dump_file, ";; guessed iterations are now %i\n", | |||
612 | (int)expected_loop_iterations_unbounded (loop, NULLnullptr, true)); | |||
613 | } | |||
614 | } | |||
615 | ||||
616 | /* Recompute dominance information for basic blocks outside LOOP. */ | |||
617 | ||||
618 | static void | |||
619 | update_dominators_in_loop (class loop *loop) | |||
620 | { | |||
621 | vec<basic_block> dom_bbs = vNULL; | |||
622 | basic_block *body; | |||
623 | unsigned i; | |||
624 | ||||
625 | auto_sbitmap seen (last_basic_block_for_fn (cfun)(((cfun + 0))->cfg->x_last_basic_block)); | |||
626 | bitmap_clear (seen); | |||
627 | body = get_loop_body (loop); | |||
628 | ||||
629 | for (i = 0; i < loop->num_nodes; i++) | |||
630 | bitmap_set_bit (seen, body[i]->index); | |||
631 | ||||
632 | for (i = 0; i < loop->num_nodes; i++) | |||
633 | { | |||
634 | basic_block ldom; | |||
635 | ||||
636 | for (ldom = first_dom_son (CDI_DOMINATORS, body[i]); | |||
637 | ldom; | |||
638 | ldom = next_dom_son (CDI_DOMINATORS, ldom)) | |||
639 | if (!bitmap_bit_p (seen, ldom->index)) | |||
640 | { | |||
641 | bitmap_set_bit (seen, ldom->index); | |||
642 | dom_bbs.safe_push (ldom); | |||
643 | } | |||
644 | } | |||
645 | ||||
646 | iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); | |||
647 | free (body); | |||
648 | dom_bbs.release (); | |||
649 | } | |||
650 | ||||
651 | /* Creates an if region as shown above. CONDITION is used to create | |||
652 | the test for the if. | |||
653 | ||||
654 | | | |||
655 | | ------------- ------------- | |||
656 | | | pred_bb | | pred_bb | | |||
657 | | ------------- ------------- | |||
658 | | | | | |||
659 | | | | ENTRY_EDGE | |||
660 | | | ENTRY_EDGE V | |||
661 | | | ====> ------------- | |||
662 | | | | cond_bb | | |||
663 | | | | CONDITION | | |||
664 | | | ------------- | |||
665 | | V / \ | |||
666 | | ------------- e_false / \ e_true | |||
667 | | | succ_bb | V V | |||
668 | | ------------- ----------- ----------- | |||
669 | | | false_bb | | true_bb | | |||
670 | | ----------- ----------- | |||
671 | | \ / | |||
672 | | \ / | |||
673 | | V V | |||
674 | | ------------- | |||
675 | | | join_bb | | |||
676 | | ------------- | |||
677 | | | exit_edge (result) | |||
678 | | V | |||
679 | | ----------- | |||
680 | | | succ_bb | | |||
681 | | ----------- | |||
682 | | | |||
683 | */ | |||
684 | ||||
edge
create_empty_if_region_on_edge (edge entry_edge, tree condition)
{

  basic_block cond_bb, true_bb, false_bb, join_bb;
  edge e_true, e_false, exit_edge;
  gcond *cond_stmt;
  tree simple_cond;
  gimple_stmt_iterator gsi;

  /* COND_BB is a fresh block on ENTRY_EDGE that will hold the test.  */
  cond_bb = split_edge (entry_edge);

  /* Insert condition in cond_bb.  CONDITION is gimplified first so the
     gcond operates on a simple operand.  */
  gsi = gsi_last_bb (cond_bb);
  simple_cond =
    force_gimple_operand_gsi (&gsi, condition, true, NULL,
			      false, GSI_NEW_STMT);
  cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
  gsi = gsi_last_bb (cond_bb);
  gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);

  /* Create the diamond: JOIN_BB below, TRUE_BB on the fallthru arm,
     FALSE_BB on a newly made second arm.  */
  join_bb = split_edge (single_succ_edge (cond_bb));

  e_true = single_succ_edge (cond_bb);
  true_bb = split_edge (e_true);

  e_false = make_edge (cond_bb, join_bb, 0);
  false_bb = split_edge (e_false);

  /* Relabel the two arms as the true/false outcomes of the condition.  */
  e_true->flags &= ~EDGE_FALLTHRU;
  e_true->flags |= EDGE_TRUE_VALUE;
  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;

  set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
  set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
  set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);

  exit_edge = single_succ_edge (join_bb);

  if (single_pred_p (exit_edge->dest))
    set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);

  return exit_edge;
}
731 | ||||
732 | /* create_empty_loop_on_edge | |||
733 | | | |||
734 | | - pred_bb - ------ pred_bb ------ | |||
735 | | | | | iv0 = initial_value | | |||
736 | | -----|----- ---------|----------- | |||
737 | | | ______ | entry_edge | |||
738 | | | entry_edge / | | | |||
739 | | | ====> | -V---V- loop_header ------------- | |||
740 | | V | | iv_before = phi (iv0, iv_after) | | |||
741 | | - succ_bb - | ---|----------------------------- | |||
742 | | | | | | | |||
743 | | ----------- | ---V--- loop_body --------------- | |||
744 | | | | iv_after = iv_before + stride | | |||
745 | | | | if (iv_before < upper_bound) | | |||
746 | | | ---|--------------\-------------- | |||
747 | | | | \ exit_e | |||
748 | | | V \ | |||
749 | | | - loop_latch - V- succ_bb - | |||
750 | | | | | | | | |||
751 | | | /------------- ----------- | |||
752 | | \ ___ / | |||
753 | ||||
754 | Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME | |||
755 | that is used before the increment of IV. IV_BEFORE should be used for | |||
756 | adding code to the body that uses the IV. OUTER is the outer loop in | |||
757 | which the new loop should be inserted. | |||
758 | ||||
759 | Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and | |||
760 | inserted on the loop entry edge. This implies that this function | |||
761 | should be used only when the UPPER_BOUND expression is a loop | |||
762 | invariant. */ | |||
763 | ||||
764 | class loop * | |||
765 | create_empty_loop_on_edge (edge entry_edge, | |||
766 | tree initial_value, | |||
767 | tree stride, tree upper_bound, | |||
768 | tree iv, | |||
769 | tree *iv_before, | |||
770 | tree *iv_after, | |||
771 | class loop *outer) | |||
772 | { | |||
773 | basic_block loop_header, loop_latch, succ_bb, pred_bb; | |||
774 | class loop *loop; | |||
775 | gimple_stmt_iterator gsi; | |||
776 | gimple_seq stmts; | |||
777 | gcond *cond_expr; | |||
778 | tree exit_test; | |||
779 | edge exit_e; | |||
780 | ||||
781 | gcc_assert (entry_edge && initial_value && stride && upper_bound && iv)((void)(!(entry_edge && initial_value && stride && upper_bound && iv) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 781, __FUNCTION__), 0 : 0)); | |||
782 | ||||
783 | /* Create header, latch and wire up the loop. */ | |||
784 | pred_bb = entry_edge->src; | |||
785 | loop_header = split_edge (entry_edge); | |||
786 | loop_latch = split_edge (single_succ_edge (loop_header)); | |||
787 | succ_bb = single_succ (loop_latch); | |||
788 | make_edge (loop_header, succ_bb, 0); | |||
789 | redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header); | |||
790 | ||||
791 | /* Set immediate dominator information. */ | |||
792 | set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb); | |||
793 | set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header); | |||
794 | set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header); | |||
795 | ||||
796 | /* Initialize a loop structure and put it in a loop hierarchy. */ | |||
797 | loop = alloc_loop (); | |||
798 | loop->header = loop_header; | |||
799 | loop->latch = loop_latch; | |||
800 | add_loop (loop, outer); | |||
801 | ||||
802 | /* TODO: Fix counts. */ | |||
803 | scale_loop_frequencies (loop, profile_probability::even ()); | |||
804 | ||||
805 | /* Update dominators. */ | |||
806 | update_dominators_in_loop (loop); | |||
807 | ||||
808 | /* Modify edge flags. */ | |||
809 | exit_e = single_exit (loop); | |||
810 | exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE; | |||
811 | single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE; | |||
812 | ||||
813 | /* Construct IV code in loop. */ | |||
814 | initial_value = force_gimple_operand (initial_value, &stmts, true, iv); | |||
815 | if (stmts) | |||
816 | { | |||
817 | gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts); | |||
818 | gsi_commit_edge_inserts (); | |||
819 | } | |||
820 | ||||
821 | upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULLnullptr); | |||
822 | if (stmts) | |||
823 | { | |||
824 | gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts); | |||
825 | gsi_commit_edge_inserts (); | |||
826 | } | |||
827 | ||||
828 | gsi = gsi_last_bb (loop_header); | |||
829 | create_iv (initial_value, stride, iv, loop, &gsi, false, | |||
830 | iv_before, iv_after); | |||
831 | ||||
832 | /* Insert loop exit condition. */ | |||
833 | cond_expr = gimple_build_cond | |||
834 | (LT_EXPR, *iv_before, upper_bound, NULL_TREE(tree) nullptr, NULL_TREE(tree) nullptr); | |||
835 | ||||
836 | exit_test = gimple_cond_lhs (cond_expr); | |||
837 | exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULLnullptr, | |||
838 | false, GSI_NEW_STMT); | |||
839 | gimple_cond_set_lhs (cond_expr, exit_test); | |||
840 | gsi = gsi_last_bb (exit_e->src); | |||
841 | gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT); | |||
842 | ||||
843 | split_block_after_labels (loop_header); | |||
844 | ||||
845 | return loop; | |||
846 | } | |||
847 | ||||
848 | /* Remove the latch edge of a LOOP and update loops to indicate that | |||
849 | the LOOP was removed. After this function, original loop latch will | |||
850 | have no successor, which caller is expected to fix somehow. | |||
851 | ||||
852 | If this may cause the information about irreducible regions to become | |||
853 | invalid, IRRED_INVALIDATED is set to true. | |||
854 | ||||
855 | LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store | |||
856 | basic blocks that had non-trivial update on their loop_father.*/ | |||
857 | ||||
858 | void | |||
859 | unloop (class loop *loop, bool *irred_invalidated, | |||
860 | bitmap loop_closed_ssa_invalidated) | |||
861 | { | |||
862 | basic_block *body; | |||
863 | class loop *ploop; | |||
864 | unsigned i, n; | |||
865 | basic_block latch = loop->latch; | |||
866 | bool dummy = false; | |||
867 | ||||
868 | if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP) | |||
869 | *irred_invalidated = true; | |||
870 | ||||
871 | /* This is relatively straightforward. The dominators are unchanged, as | |||
872 | loop header dominates loop latch, so the only thing we have to care of | |||
873 | is the placement of loops and basic blocks inside the loop tree. We | |||
874 | move them all to the loop->outer, and then let fix_bb_placements do | |||
875 | its work. */ | |||
876 | ||||
877 | body = get_loop_body (loop); | |||
878 | n = loop->num_nodes; | |||
879 | for (i = 0; i < n; i++) | |||
880 | if (body[i]->loop_father == loop) | |||
881 | { | |||
882 | remove_bb_from_loops (body[i]); | |||
883 | add_bb_to_loop (body[i], loop_outer (loop)); | |||
884 | } | |||
885 | free (body); | |||
886 | ||||
887 | while (loop->inner) | |||
888 | { | |||
889 | ploop = loop->inner; | |||
890 | flow_loop_tree_node_remove (ploop); | |||
891 | flow_loop_tree_node_add (loop_outer (loop), ploop); | |||
892 | } | |||
893 | ||||
894 | /* Remove the loop and free its data. */ | |||
895 | delete_loop (loop); | |||
896 | ||||
897 | remove_edge (single_succ_edge (latch)); | |||
898 | ||||
899 | /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if | |||
900 | there is an irreducible region inside the cancelled loop, the flags will | |||
901 | be still correct. */ | |||
902 | fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated); | |||
903 | } | |||
904 | ||||
905 | /* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that | |||
906 | condition stated in description of fix_loop_placement holds for them. | |||
907 | It is used in case when we removed some edges coming out of LOOP, which | |||
908 | may cause the right placement of LOOP inside loop tree to change. | |||
909 | ||||
910 | IRRED_INVALIDATED is set to true if a change in the loop structures might | |||
911 | invalidate the information about irreducible regions. */ | |||
912 | ||||
913 | static void | |||
914 | fix_loop_placements (class loop *loop, bool *irred_invalidated) | |||
915 | { | |||
916 | class loop *outer; | |||
917 | ||||
918 | while (loop_outer (loop)) | |||
919 | { | |||
920 | outer = loop_outer (loop); | |||
921 | if (!fix_loop_placement (loop, irred_invalidated)) | |||
922 | break; | |||
923 | ||||
924 | /* Changing the placement of a loop in the loop tree may alter the | |||
925 | validity of condition 2) of the description of fix_bb_placement | |||
926 | for its preheader, because the successor is the header and belongs | |||
927 | to the loop. So call fix_bb_placements to fix up the placement | |||
928 | of the preheader and (possibly) of its predecessors. */ | |||
929 | fix_bb_placements (loop_preheader_edge (loop)->src, | |||
930 | irred_invalidated, NULLnullptr); | |||
931 | loop = outer; | |||
932 | } | |||
933 | } | |||
934 | ||||
935 | /* Duplicate loop bounds and other information we store about | |||
936 | the loop into its duplicate. */ | |||
937 | ||||
938 | void | |||
939 | copy_loop_info (class loop *loop, class loop *target) | |||
940 | { | |||
941 | gcc_checking_assert (!target->any_upper_bound && !target->any_estimate)((void)(!(!target->any_upper_bound && !target-> any_estimate) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 941, __FUNCTION__), 0 : 0)); | |||
942 | target->any_upper_bound = loop->any_upper_bound; | |||
943 | target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound; | |||
944 | target->any_likely_upper_bound = loop->any_likely_upper_bound; | |||
945 | target->nb_iterations_likely_upper_bound | |||
946 | = loop->nb_iterations_likely_upper_bound; | |||
947 | target->any_estimate = loop->any_estimate; | |||
948 | target->nb_iterations_estimate = loop->nb_iterations_estimate; | |||
949 | target->estimate_state = loop->estimate_state; | |||
950 | target->safelen = loop->safelen; | |||
951 | target->simdlen = loop->simdlen; | |||
952 | target->constraints = loop->constraints; | |||
953 | target->can_be_parallel = loop->can_be_parallel; | |||
954 | target->warned_aggressive_loop_optimizations | |||
955 | |= loop->warned_aggressive_loop_optimizations; | |||
956 | target->dont_vectorize = loop->dont_vectorize; | |||
957 | target->force_vectorize = loop->force_vectorize; | |||
958 | target->in_oacc_kernels_region = loop->in_oacc_kernels_region; | |||
959 | target->finite_p = loop->finite_p; | |||
960 | target->unroll = loop->unroll; | |||
961 | target->owned_clique = loop->owned_clique; | |||
962 | } | |||
963 | ||||
964 | /* Copies copy of LOOP as subloop of TARGET loop, placing newly | |||
965 | created loop into loops structure. If AFTER is non-null | |||
966 | the new loop is added at AFTER->next, otherwise in front of TARGETs | |||
967 | sibling list. */ | |||
968 | class loop * | |||
969 | duplicate_loop (class loop *loop, class loop *target, class loop *after) | |||
970 | { | |||
971 | class loop *cloop; | |||
972 | cloop = alloc_loop (); | |||
973 | place_new_loop (cfun(cfun + 0), cloop); | |||
974 | ||||
975 | copy_loop_info (loop, cloop); | |||
976 | ||||
977 | /* Mark the new loop as copy of LOOP. */ | |||
978 | set_loop_copy (loop, cloop); | |||
979 | ||||
980 | /* Add it to target. */ | |||
981 | flow_loop_tree_node_add (target, cloop, after); | |||
982 | ||||
983 | return cloop; | |||
984 | } | |||
985 | ||||
986 | /* Copies structure of subloops of LOOP into TARGET loop, placing | |||
987 | newly created loops into loop tree at the end of TARGETs sibling | |||
988 | list in the original order. */ | |||
989 | void | |||
990 | duplicate_subloops (class loop *loop, class loop *target) | |||
991 | { | |||
992 | class loop *aloop, *cloop, *tail; | |||
993 | ||||
994 | for (tail = target->inner; tail && tail->next; tail = tail->next) | |||
995 | ; | |||
996 | for (aloop = loop->inner; aloop; aloop = aloop->next) | |||
997 | { | |||
998 | cloop = duplicate_loop (aloop, target, tail); | |||
999 | tail = cloop; | |||
1000 | gcc_assert(!tail->next)((void)(!(!tail->next) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1000, __FUNCTION__), 0 : 0)); | |||
1001 | duplicate_subloops (aloop, cloop); | |||
1002 | } | |||
1003 | } | |||
1004 | ||||
1005 | /* Copies structure of subloops of N loops, stored in array COPIED_LOOPS, | |||
1006 | into TARGET loop, placing newly created loops into loop tree adding | |||
1007 | them to TARGETs sibling list at the end in order. */ | |||
1008 | static void | |||
1009 | copy_loops_to (class loop **copied_loops, int n, class loop *target) | |||
1010 | { | |||
1011 | class loop *aloop, *tail; | |||
1012 | int i; | |||
1013 | ||||
1014 | for (tail = target->inner; tail && tail->next; tail = tail->next) | |||
1015 | ; | |||
1016 | for (i = 0; i < n; i++) | |||
1017 | { | |||
1018 | aloop = duplicate_loop (copied_loops[i], target, tail); | |||
1019 | tail = aloop; | |||
1020 | gcc_assert(!tail->next)((void)(!(!tail->next) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1020, __FUNCTION__), 0 : 0)); | |||
1021 | duplicate_subloops (copied_loops[i], aloop); | |||
1022 | } | |||
1023 | } | |||
1024 | ||||
1025 | /* Redirects edge E to basic block DEST. */ | |||
1026 | static void | |||
1027 | loop_redirect_edge (edge e, basic_block dest) | |||
1028 | { | |||
1029 | if (e->dest == dest) | |||
1030 | return; | |||
1031 | ||||
1032 | redirect_edge_and_branch_force (e, dest); | |||
1033 | } | |||
1034 | ||||
1035 | /* Check whether LOOP's body can be duplicated. */ | |||
1036 | bool | |||
1037 | can_duplicate_loop_p (const class loop *loop) | |||
1038 | { | |||
1039 | int ret; | |||
1040 | basic_block *bbs = get_loop_body (loop); | |||
1041 | ||||
1042 | ret = can_copy_bbs_p (bbs, loop->num_nodes); | |||
1043 | free (bbs); | |||
1044 | ||||
1045 | return ret; | |||
1046 | } | |||
1047 | ||||
1048 | /* Duplicates body of LOOP to given edge E NDUPL times. Takes care of updating | |||
1049 | loop structure and dominators (order of inner subloops is retained). | |||
1050 | E's destination must be LOOP header for this to work, i.e. it must be entry | |||
1051 | or latch edge of this loop; these are unique, as the loops must have | |||
1052 | preheaders for this function to work correctly (in case E is latch, the | |||
1053 | function unrolls the loop, if E is entry edge, it peels the loop). Store | |||
1054 | edges created by copying ORIG edge from copies corresponding to set bits in | |||
1055 | WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies | |||
1056 | are numbered in order given by control flow through them) into TO_REMOVE | |||
1057 | array. Returns false if duplication is | |||
1058 | impossible. */ | |||
1059 | ||||
1060 | bool | |||
1061 | duplicate_loop_body_to_header_edge (class loop *loop, edge e, | |||
1062 | unsigned int ndupl, sbitmap wont_exit, | |||
1063 | edge orig, vec<edge> *to_remove, int flags) | |||
1064 | { | |||
1065 | class loop *target, *aloop; | |||
1066 | class loop **orig_loops; | |||
1067 | unsigned n_orig_loops; | |||
1068 | basic_block header = loop->header, latch = loop->latch; | |||
1069 | basic_block *new_bbs, *bbs, *first_active; | |||
1070 | basic_block new_bb, bb, first_active_latch = NULLnullptr; | |||
1071 | edge ae, latch_edge; | |||
1072 | edge spec_edges[2], new_spec_edges[2]; | |||
1073 | const int SE_LATCH = 0; | |||
1074 | const int SE_ORIG = 1; | |||
1075 | unsigned i, j, n; | |||
1076 | int is_latch = (latch == e->src); | |||
1077 | profile_probability *scale_step = NULLnullptr; | |||
1078 | profile_probability scale_main = profile_probability::always (); | |||
1079 | profile_probability scale_act = profile_probability::always (); | |||
1080 | profile_count after_exit_num = profile_count::zero (), | |||
1081 | after_exit_den = profile_count::zero (); | |||
1082 | bool scale_after_exit = false; | |||
1083 | int add_irreducible_flag; | |||
1084 | basic_block place_after; | |||
1085 | bitmap bbs_to_scale = NULLnullptr; | |||
1086 | bitmap_iterator bi; | |||
1087 | ||||
1088 | gcc_assert (e->dest == loop->header)((void)(!(e->dest == loop->header) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1088, __FUNCTION__), 0 : 0)); | |||
1089 | gcc_assert (ndupl > 0)((void)(!(ndupl > 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1089, __FUNCTION__), 0 : 0)); | |||
1090 | ||||
1091 | if (orig) | |||
1092 | { | |||
1093 | /* Orig must be edge out of the loop. */ | |||
1094 | gcc_assert (flow_bb_inside_loop_p (loop, orig->src))((void)(!(flow_bb_inside_loop_p (loop, orig->src)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1094, __FUNCTION__), 0 : 0)); | |||
1095 | gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest))((void)(!(!flow_bb_inside_loop_p (loop, orig->dest)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1095, __FUNCTION__), 0 : 0)); | |||
1096 | } | |||
1097 | ||||
1098 | n = loop->num_nodes; | |||
1099 | bbs = get_loop_body_in_dom_order (loop); | |||
1100 | gcc_assert (bbs[0] == loop->header)((void)(!(bbs[0] == loop->header) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1100, __FUNCTION__), 0 : 0)); | |||
1101 | gcc_assert (bbs[n - 1] == loop->latch)((void)(!(bbs[n - 1] == loop->latch) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1101, __FUNCTION__), 0 : 0)); | |||
1102 | ||||
1103 | /* Check whether duplication is possible. */ | |||
1104 | if (!can_copy_bbs_p (bbs, loop->num_nodes)) | |||
1105 | { | |||
1106 | free (bbs); | |||
1107 | return false; | |||
1108 | } | |||
1109 | new_bbs = XNEWVEC (basic_block, loop->num_nodes)((basic_block *) xmalloc (sizeof (basic_block) * (loop->num_nodes ))); | |||
1110 | ||||
1111 | /* In case we are doing loop peeling and the loop is in the middle of | |||
1112 | irreducible region, the peeled copies will be inside it too. */ | |||
1113 | add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP; | |||
1114 | gcc_assert (!is_latch || !add_irreducible_flag)((void)(!(!is_latch || !add_irreducible_flag) ? fancy_abort ( "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1114, __FUNCTION__), 0 : 0)); | |||
1115 | ||||
1116 | /* Find edge from latch. */ | |||
1117 | latch_edge = loop_latch_edge (loop); | |||
1118 | ||||
1119 | if (flags & DLTHE_FLAG_UPDATE_FREQ1) | |||
1120 | { | |||
1121 | /* Calculate coefficients by that we have to scale counts | |||
1122 | of duplicated loop bodies. */ | |||
1123 | profile_count count_in = header->count; | |||
1124 | profile_count count_le = latch_edge->count (); | |||
1125 | profile_count count_out_orig = orig ? orig->count () : count_in - count_le; | |||
1126 | profile_probability prob_pass_thru = count_le.probability_in (count_in); | |||
1127 | profile_probability prob_pass_wont_exit = | |||
1128 | (count_le + count_out_orig).probability_in (count_in); | |||
1129 | ||||
1130 | if (orig && orig->probability.initialized_p () | |||
1131 | && !(orig->probability == profile_probability::always ())) | |||
1132 | { | |||
1133 | /* The blocks that are dominated by a removed exit edge ORIG have | |||
1134 | frequencies scaled by this. */ | |||
1135 | if (orig->count ().initialized_p ()) | |||
1136 | { | |||
1137 | after_exit_num = orig->src->count; | |||
1138 | after_exit_den = after_exit_num - orig->count (); | |||
1139 | scale_after_exit = true; | |||
1140 | } | |||
1141 | bbs_to_scale = BITMAP_ALLOCbitmap_alloc (NULLnullptr); | |||
1142 | for (i = 0; i < n; i++) | |||
1143 | { | |||
1144 | if (bbs[i] != orig->src | |||
1145 | && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src)) | |||
1146 | bitmap_set_bit (bbs_to_scale, i); | |||
1147 | } | |||
1148 | } | |||
1149 | ||||
1150 | scale_step = XNEWVEC (profile_probability, ndupl)((profile_probability *) xmalloc (sizeof (profile_probability ) * (ndupl))); | |||
1151 | ||||
1152 | for (i = 1; i <= ndupl; i++) | |||
1153 | scale_step[i - 1] = bitmap_bit_p (wont_exit, i) | |||
1154 | ? prob_pass_wont_exit | |||
1155 | : prob_pass_thru; | |||
1156 | ||||
1157 | /* Complete peeling is special as the probability of exit in last | |||
1158 | copy becomes 1. */ | |||
1159 | if (flags & DLTHE_FLAG_COMPLETTE_PEEL4) | |||
1160 | { | |||
1161 | profile_count wanted_count = e->count (); | |||
1162 | ||||
1163 | gcc_assert (!is_latch)((void)(!(!is_latch) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1163, __FUNCTION__), 0 : 0)); | |||
1164 | /* First copy has count of incoming edge. Each subsequent | |||
1165 | count should be reduced by prob_pass_wont_exit. Caller | |||
1166 | should've managed the flags so all except for original loop | |||
1167 | has won't exist set. */ | |||
1168 | scale_act = wanted_count.probability_in (count_in); | |||
1169 | /* Now simulate the duplication adjustments and compute header | |||
1170 | frequency of the last copy. */ | |||
1171 | for (i = 0; i < ndupl; i++) | |||
1172 | wanted_count = wanted_count.apply_probability (scale_step [i]); | |||
1173 | scale_main = wanted_count.probability_in (count_in); | |||
1174 | } | |||
1175 | /* Here we insert loop bodies inside the loop itself (for loop unrolling). | |||
1176 | First iteration will be original loop followed by duplicated bodies. | |||
1177 | It is necessary to scale down the original so we get right overall | |||
1178 | number of iterations. */ | |||
1179 | else if (is_latch) | |||
1180 | { | |||
1181 | profile_probability prob_pass_main = bitmap_bit_p (wont_exit, 0) | |||
1182 | ? prob_pass_wont_exit | |||
1183 | : prob_pass_thru; | |||
1184 | profile_probability p = prob_pass_main; | |||
1185 | profile_count scale_main_den = count_in; | |||
1186 | for (i = 0; i < ndupl; i++) | |||
1187 | { | |||
1188 | scale_main_den += count_in.apply_probability (p); | |||
1189 | p = p * scale_step[i]; | |||
1190 | } | |||
1191 | /* If original loop is executed COUNT_IN times, the unrolled | |||
1192 | loop will account SCALE_MAIN_DEN times. */ | |||
1193 | scale_main = count_in.probability_in (scale_main_den); | |||
1194 | scale_act = scale_main * prob_pass_main; | |||
1195 | } | |||
1196 | else | |||
1197 | { | |||
1198 | profile_count preheader_count = e->count (); | |||
1199 | for (i = 0; i < ndupl; i++) | |||
1200 | scale_main = scale_main * scale_step[i]; | |||
1201 | scale_act = preheader_count.probability_in (count_in); | |||
1202 | } | |||
1203 | } | |||
1204 | ||||
1205 | /* Loop the new bbs will belong to. */ | |||
1206 | target = e->src->loop_father; | |||
1207 | ||||
1208 | /* Original loops. */ | |||
1209 | n_orig_loops = 0; | |||
1210 | for (aloop = loop->inner; aloop; aloop = aloop->next) | |||
1211 | n_orig_loops++; | |||
1212 | orig_loops = XNEWVEC (class loop *, n_orig_loops)((class loop * *) xmalloc (sizeof (class loop *) * (n_orig_loops ))); | |||
1213 | for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++) | |||
1214 | orig_loops[i] = aloop; | |||
1215 | ||||
1216 | set_loop_copy (loop, target); | |||
1217 | ||||
1218 | first_active = XNEWVEC (basic_block, n)((basic_block *) xmalloc (sizeof (basic_block) * (n))); | |||
1219 | if (is_latch) | |||
1220 | { | |||
1221 | memcpy (first_active, bbs, n * sizeof (basic_block)); | |||
1222 | first_active_latch = latch; | |||
1223 | } | |||
1224 | ||||
1225 | spec_edges[SE_ORIG] = orig; | |||
1226 | spec_edges[SE_LATCH] = latch_edge; | |||
1227 | ||||
1228 | place_after = e->src; | |||
1229 | for (j = 0; j < ndupl; j++) | |||
1230 | { | |||
1231 | /* Copy loops. */ | |||
1232 | copy_loops_to (orig_loops, n_orig_loops, target); | |||
1233 | ||||
1234 | /* Copy bbs. */ | |||
1235 | copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop, | |||
1236 | place_after, true); | |||
1237 | place_after = new_spec_edges[SE_LATCH]->src; | |||
1238 | ||||
1239 | if (flags & DLTHE_RECORD_COPY_NUMBER2) | |||
1240 | for (i = 0; i < n; i++) | |||
1241 | { | |||
1242 | gcc_assert (!new_bbs[i]->aux)((void)(!(!new_bbs[i]->aux) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1242, __FUNCTION__), 0 : 0)); | |||
1243 | new_bbs[i]->aux = (void *)(size_t)(j + 1); | |||
1244 | } | |||
1245 | ||||
1246 | /* Note whether the blocks and edges belong to an irreducible loop. */ | |||
1247 | if (add_irreducible_flag) | |||
1248 | { | |||
1249 | for (i = 0; i < n; i++) | |||
1250 | new_bbs[i]->flags |= BB_DUPLICATED; | |||
1251 | for (i = 0; i < n; i++) | |||
1252 | { | |||
1253 | edge_iterator ei; | |||
1254 | new_bb = new_bbs[i]; | |||
1255 | if (new_bb->loop_father == target) | |||
1256 | new_bb->flags |= BB_IRREDUCIBLE_LOOP; | |||
1257 | ||||
1258 | FOR_EACH_EDGE (ae, ei, new_bb->succs)for ((ei) = ei_start_1 (&((new_bb->succs))); ei_cond ( (ei), &(ae)); ei_next (&(ei))) | |||
1259 | if ((ae->dest->flags & BB_DUPLICATED) | |||
1260 | && (ae->src->loop_father == target | |||
1261 | || ae->dest->loop_father == target)) | |||
1262 | ae->flags |= EDGE_IRREDUCIBLE_LOOP; | |||
1263 | } | |||
1264 | for (i = 0; i < n; i++) | |||
1265 | new_bbs[i]->flags &= ~BB_DUPLICATED; | |||
1266 | } | |||
1267 | ||||
1268 | /* Redirect the special edges. */ | |||
1269 | if (is_latch) | |||
1270 | { | |||
1271 | redirect_edge_and_branch_force (latch_edge, new_bbs[0]); | |||
1272 | redirect_edge_and_branch_force (new_spec_edges[SE_LATCH], | |||
1273 | loop->header); | |||
1274 | set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch); | |||
1275 | latch = loop->latch = new_bbs[n - 1]; | |||
1276 | e = latch_edge = new_spec_edges[SE_LATCH]; | |||
1277 | } | |||
1278 | else | |||
1279 | { | |||
1280 | redirect_edge_and_branch_force (new_spec_edges[SE_LATCH], | |||
1281 | loop->header); | |||
1282 | redirect_edge_and_branch_force (e, new_bbs[0]); | |||
1283 | set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src); | |||
1284 | e = new_spec_edges[SE_LATCH]; | |||
1285 | } | |||
1286 | ||||
1287 | /* Record exit edge in this copy. */ | |||
1288 | if (orig && bitmap_bit_p (wont_exit, j + 1)) | |||
1289 | { | |||
1290 | if (to_remove) | |||
1291 | to_remove->safe_push (new_spec_edges[SE_ORIG]); | |||
1292 | force_edge_cold (new_spec_edges[SE_ORIG], true); | |||
1293 | ||||
1294 | /* Scale the frequencies of the blocks dominated by the exit. */ | |||
1295 | if (bbs_to_scale && scale_after_exit) | |||
1296 | { | |||
1297 | EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)for (bmp_iter_set_init (&(bi), (bbs_to_scale), (0), & (i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next (& (bi), &(i))) | |||
1298 | scale_bbs_frequencies_profile_count (new_bbs + i, 1, after_exit_num, | |||
1299 | after_exit_den); | |||
1300 | } | |||
1301 | } | |||
1302 | ||||
1303 | /* Record the first copy in the control flow order if it is not | |||
1304 | the original loop (i.e. in case of peeling). */ | |||
1305 | if (!first_active_latch) | |||
1306 | { | |||
1307 | memcpy (first_active, new_bbs, n * sizeof (basic_block)); | |||
1308 | first_active_latch = new_bbs[n - 1]; | |||
1309 | } | |||
1310 | ||||
1311 | /* Set counts and frequencies. */ | |||
1312 | if (flags & DLTHE_FLAG_UPDATE_FREQ1) | |||
1313 | { | |||
1314 | scale_bbs_frequencies (new_bbs, n, scale_act); | |||
1315 | scale_act = scale_act * scale_step[j]; | |||
1316 | } | |||
1317 | } | |||
1318 | free (new_bbs); | |||
1319 | free (orig_loops); | |||
1320 | ||||
1321 | /* Record the exit edge in the original loop body, and update the frequencies. */ | |||
1322 | if (orig && bitmap_bit_p (wont_exit, 0)) | |||
1323 | { | |||
1324 | if (to_remove) | |||
1325 | to_remove->safe_push (orig); | |||
1326 | force_edge_cold (orig, true); | |||
1327 | ||||
1328 | /* Scale the frequencies of the blocks dominated by the exit. */ | |||
1329 | if (bbs_to_scale && scale_after_exit) | |||
1330 | { | |||
1331 | EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)for (bmp_iter_set_init (&(bi), (bbs_to_scale), (0), & (i)); bmp_iter_set (&(bi), &(i)); bmp_iter_next (& (bi), &(i))) | |||
1332 | scale_bbs_frequencies_profile_count (bbs + i, 1, after_exit_num, | |||
1333 | after_exit_den); | |||
1334 | } | |||
1335 | } | |||
1336 | ||||
1337 | /* Update the original loop. */ | |||
1338 | if (!is_latch) | |||
1339 | set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src); | |||
1340 | if (flags & DLTHE_FLAG_UPDATE_FREQ1) | |||
1341 | { | |||
1342 | scale_bbs_frequencies (bbs, n, scale_main); | |||
1343 | free (scale_step); | |||
1344 | } | |||
1345 | ||||
1346 | /* Update dominators of outer blocks if affected. */ | |||
1347 | for (i = 0; i < n; i++) | |||
1348 | { | |||
1349 | basic_block dominated, dom_bb; | |||
1350 | unsigned j; | |||
1351 | ||||
1352 | bb = bbs[i]; | |||
1353 | ||||
1354 | auto_vec<basic_block> dom_bbs = get_dominated_by (CDI_DOMINATORS, bb); | |||
1355 | FOR_EACH_VEC_ELT (dom_bbs, j, dominated)for (j = 0; (dom_bbs).iterate ((j), &(dominated)); ++(j)) | |||
1356 | { | |||
1357 | if (flow_bb_inside_loop_p (loop, dominated)) | |||
1358 | continue; | |||
1359 | dom_bb = nearest_common_dominator ( | |||
1360 | CDI_DOMINATORS, first_active[i], first_active_latch); | |||
1361 | set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb); | |||
1362 | } | |||
1363 | } | |||
1364 | free (first_active); | |||
1365 | ||||
1366 | free (bbs); | |||
1367 | BITMAP_FREE (bbs_to_scale)((void) (bitmap_obstack_free ((bitmap) bbs_to_scale), (bbs_to_scale ) = (bitmap) nullptr)); | |||
1368 | ||||
1369 | return true; | |||
1370 | } | |||
1371 | ||||
1372 | /* A callback for make_forwarder block, to redirect all edges except for | |||
1373 | MFB_KJ_EDGE to the entry part. E is the edge for that we should decide | |||
1374 | whether to redirect it. */ | |||
1375 | ||||
1376 | edge mfb_kj_edge; | |||
1377 | bool | |||
1378 | mfb_keep_just (edge e) | |||
1379 | { | |||
1380 | return e != mfb_kj_edge; | |||
1381 | } | |||
1382 | ||||
1383 | /* True when a candidate preheader BLOCK has predecessors from LOOP. */ | |||
1384 | ||||
1385 | static bool | |||
1386 | has_preds_from_loop (basic_block block, class loop *loop) | |||
1387 | { | |||
1388 | edge e; | |||
1389 | edge_iterator ei; | |||
1390 | ||||
1391 | FOR_EACH_EDGE (e, ei, block->preds)for ((ei) = ei_start_1 (&((block->preds))); ei_cond (( ei), &(e)); ei_next (&(ei))) | |||
1392 | if (e->src->loop_father == loop) | |||
1393 | return true; | |||
1394 | return false; | |||
1395 | } | |||
1396 | ||||
1397 | /* Creates a pre-header for a LOOP. Returns newly created block. Unless | |||
1398 | CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single | |||
1399 | entry; otherwise we also force preheader block to have only one successor. | |||
1400 | When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block | |||
1401 | to be a fallthru predecessor to the loop header and to have only | |||
1402 | predecessors from outside of the loop. | |||
1403 | The function also updates dominators. */ | |||
1404 | ||||
1405 | basic_block | |||
1406 | create_preheader (class loop *loop, int flags) | |||
1407 | { | |||
1408 | edge e; | |||
1409 | basic_block dummy; | |||
1410 | int nentry = 0; | |||
1411 | bool irred = false; | |||
1412 | bool latch_edge_was_fallthru; | |||
1413 | edge one_succ_pred = NULLnullptr, single_entry = NULLnullptr; | |||
1414 | edge_iterator ei; | |||
1415 | ||||
1416 | FOR_EACH_EDGE (e, ei, loop->header->preds)for ((ei) = ei_start_1 (&((loop->header->preds))); ei_cond ((ei), &(e)); ei_next (&(ei))) | |||
1417 | { | |||
1418 | if (e->src == loop->latch) | |||
1419 | continue; | |||
1420 | irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0; | |||
1421 | nentry++; | |||
1422 | single_entry = e; | |||
1423 | if (single_succ_p (e->src)) | |||
1424 | one_succ_pred = e; | |||
1425 | } | |||
1426 | gcc_assert (nentry)((void)(!(nentry) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1426, __FUNCTION__), 0 : 0)); | |||
1427 | if (nentry == 1) | |||
1428 | { | |||
1429 | bool need_forwarder_block = false; | |||
1430 | ||||
1431 | /* We do not allow entry block to be the loop preheader, since we | |||
1432 | cannot emit code there. */ | |||
1433 | if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)) | |||
1434 | need_forwarder_block = true; | |||
1435 | else | |||
1436 | { | |||
1437 | /* If we want simple preheaders, also force the preheader to have | |||
1438 | just a single successor and a normal edge. */ | |||
1439 | if ((flags & CP_SIMPLE_PREHEADERS) | |||
1440 | && ((single_entry->flags & EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE )) | |||
1441 | || !single_succ_p (single_entry->src))) | |||
1442 | need_forwarder_block = true; | |||
1443 | /* If we want fallthru preheaders, also create forwarder block when | |||
1444 | preheader ends with a jump or has predecessors from loop. */ | |||
1445 | else if ((flags & CP_FALLTHRU_PREHEADERS) | |||
1446 | && (JUMP_P (BB_END (single_entry->src))(((enum rtx_code) ((single_entry->src)->il.x.rtl->end_ )->code) == JUMP_INSN) | |||
1447 | || has_preds_from_loop (single_entry->src, loop))) | |||
1448 | need_forwarder_block = true; | |||
1449 | } | |||
1450 | if (! need_forwarder_block) | |||
1451 | return NULLnullptr; | |||
1452 | } | |||
1453 | ||||
1454 | mfb_kj_edge = loop_latch_edge (loop); | |||
1455 | latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0; | |||
1456 | if (nentry == 1 | |||
1457 | && ((flags & CP_FALLTHRU_PREHEADERS) == 0 | |||
1458 | || (single_entry->flags & EDGE_CROSSING) == 0)) | |||
1459 | dummy = split_edge (single_entry); | |||
1460 | else | |||
1461 | { | |||
1462 | edge fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULLnullptr); | |||
1463 | dummy = fallthru->src; | |||
1464 | loop->header = fallthru->dest; | |||
1465 | } | |||
1466 | ||||
1467 | /* Try to be clever in placing the newly created preheader. The idea is to | |||
1468 | avoid breaking any "fallthruness" relationship between blocks. | |||
1469 | ||||
1470 | The preheader was created just before the header and all incoming edges | |||
1471 | to the header were redirected to the preheader, except the latch edge. | |||
1472 | So the only problematic case is when this latch edge was a fallthru | |||
1473 | edge: it is not anymore after the preheader creation so we have broken | |||
1474 | the fallthruness. We're therefore going to look for a better place. */ | |||
1475 | if (latch_edge_was_fallthru) | |||
1476 | { | |||
1477 | if (one_succ_pred) | |||
1478 | e = one_succ_pred; | |||
1479 | else | |||
1480 | e = EDGE_PRED (dummy, 0)(*(dummy)->preds)[(0)]; | |||
1481 | ||||
1482 | move_block_after (dummy, e->src); | |||
1483 | } | |||
1484 | ||||
1485 | if (irred) | |||
1486 | { | |||
1487 | dummy->flags |= BB_IRREDUCIBLE_LOOP; | |||
1488 | single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP; | |||
1489 | } | |||
1490 | ||||
1491 | if (dump_file) | |||
1492 | fprintf (dump_file, "Created preheader block for loop %i\n", | |||
1493 | loop->num); | |||
1494 | ||||
1495 | if (flags & CP_FALLTHRU_PREHEADERS) | |||
1496 | gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)((void)(!((single_succ_edge (dummy)->flags & EDGE_FALLTHRU ) && !(((enum rtx_code) ((dummy)->il.x.rtl->end_ )->code) == JUMP_INSN)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1497, __FUNCTION__), 0 : 0)) | |||
1497 | && !JUMP_P (BB_END (dummy)))((void)(!((single_succ_edge (dummy)->flags & EDGE_FALLTHRU ) && !(((enum rtx_code) ((dummy)->il.x.rtl->end_ )->code) == JUMP_INSN)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1497, __FUNCTION__), 0 : 0)); | |||
1498 | ||||
1499 | return dummy; | |||
1500 | } | |||
1501 | ||||
1502 | /* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */ | |||
1503 | ||||
1504 | void | |||
1505 | create_preheaders (int flags) | |||
1506 | { | |||
1507 | if (!current_loops((cfun + 0)->x_current_loops)) | |||
1508 | return; | |||
1509 | ||||
1510 | for (auto loop : loops_list (cfun(cfun + 0), 0)) | |||
1511 | create_preheader (loop, flags); | |||
1512 | loops_state_set (LOOPS_HAVE_PREHEADERS); | |||
1513 | } | |||
1514 | ||||
1515 | /* Forces all loop latches to have only single successor. */ | |||
1516 | ||||
1517 | void | |||
1518 | force_single_succ_latches (void) | |||
1519 | { | |||
1520 | edge e; | |||
1521 | ||||
1522 | for (auto loop : loops_list (cfun(cfun + 0), 0)) | |||
1523 | { | |||
1524 | if (loop->latch != loop->header && single_succ_p (loop->latch)) | |||
1525 | continue; | |||
1526 | ||||
1527 | e = find_edge (loop->latch, loop->header); | |||
1528 | gcc_checking_assert (e != NULL)((void)(!(e != nullptr) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1528, __FUNCTION__), 0 : 0)); | |||
1529 | ||||
1530 | split_edge (e); | |||
1531 | } | |||
1532 | loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES); | |||
1533 | } | |||
1534 | ||||
1535 | /* This function is called from loop_version. It splits the entry edge | |||
1536 | of the loop we want to version, adds the versioning condition, and | |||
1537 | adjust the edges to the two versions of the loop appropriately. | |||
1538 | e is an incoming edge. Returns the basic block containing the | |||
1539 | condition. | |||
1540 | ||||
1541 | --- edge e ---- > [second_head] | |||
1542 | ||||
1543 | Split it and insert new conditional expression and adjust edges. | |||
1544 | ||||
1545 | --- edge e ---> [cond expr] ---> [first_head] | |||
1546 | | | |||
1547 | +---------> [second_head] | |||
1548 | ||||
1549 | THEN_PROB is the probability of then branch of the condition. | |||
1550 | ELSE_PROB is the probability of else branch. Note that they may be both | |||
1551 | REG_BR_PROB_BASE when condition is IFN_LOOP_VECTORIZED or | |||
1552 | IFN_LOOP_DIST_ALIAS. */ | |||
1553 | ||||
1554 | static basic_block | |||
1555 | lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head, | |||
1556 | edge e, void *cond_expr, | |||
1557 | profile_probability then_prob, | |||
1558 | profile_probability else_prob) | |||
1559 | { | |||
1560 | basic_block new_head = NULLnullptr; | |||
1561 | edge e1; | |||
1562 | ||||
1563 | gcc_assert (e->dest == second_head)((void)(!(e->dest == second_head) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.cc" , 1563, __FUNCTION__), 0 : 0)); | |||
1564 | ||||
1565 | /* Split edge 'e'. This will create a new basic block, where we can | |||
1566 | insert conditional expr. */ | |||
1567 | new_head = split_edge (e); | |||
1568 | ||||
1569 | lv_add_condition_to_bb (first_head, second_head, new_head, | |||
1570 | cond_expr); | |||
1571 | ||||
1572 | /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there. */ | |||
1573 | e = single_succ_edge (new_head); | |||
1574 | e1 = make_edge (new_head, first_head, | |||
1575 | current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0); | |||
1576 | e1->probability = then_prob; | |||
1577 | e->probability = else_prob; | |||
1578 | ||||
1579 | set_immediate_dominator (CDI_DOMINATORS, first_head, new_head); | |||
1580 | set_immediate_dominator (CDI_DOMINATORS, second_head, new_head); | |||
1581 | ||||
1582 | /* Adjust loop header phi nodes. */ | |||
1583 | lv_adjust_loop_header_phi (first_head, second_head, new_head, e1); | |||
1584 | ||||
1585 | return new_head; | |||
1586 | } | |||
1587 | ||||
1588 | /* Main entry point for Loop Versioning transformation. | |||
1589 | ||||
1590 | This transformation given a condition and a loop, creates | |||
1591 | -if (condition) { loop_copy1 } else { loop_copy2 }, | |||
1592 | where loop_copy1 is the loop transformed in one way, and loop_copy2 | |||
1593 | is the loop transformed in another way (or unchanged). COND_EXPR | |||
1594 | may be a run time test for things that were not resolved by static | |||
1595 | analysis (overlapping ranges (anti-aliasing), alignment, etc.). | |||
1596 | ||||
1597 | If non-NULL, CONDITION_BB is set to the basic block containing the | |||
1598 | condition. | |||
1599 | ||||
1600 | THEN_PROB is the probability of the then edge of the if. THEN_SCALE | |||
1601 | is the ratio by that the frequencies in the original loop should | |||
1602 | be scaled. ELSE_SCALE is the ratio by that the frequencies in the | |||
1603 | new loop should be scaled. | |||
1604 | ||||
1605 | If PLACE_AFTER is true, we place the new loop after LOOP in the | |||
1606 | instruction stream, otherwise it is placed before LOOP. */ | |||
1607 | ||||
1608 | class loop * | |||
1609 | loop_version (class loop *loop, | |||
1610 | void *cond_expr, basic_block *condition_bb, | |||
1611 | profile_probability then_prob, profile_probability else_prob, | |||
1612 | profile_probability then_scale, profile_probability else_scale, | |||
1613 | bool place_after) | |||
1614 | { | |||
1615 | basic_block first_head, second_head; | |||
1616 | edge entry, latch_edge; | |||
1617 | int irred_flag; | |||
1618 | class loop *nloop; | |||
1619 | basic_block cond_bb; | |||
1620 | ||||
1621 | /* Record entry and latch edges for the loop */ | |||
1622 | entry = loop_preheader_edge (loop); | |||
1623 | irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP; | |||
1624 | entry->flags &= ~EDGE_IRREDUCIBLE_LOOP; | |||
1625 | ||||
1626 | /* Note down head of loop as first_head. */ | |||
1627 | first_head = entry->dest; | |||
1628 | ||||
1629 | /* 1) Duplicate loop on the entry edge. */ | |||
1630 | if (!cfg_hook_duplicate_loop_body_to_header_edge (loop, entry, 1, NULLnullptr, NULLnullptr, | |||
1631 | NULLnullptr, 0)) | |||
1632 | { | |||
1633 | entry->flags |= irred_flag; | |||
1634 | return NULLnullptr; | |||
1635 | } | |||
1636 | ||||
1637 | /* 2) loopify the duplicated new loop. */ | |||
1638 | latch_edge = single_succ_edge (get_bb_copy (loop->latch)); | |||
1639 | nloop = alloc_loop (); | |||
1640 | class loop *outer = loop_outer (latch_edge->dest->loop_father); | |||
1641 | edge new_header_edge = single_pred_edge (get_bb_copy (loop->header)); | |||
1642 | nloop->header = new_header_edge->dest; | |||
1643 | nloop->latch = latch_edge->src; | |||
1644 | loop_redirect_edge (latch_edge, nloop->header); | |||
1645 | ||||
1646 | /* Compute new loop. */ | |||
1647 | add_loop (nloop, outer); | |||
1648 | copy_loop_info (loop, nloop); | |||
1649 | set_loop_copy (loop, nloop); | |||
1650 | ||||
1651 | /* loopify redirected latch_edge. Update its PENDING_STMTS. */ | |||
1652 | lv_flush_pending_stmts (latch_edge); | |||
1653 | ||||
1654 | /* After duplication entry edge now points to new loop head block. | |||
1655 | Note down new head as second_head. */ | |||
1656 | second_head = entry->dest; | |||
1657 | ||||
1658 | /* 3) Split loop entry edge and insert new block with cond expr. */ | |||
1659 | cond_bb = lv_adjust_loop_entry_edge (first_head, second_head, | |||
1660 | entry, cond_expr, then_prob, else_prob); | |||
1661 | if (condition_bb) | |||
1662 | *condition_bb = cond_bb; | |||
1663 | ||||
1664 | if (!cond_bb) | |||
1665 | { | |||
1666 | entry->flags |= irred_flag; | |||
1667 | return NULLnullptr; | |||
1668 | } | |||
1669 | ||||
1670 | /* Add cond_bb to appropriate loop. */ | |||
1671 | if (cond_bb->loop_father) | |||
1672 | remove_bb_from_loops (cond_bb); | |||
1673 | add_bb_to_loop (cond_bb, outer); | |||
1674 | ||||
1675 | /* 4) Scale the original loop and new loop frequency. */ | |||
1676 | scale_loop_frequencies (loop, then_scale); | |||
1677 | scale_loop_frequencies (nloop, else_scale); | |||
1678 | update_dominators_in_loop (loop); | |||
1679 | update_dominators_in_loop (nloop); | |||
1680 | ||||
1681 | /* Adjust irreducible flag. */ | |||
1682 | if (irred_flag) | |||
1683 | { | |||
1684 | cond_bb->flags |= BB_IRREDUCIBLE_LOOP; | |||
1685 | loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP; | |||
1686 | loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP; | |||
1687 | single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP; | |||
1688 | } | |||
1689 | ||||
1690 | if (place_after) | |||
1691 | { | |||
1692 | basic_block *bbs = get_loop_body_in_dom_order (nloop), after; | |||
1693 | unsigned i; | |||
1694 | ||||
1695 | after = loop->latch; | |||
1696 | ||||
1697 | for (i = 0; i < nloop->num_nodes; i++) | |||
1698 | { | |||
1699 | move_block_after (bbs[i], after); | |||
1700 | after = bbs[i]; | |||
1701 | } | |||
1702 | free (bbs); | |||
1703 | } | |||
1704 | ||||
1705 | /* At this point condition_bb is loop preheader with two successors, | |||
1706 | first_head and second_head. Make sure that loop preheader has only | |||
1707 | one successor. */ | |||
1708 | split_edge (loop_preheader_edge (loop)); | |||
1709 | split_edge (loop_preheader_edge (nloop)); | |||
1710 | ||||
1711 | return nloop; | |||
1712 | } |
1 | /* Define control flow data structures for the CFG. |
2 | Copyright (C) 1987-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #ifndef GCC_BASIC_BLOCK_H |
21 | #define GCC_BASIC_BLOCK_H |
22 | |
23 | #include <profile-count.h> |
24 | |
25 | /* Control flow edge information. */ |
26 | class GTY((user)) edge_def { |
27 | public: |
28 | /* The two blocks at the ends of the edge. */ |
29 | basic_block src; |
30 | basic_block dest; |
31 | |
32 | /* Instructions queued on the edge. */ |
33 | union edge_def_insns { |
34 | gimple_seq g; |
35 | rtx_insn *r; |
36 | } insns; |
37 | |
38 | /* Auxiliary info specific to a pass. */ |
39 | void *aux; |
40 | |
41 | /* Location of any goto implicit in the edge. */ |
42 | location_t goto_locus; |
43 | |
44 | /* The index number corresponding to this edge in the edge vector |
45 | dest->preds. */ |
46 | unsigned int dest_idx; |
47 | |
48 | int flags; /* see cfg-flags.def */ |
49 | profile_probability probability; |
50 | |
51 | /* Return count of edge E. */ |
52 | inline profile_count count () const; |
53 | }; |
54 | |
55 | /* Masks for edge.flags. */ |
56 | #define DEF_EDGE_FLAG(NAME,IDX) EDGE_##NAME = 1 << IDX , |
57 | enum cfg_edge_flags { |
58 | #include "cfg-flags.def" |
59 | LAST_CFG_EDGE_FLAG /* this is only used for EDGE_ALL_FLAGS */ |
60 | }; |
61 | #undef DEF_EDGE_FLAG |
62 | |
63 | /* Bit mask for all edge flags. */ |
64 | #define EDGE_ALL_FLAGS((LAST_CFG_EDGE_FLAG - 1) * 2 - 1) ((LAST_CFG_EDGE_FLAG - 1) * 2 - 1) |
65 | |
66 | /* The following four flags all indicate something special about an edge. |
67 | Test the edge flags on EDGE_COMPLEX to detect all forms of "strange" |
68 | control flow transfers. */ |
69 | #define EDGE_COMPLEX(EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE ) \ |
70 | (EDGE_ABNORMAL | EDGE_ABNORMAL_CALL | EDGE_EH | EDGE_PRESERVE) |
71 | |
72 | struct GTY(()) rtl_bb_info { |
73 | /* The first insn of the block is embedded into bb->il.x. */ |
74 | /* The last insn of the block. */ |
75 | rtx_insn *end_; |
76 | |
77 | /* In CFGlayout mode points to insn notes/jumptables to be placed just before |
78 | and after the block. */ |
79 | rtx_insn *header_; |
80 | rtx_insn *footer_; |
81 | }; |
82 | |
83 | struct GTY(()) gimple_bb_info { |
84 | /* Sequence of statements in this block. */ |
85 | gimple_seq seq; |
86 | |
87 | /* PHI nodes for this block. */ |
88 | gimple_seq phi_nodes; |
89 | }; |
90 | |
91 | /* A basic block is a sequence of instructions with only one entry and |
92 | only one exit. If any one of the instructions are executed, they |
93 | will all be executed, and in sequence from first to last. |
94 | |
95 | There may be COND_EXEC instructions in the basic block. The |
96 | COND_EXEC *instructions* will be executed -- but if the condition |
97 | is false the conditionally executed *expressions* will of course |
98 | not be executed. We don't consider the conditionally executed |
99 | expression (which might have side-effects) to be in a separate |
100 | basic block because the program counter will always be at the same |
101 | location after the COND_EXEC instruction, regardless of whether the |
102 | condition is true or not. |
103 | |
104 | Basic blocks need not start with a label nor end with a jump insn. |
105 | For example, a previous basic block may just "conditionally fall" |
106 | into the succeeding basic block, and the last basic block need not |
107 | end with a jump insn. Block 0 is a descendant of the entry block. |
108 | |
109 | A basic block beginning with two labels cannot have notes between |
110 | the labels. |
111 | |
112 | Data for jump tables are stored in jump_insns that occur in no |
113 | basic block even though these insns can follow or precede insns in |
114 | basic blocks. */ |
115 | |
116 | /* Basic block information indexed by block number. */ |
117 | struct GTY((chain_next ("%h.next_bb"), chain_prev ("%h.prev_bb"))) basic_block_def { |
118 | /* The edges into and out of the block. */ |
119 | vec<edge, va_gc> *preds; |
120 | vec<edge, va_gc> *succs; |
121 | |
122 | /* Auxiliary info specific to a pass. */ |
123 | void *GTY ((skip (""))) aux; |
124 | |
125 | /* Innermost loop containing the block. */ |
126 | class loop *loop_father; |
127 | |
128 | /* The dominance and postdominance information node. */ |
129 | struct et_node * GTY ((skip (""))) dom[2]; |
130 | |
131 | /* Previous and next blocks in the chain. */ |
132 | basic_block prev_bb; |
133 | basic_block next_bb; |
134 | |
135 | union basic_block_il_dependent { |
136 | struct gimple_bb_info GTY ((tag ("0"))) gimple; |
137 | struct { |
138 | rtx_insn *head_; |
139 | struct rtl_bb_info * rtl; |
140 | } GTY ((tag ("1"))) x; |
141 | } GTY ((desc ("((%1.flags & BB_RTL) != 0)"))) il; |
142 | |
143 | /* Various flags. See cfg-flags.def. */ |
144 | int flags; |
145 | |
146 | /* The index of this block. */ |
147 | int index; |
148 | |
149 | /* Expected number of executions: calculated in profile.cc. */ |
150 | profile_count count; |
151 | }; |
152 | |
153 | /* This ensures that struct gimple_bb_info is smaller than |
154 | struct rtl_bb_info, so that inlining the former into basic_block_def |
155 | is the better choice. */ |
156 | STATIC_ASSERT (sizeof (rtl_bb_info) >= sizeof (gimple_bb_info))static_assert ((sizeof (rtl_bb_info) >= sizeof (gimple_bb_info )), "sizeof (rtl_bb_info) >= sizeof (gimple_bb_info)"); |
157 | |
158 | #define BB_FREQ_MAX10000 10000 |
159 | |
160 | /* Masks for basic_block.flags. */ |
161 | #define DEF_BASIC_BLOCK_FLAG(NAME,IDX) BB_##NAME = 1 << IDX , |
162 | enum cfg_bb_flags |
163 | { |
164 | #include "cfg-flags.def" |
165 | LAST_CFG_BB_FLAG /* this is only used for BB_ALL_FLAGS */ |
166 | }; |
167 | #undef DEF_BASIC_BLOCK_FLAG |
168 | |
169 | /* Bit mask for all basic block flags. */ |
170 | #define BB_ALL_FLAGS((LAST_CFG_BB_FLAG - 1) * 2 - 1) ((LAST_CFG_BB_FLAG - 1) * 2 - 1) |
171 | |
172 | /* Bit mask for all basic block flags that must be preserved. These are |
173 | the bit masks that are *not* cleared by clear_bb_flags. */ |
174 | #define BB_FLAGS_TO_PRESERVE(BB_DISABLE_SCHEDULE | BB_RTL | BB_NON_LOCAL_GOTO_TARGET | BB_HOT_PARTITION | BB_COLD_PARTITION) \ |
175 | (BB_DISABLE_SCHEDULE | BB_RTL | BB_NON_LOCAL_GOTO_TARGET \ |
176 | | BB_HOT_PARTITION | BB_COLD_PARTITION) |
177 | |
178 | /* Dummy bitmask for convenience in the hot/cold partitioning code. */ |
179 | #define BB_UNPARTITIONED0 0 |
180 | |
181 | /* Partitions, to be used when partitioning hot and cold basic blocks into |
182 | separate sections. */ |
183 | #define BB_PARTITION(bb)((bb)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)) ((bb)->flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)) |
184 | #define BB_SET_PARTITION(bb, part)do { basic_block bb_ = (bb); bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) | (part)); } while (0 ) do { \ |
185 | basic_block bb_ = (bb); \ |
186 | bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) \ |
187 | | (part)); \ |
188 | } while (0) |
189 | |
190 | #define BB_COPY_PARTITION(dstbb, srcbb)do { basic_block bb_ = (dstbb); bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) | (((srcbb)-> flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)))); } while ( 0) \ |
191 | BB_SET_PARTITION (dstbb, BB_PARTITION (srcbb))do { basic_block bb_ = (dstbb); bb_->flags = ((bb_->flags & ~(BB_HOT_PARTITION|BB_COLD_PARTITION)) | (((srcbb)-> flags & (BB_HOT_PARTITION|BB_COLD_PARTITION)))); } while ( 0) |
192 | |
/* Defines for accessing the fields of the CFG structure for function FN.  */
#define ENTRY_BLOCK_PTR_FOR_FN(FN)	     ((FN)->cfg->x_entry_block_ptr)
#define EXIT_BLOCK_PTR_FOR_FN(FN)	     ((FN)->cfg->x_exit_block_ptr)
#define basic_block_info_for_fn(FN)	     ((FN)->cfg->x_basic_block_info)
#define n_basic_blocks_for_fn(FN)	     ((FN)->cfg->x_n_basic_blocks)
#define n_edges_for_fn(FN)		     ((FN)->cfg->x_n_edges)
#define last_basic_block_for_fn(FN)	     ((FN)->cfg->x_last_basic_block)
#define label_to_block_map_for_fn(FN)	     ((FN)->cfg->x_label_to_block_map)
#define profile_status_for_fn(FN)	     ((FN)->cfg->x_profile_status)

#define BASIC_BLOCK_FOR_FN(FN,N) \
  ((*basic_block_info_for_fn (FN))[(N)])
#define SET_BASIC_BLOCK_FOR_FN(FN,N,BB) \
  ((*basic_block_info_for_fn (FN))[(N)] = (BB))

/* For iterating over basic blocks.  */
#define FOR_BB_BETWEEN(BB, FROM, TO, DIR) \
  for (BB = FROM; BB != TO; BB = BB->DIR)

#define FOR_EACH_BB_FN(BB, FN) \
  FOR_BB_BETWEEN (BB, (FN)->cfg->x_entry_block_ptr->next_bb, (FN)->cfg->x_exit_block_ptr, next_bb)

#define FOR_EACH_BB_REVERSE_FN(BB, FN) \
  FOR_BB_BETWEEN (BB, (FN)->cfg->x_exit_block_ptr->prev_bb, (FN)->cfg->x_entry_block_ptr, prev_bb)

/* For iterating over insns in basic block.  */
#define FOR_BB_INSNS(BB, INSN)			\
  for ((INSN) = BB_HEAD (BB);			\
       (INSN) && (INSN) != NEXT_INSN (BB_END (BB));	\
       (INSN) = NEXT_INSN (INSN))

/* For iterating over insns in basic block when we might remove the
   current insn.  */
#define FOR_BB_INSNS_SAFE(BB, INSN, CURR)			\
  for ((INSN) = BB_HEAD (BB), (CURR) = (INSN) ? NEXT_INSN ((INSN)): NULL;	\
       (INSN) && (INSN) != NEXT_INSN (BB_END (BB));	\
       (INSN) = (CURR), (CURR) = (INSN) ? NEXT_INSN ((INSN)) : NULL)

#define FOR_BB_INSNS_REVERSE(BB, INSN)		\
  for ((INSN) = BB_END (BB);			\
       (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB));	\
       (INSN) = PREV_INSN (INSN))

#define FOR_BB_INSNS_REVERSE_SAFE(BB, INSN, CURR)	\
  for ((INSN) = BB_END (BB),(CURR) = (INSN) ? PREV_INSN ((INSN)) : NULL;	\
       (INSN) && (INSN) != PREV_INSN (BB_HEAD (BB));	\
       (INSN) = (CURR), (CURR) = (INSN) ? PREV_INSN ((INSN)) : NULL)

/* Cycles through _all_ basic blocks, even the fake ones (entry and
   exit block).  */

#define FOR_ALL_BB_FN(BB, FN) \
  for (BB = ENTRY_BLOCK_PTR_FOR_FN (FN); BB; BB = BB->next_bb)
246 | |
247 | |
/* Stuff for recording basic block info.  */

/* For now, these will be functions (so that they can include checked casts
   to rtx_insn.   Once the underlying fields are converted from rtx
   to rtx_insn, these can be converted back to macros.  */

#define BB_HEAD(B)      (B)->il.x.head_
#define BB_END(B)       (B)->il.x.rtl->end_
#define BB_HEADER(B)    (B)->il.x.rtl->header_
#define BB_FOOTER(B)    (B)->il.x.rtl->footer_

/* Special block numbers [markers] for entry and exit.
   Neither of them is supposed to hold actual statements.  */
#define ENTRY_BLOCK (0)
#define EXIT_BLOCK (1)

/* The two blocks that are always in the cfg.  */
#define NUM_FIXED_BLOCKS (2)

/* This is the value which indicates no edge is present.  */
#define EDGE_INDEX_NO_EDGE	-1

/* EDGE_INDEX returns an integer index for an edge, or EDGE_INDEX_NO_EDGE
   if there is no edge between the 2 basic blocks.  */
#define EDGE_INDEX(el, pred, succ) (find_edge_index ((el), (pred), (succ)))

/* INDEX_EDGE_PRED_BB and INDEX_EDGE_SUCC_BB return a pointer to the basic
   block which is either the pred or succ end of the indexed edge.  */
#define INDEX_EDGE_PRED_BB(el, index)	((el)->index_to_edge[(index)]->src)
#define INDEX_EDGE_SUCC_BB(el, index)	((el)->index_to_edge[(index)]->dest)

/* INDEX_EDGE returns a pointer to the edge.  */
#define INDEX_EDGE(el, index)           ((el)->index_to_edge[(index)])

/* Number of edges in the compressed edge list.  */
#define NUM_EDGES(el)			((el)->num_edges)

/* BB is assumed to contain conditional jump.  Return the fallthru edge.  */
#define FALLTHRU_EDGE(bb)		(EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \
					 ? EDGE_SUCC ((bb), 0) : EDGE_SUCC ((bb), 1))

/* BB is assumed to contain conditional jump.  Return the branch edge.  */
#define BRANCH_EDGE(bb)			(EDGE_SUCC ((bb), 0)->flags & EDGE_FALLTHRU \
					 ? EDGE_SUCC ((bb), 1) : EDGE_SUCC ((bb), 0))

/* Return expected execution frequency of the edge E.  */
#define EDGE_FREQUENCY(e)		e->count ().to_frequency (cfun)

/* Compute a scale factor (or probability) suitable for scaling of
   gcov_type values via apply_probability() and apply_scale().  */
#define GCOV_COMPUTE_SCALE(num,den) \
  ((den) ? RDIV ((num) * REG_BR_PROB_BASE, (den)) : REG_BR_PROB_BASE)

/* Return nonzero if edge is critical.  */
#define EDGE_CRITICAL_P(e)		(EDGE_COUNT ((e)->src->succs) >= 2 \
					 && EDGE_COUNT ((e)->dest->preds) >= 2)

#define EDGE_COUNT(ev)			vec_safe_length (ev)
#define EDGE_I(ev,i)			(*ev)[(i)]
#define EDGE_PRED(bb,i)			(*(bb)->preds)[(i)]
#define EDGE_SUCC(bb,i)			(*(bb)->succs)[(i)]
309 | |
310 | /* Returns true if BB has precisely one successor. */ |
311 | |
312 | inline bool |
313 | single_succ_p (const_basic_block bb) |
314 | { |
315 | return EDGE_COUNT (bb->succs)vec_safe_length (bb->succs) == 1; |
316 | } |
317 | |
318 | /* Returns true if BB has precisely one predecessor. */ |
319 | |
320 | inline bool |
321 | single_pred_p (const_basic_block bb) |
322 | { |
323 | return EDGE_COUNT (bb->preds)vec_safe_length (bb->preds) == 1; |
324 | } |
325 | |
326 | /* Returns the single successor edge of basic block BB. Aborts if |
327 | BB does not have exactly one successor. */ |
328 | |
329 | inline edge |
330 | single_succ_edge (const_basic_block bb) |
331 | { |
332 | gcc_checking_assert (single_succ_p (bb))((void)(!(single_succ_p (bb)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 332, __FUNCTION__), 0 : 0)); |
333 | return EDGE_SUCC (bb, 0)(*(bb)->succs)[(0)]; |
334 | } |
335 | |
336 | /* Returns the single predecessor edge of basic block BB. Aborts |
337 | if BB does not have exactly one predecessor. */ |
338 | |
339 | inline edge |
340 | single_pred_edge (const_basic_block bb) |
341 | { |
342 | gcc_checking_assert (single_pred_p (bb))((void)(!(single_pred_p (bb)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 342, __FUNCTION__), 0 : 0)); |
343 | return EDGE_PRED (bb, 0)(*(bb)->preds)[(0)]; |
344 | } |
345 | |
346 | /* Returns the single successor block of basic block BB. Aborts |
347 | if BB does not have exactly one successor. */ |
348 | |
349 | inline basic_block |
350 | single_succ (const_basic_block bb) |
351 | { |
352 | return single_succ_edge (bb)->dest; |
353 | } |
354 | |
355 | /* Returns the single predecessor block of basic block BB. Aborts |
356 | if BB does not have exactly one predecessor.*/ |
357 | |
358 | inline basic_block |
359 | single_pred (const_basic_block bb) |
360 | { |
361 | return single_pred_edge (bb)->src; |
362 | } |
363 | |
364 | /* Iterator object for edges. */ |
365 | |
366 | struct edge_iterator { |
367 | unsigned index; |
368 | vec<edge, va_gc> **container; |
369 | }; |
370 | |
371 | inline vec<edge, va_gc> * |
372 | ei_container (edge_iterator i) |
373 | { |
374 | gcc_checking_assert (i.container)((void)(!(i.container) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 374, __FUNCTION__), 0 : 0)); |
375 | return *i.container; |
376 | } |
377 | |
#define ei_start(iter) ei_start_1 (&(iter))
#define ei_last(iter) ei_last_1 (&(iter))
380 | |
381 | /* Return an iterator pointing to the start of an edge vector. */ |
382 | inline edge_iterator |
383 | ei_start_1 (vec<edge, va_gc> **ev) |
384 | { |
385 | edge_iterator i; |
386 | |
387 | i.index = 0; |
388 | i.container = ev; |
389 | |
390 | return i; |
391 | } |
392 | |
393 | /* Return an iterator pointing to the last element of an edge |
394 | vector. */ |
395 | inline edge_iterator |
396 | ei_last_1 (vec<edge, va_gc> **ev) |
397 | { |
398 | edge_iterator i; |
399 | |
400 | i.index = EDGE_COUNT (*ev)vec_safe_length (*ev) - 1; |
401 | i.container = ev; |
402 | |
403 | return i; |
404 | } |
405 | |
406 | /* Is the iterator `i' at the end of the sequence? */ |
407 | inline bool |
408 | ei_end_p (edge_iterator i) |
409 | { |
410 | return (i.index == EDGE_COUNT (ei_container (i))vec_safe_length (ei_container (i))); |
411 | } |
412 | |
413 | /* Is the iterator `i' at one position before the end of the |
414 | sequence? */ |
415 | inline bool |
416 | ei_one_before_end_p (edge_iterator i) |
417 | { |
418 | return (i.index + 1 == EDGE_COUNT (ei_container (i))vec_safe_length (ei_container (i))); |
419 | } |
420 | |
421 | /* Advance the iterator to the next element. */ |
422 | inline void |
423 | ei_next (edge_iterator *i) |
424 | { |
425 | gcc_checking_assert (i->index < EDGE_COUNT (ei_container (*i)))((void)(!(i->index < vec_safe_length (ei_container (*i) )) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 425, __FUNCTION__), 0 : 0)); |
426 | i->index++; |
427 | } |
428 | |
429 | /* Move the iterator to the previous element. */ |
430 | inline void |
431 | ei_prev (edge_iterator *i) |
432 | { |
433 | gcc_checking_assert (i->index > 0)((void)(!(i->index > 0) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 433, __FUNCTION__), 0 : 0)); |
434 | i->index--; |
435 | } |
436 | |
437 | /* Return the edge pointed to by the iterator `i'. */ |
438 | inline edge |
439 | ei_edge (edge_iterator i) |
440 | { |
441 | return EDGE_I (ei_container (i), i.index)(*ei_container (i))[(i.index)]; |
442 | } |
443 | |
444 | /* Return an edge pointed to by the iterator. Do it safely so that |
445 | NULL is returned when the iterator is pointing at the end of the |
446 | sequence. */ |
447 | inline edge |
448 | ei_safe_edge (edge_iterator i) |
449 | { |
450 | return !ei_end_p (i) ? ei_edge (i) : NULLnullptr; |
451 | } |
452 | |
453 | /* Return 1 if we should continue to iterate. Return 0 otherwise. |
454 | *Edge P is set to the next edge if we are to continue to iterate |
455 | and NULL otherwise. */ |
456 | |
457 | inline bool |
458 | ei_cond (edge_iterator ei, edge *p) |
459 | { |
460 | if (!ei_end_p (ei)) |
461 | { |
462 | *p = ei_edge (ei); |
463 | return 1; |
464 | } |
465 | else |
466 | { |
467 | *p = NULLnullptr; |
468 | return 0; |
469 | } |
470 | } |
471 | |
/* This macro serves as a convenient way to iterate each edge in a
   vector of predecessor or successor edges.  It must not be used when
   an element might be removed during the traversal, otherwise
   elements will be missed.  Instead, use a for-loop like that shown
   in the following pseudo-code:

   FOR (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
     {
	IF (e != taken_edge)
	  remove_edge (e);
	ELSE
	  ei_next (&ei);
     }
*/

#define FOR_EACH_EDGE(EDGE,ITER,EDGE_VEC) \
  for ((ITER) = ei_start ((EDGE_VEC)); \
       ei_cond ((ITER), &(EDGE)); \
       ei_next (&(ITER)))
491 | |
/* Mode flags for cleanup_cfg and related passes.  */
#define CLEANUP_EXPENSIVE	1	/* Do relatively expensive optimizations
					   except for edge forwarding */
#define CLEANUP_CROSSJUMP	2	/* Do crossjumping.  */
#define CLEANUP_POST_REGSTACK	4	/* We run after reg-stack and need
					   to care REG_DEAD notes.  */
#define CLEANUP_THREADING	8	/* Do jump threading.  */
#define CLEANUP_NO_INSN_DEL	16	/* Do not try to delete trivially dead
					   insns.  */
#define CLEANUP_CFGLAYOUT	32	/* Do cleanup in cfglayout mode.  */
#define CLEANUP_CFG_CHANGED	64	/* The caller changed the CFG.  */
#define CLEANUP_NO_PARTITIONING	128	/* Do not try to fix partitions.  */
#define CLEANUP_FORCE_FAST_DCE	0x100	/* Force run_fast_dce to be called
					   at least once.  */
505 | |
506 | /* Return true if BB is in a transaction. */ |
507 | |
508 | inline bool |
509 | bb_in_transaction (basic_block bb) |
510 | { |
511 | return bb->flags & BB_IN_TRANSACTION; |
512 | } |
513 | |
514 | /* Return true when one of the predecessor edges of BB is marked with EDGE_EH. */ |
515 | inline bool |
516 | bb_has_eh_pred (basic_block bb) |
517 | { |
518 | edge e; |
519 | edge_iterator ei; |
520 | |
521 | FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei) , &(e)); ei_next (&(ei))) |
522 | { |
523 | if (e->flags & EDGE_EH) |
524 | return true; |
525 | } |
526 | return false; |
527 | } |
528 | |
529 | /* Return true when one of the predecessor edges of BB is marked with EDGE_ABNORMAL. */ |
530 | inline bool |
531 | bb_has_abnormal_pred (basic_block bb) |
532 | { |
533 | edge e; |
534 | edge_iterator ei; |
535 | |
536 | FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei) , &(e)); ei_next (&(ei))) |
537 | { |
538 | if (e->flags & EDGE_ABNORMAL) |
539 | return true; |
540 | } |
541 | return false; |
542 | } |
543 | |
544 | /* Return the fallthru edge in EDGES if it exists, NULL otherwise. */ |
545 | inline edge |
546 | find_fallthru_edge (vec<edge, va_gc> *edges) |
547 | { |
548 | edge e; |
549 | edge_iterator ei; |
550 | |
551 | FOR_EACH_EDGE (e, ei, edges)for ((ei) = ei_start_1 (&((edges))); ei_cond ((ei), & (e)); ei_next (&(ei))) |
552 | if (e->flags & EDGE_FALLTHRU) |
553 | break; |
554 | |
555 | return e; |
556 | } |
557 | |
558 | /* Check tha probability is sane. */ |
559 | |
560 | inline void |
561 | check_probability (int prob) |
562 | { |
563 | gcc_checking_assert (prob >= 0 && prob <= REG_BR_PROB_BASE)((void)(!(prob >= 0 && prob <= 10000) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/basic-block.h" , 563, __FUNCTION__), 0 : 0)); |
564 | } |
565 | |
566 | /* Given PROB1 and PROB2, return PROB1*PROB2/REG_BR_PROB_BASE. |
567 | Used to combine BB probabilities. */ |
568 | |
569 | inline int |
570 | combine_probabilities (int prob1, int prob2) |
571 | { |
572 | check_probability (prob1); |
573 | check_probability (prob2); |
574 | return RDIV (prob1 * prob2, REG_BR_PROB_BASE)(((prob1 * prob2) + (10000) / 2) / (10000)); |
575 | } |
576 | |
577 | /* Apply scale factor SCALE on frequency or count FREQ. Use this |
578 | interface when potentially scaling up, so that SCALE is not |
579 | constrained to be < REG_BR_PROB_BASE. */ |
580 | |
581 | inline gcov_type |
582 | apply_scale (gcov_type freq, gcov_type scale) |
583 | { |
584 | return RDIV (freq * scale, REG_BR_PROB_BASE)(((freq * scale) + (10000) / 2) / (10000)); |
585 | } |
586 | |
587 | /* Apply probability PROB on frequency or count FREQ. */ |
588 | |
589 | inline gcov_type |
590 | apply_probability (gcov_type freq, int prob) |
591 | { |
592 | check_probability (prob); |
593 | return apply_scale (freq, prob); |
594 | } |
595 | |
596 | /* Return inverse probability for PROB. */ |
597 | |
598 | inline int |
599 | inverse_probability (int prob1) |
600 | { |
601 | check_probability (prob1); |
602 | return REG_BR_PROB_BASE10000 - prob1; |
603 | } |
604 | |
605 | /* Return true if BB has at least one abnormal outgoing edge. */ |
606 | |
607 | inline bool |
608 | has_abnormal_or_eh_outgoing_edge_p (basic_block bb) |
609 | { |
610 | edge e; |
611 | edge_iterator ei; |
612 | |
613 | FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei) , &(e)); ei_next (&(ei))) |
614 | if (e->flags & (EDGE_ABNORMAL | EDGE_EH)) |
615 | return true; |
616 | |
617 | return false; |
618 | } |
619 | |
620 | /* Return true when one of the predecessor edges of BB is marked with |
621 | EDGE_ABNORMAL_CALL or EDGE_EH. */ |
622 | |
623 | inline bool |
624 | has_abnormal_call_or_eh_pred_edge_p (basic_block bb) |
625 | { |
626 | edge e; |
627 | edge_iterator ei; |
628 | |
629 | FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei) , &(e)); ei_next (&(ei))) |
630 | if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH)) |
631 | return true; |
632 | |
633 | return false; |
634 | } |
635 | |
/* Return count of edge E.  */
inline profile_count edge_def::count () const
{
  /* The edge's execution count is the source block's count scaled by
     the probability of taking this edge.  */
  return src->count.apply_probability (probability);
}
641 | |
642 | #endif /* GCC_BASIC_BLOCK_H */ |