Bug Summary

File: build/gcc/vec.h
Warning: line 814, column 10
Called C++ object pointer is null
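The diagnostic means a non-static member function is called through an object pointer that the analyzer believes can be null on some path. A reduced, hypothetical illustration of the flagged pattern (not taken from vec.h):

struct S
{
  int m_num;
  int length () const { return m_num; }
};

int
f (S *p)
{
  /* If any path reaches this call with p == nullptr, 'this' is null
     inside length () -- the "Called C++ object pointer is null"
     warning.  */
  return p->length ();
}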

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name cfgloopmanip.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/13.0.0 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11/backward -internal-isystem /usr/lib64/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-error=format-diag -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2021-11-20-133755-20252-1/report-Mk12LS.plist -x c++ /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.c

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloopmanip.c

1/* Loop manipulation code for GNU compiler.
2 Copyright (C) 2002-2021 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "rtl.h"
25#include "tree.h"
26#include "gimple.h"
27#include "cfghooks.h"
28#include "cfganal.h"
29#include "cfgloop.h"
30#include "gimple-iterator.h"
31#include "gimplify-me.h"
32#include "tree-ssa-loop-manip.h"
33#include "dumpfile.h"
34
35static void copy_loops_to (class loop **, int,
36 class loop *);
37static void loop_redirect_edge (edge, basic_block);
38static void remove_bbs (basic_block *, int);
39static bool rpe_enum_p (const_basic_block, const void *);
40static int find_path (edge, basic_block **);
41static void fix_loop_placements (class loop *, bool *);
42static bool fix_bb_placement (basic_block);
43static void fix_bb_placements (basic_block, bool *, bitmap);
44
45/* Checks whether basic block BB is dominated by DATA. */
46static bool
47rpe_enum_p (const_basic_block bb, const void *data)
48{
49 return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) data);
50}
51
52/* Remove basic blocks BBS. NBBS is the number of the basic blocks. */
53
54static void
55remove_bbs (basic_block *bbs, int nbbs)
56{
57 int i;
58
59 for (i = 0; i < nbbs; i++)
60 delete_basic_block (bbs[i]);
61}
62
63/* Find path -- i.e. the basic blocks dominated by edge E and put them
64 into array BBS, which will be allocated large enough to contain them.
65 E->dest must have exactly one predecessor for this to work (it is
66 easy to achieve and we do not put it here because we do not want to
67 alter anything by this function). The number of basic blocks in the
68 path is returned. */
69static int
70find_path (edge e, basic_block **bbs)
71{
72 gcc_assert (EDGE_COUNT (e->dest->preds) <= 1);
73
74 /* Find bbs in the path. */
75 *bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
76 return dfs_enumerate_from (e->dest, 0, rpe_enum_p, *bbs,
77 n_basic_blocks_for_fn (cfun), e->dest);
78}
79
80/* Fix placement of basic block BB inside loop hierarchy --
81 Let L be the loop to which BB belongs. Then every successor of BB must either
82 1) belong to some superloop of loop L, or
83 2) be a header of loop K such that K->outer is superloop of L
84 Returns true if we had to move BB into other loop to enforce this condition,
85 false if the placement of BB was already correct (provided that placements
86 of its successors are correct). */
87static bool
88fix_bb_placement (basic_block bb)
89{
90 edge e;
91 edge_iterator ei;
92 class loop *loop = current_loops->tree_root, *act;
93
94 FOR_EACH_EDGE (e, ei, bb->succs)
95 {
96 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
97 continue;
98
99 act = e->dest->loop_father;
100 if (act->header == e->dest)
101 act = loop_outer (act);
102
103 if (flow_loop_nested_p (loop, act))
104 loop = act;
105 }
106
107 if (loop == bb->loop_father)
108 return false;
109
110 remove_bb_from_loops (bb);
111 add_bb_to_loop (bb, loop);
112
113 return true;
114}
115
116/* Fix placement of LOOP inside loop tree, i.e. find the innermost superloop
117 of LOOP to which at least one exit edge of LOOP leads, and set it
118 as the immediate superloop of LOOP. Return true if the immediate superloop
119 of LOOP changed.
120
121 IRRED_INVALIDATED is set to true if a change in the loop structures might
122 invalidate the information about irreducible regions. */
123
124static bool
125fix_loop_placement (class loop *loop, bool *irred_invalidated)
126{
127 unsigned i;
128 edge e;
129 auto_vec<edge> exits = get_loop_exit_edges (loop);
130 class loop *father = current_loops->tree_root, *act;
131 bool ret = false;
132
133 FOR_EACH_VEC_ELT (exits, i, e)
134 {
135 act = find_common_loop (loop, e->dest->loop_father);
136 if (flow_loop_nested_p (father, act))
137 father = act;
138 }
139
140 if (father != loop_outer (loop))
141 {
142 for (act = loop_outer (loop); act != father; act = loop_outer (act))
143 act->num_nodes -= loop->num_nodes;
144 flow_loop_tree_node_remove (loop);
145 flow_loop_tree_node_add (father, loop);
146
147 /* The exit edges of LOOP no longer exit its original immediate
148 superloops; remove them from the appropriate exit lists. */
149 FOR_EACH_VEC_ELT (exits, i, e)
150 {
151 /* We may need to recompute irreducible loops. */
152 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
153 *irred_invalidated = true;
154 rescan_loop_exit (e, false, false);
155 }
156
157 ret = true;
158 }
159
160 return ret;
161}
162
163/* Fix placements of basic blocks inside loop hierarchy stored in loops; i.e.
164 enforce condition stated in description of fix_bb_placement. We
165 start from basic block FROM that had some of its successors removed, so that
166 its placement no longer has to be correct, and iteratively fix placement of
167 its predecessors that may change if placement of FROM changed. Also fix
168 placement of subloops of FROM->loop_father, that might also be altered due
169 to this change; the condition for them is similar, except that instead of
170 successors we consider edges coming out of the loops.
171
172 If the changes may invalidate the information about irreducible regions,
173 IRRED_INVALIDATED is set to true.
174
175 If LOOP_CLOSED_SSA_INVALIDATED is non-zero then all basic blocks with
176 changed loop_father are collected there. */
177
178static void
179fix_bb_placements (basic_block from,
180 bool *irred_invalidated,
181 bitmap loop_closed_ssa_invalidated)
182{
183 basic_block *queue, *qtop, *qbeg, *qend;
184 class loop *base_loop, *target_loop;
185 edge e;
186
187 /* We pass through blocks back-reachable from FROM, testing whether some
188 of their successors moved to outer loop. It may be necessary to
189 iterate several times, but it is finite, as we stop unless we move
190 the basic block up the loop structure. The whole story is a bit
191 more complicated due to presence of subloops, those are moved using
192 fix_loop_placement. */
193
194 base_loop = from->loop_father;
195 /* If we are already in the outermost loop, the basic blocks cannot be moved
196 outside of it. If FROM is the header of the base loop, it cannot be moved
197 outside of it, either. In both cases, we can end now. */
198 if (base_loop == current_loops->tree_root
199 || from == base_loop->header)
200 return;
201
202 auto_sbitmap in_queue (last_basic_block_for_fn (cfun));
203 bitmap_clear (in_queue);
204 bitmap_set_bit (in_queue, from->index);
205 /* Prevent us from going out of the base_loop. */
206 bitmap_set_bit (in_queue, base_loop->header->index);
207
208 queue = XNEWVEC (basic_block, base_loop->num_nodes + 1);
209 qtop = queue + base_loop->num_nodes + 1;
210 qbeg = queue;
211 qend = queue + 1;
212 *qbeg = from;
213
214 while (qbeg != qend)
215 {
216 edge_iterator ei;
217 from = *qbeg;
218 qbeg++;
219 if (qbeg == qtop)
220 qbeg = queue;
221 bitmap_clear_bit (in_queue, from->index);
222
223 if (from->loop_father->header == from)
224 {
225 /* Subloop header, maybe move the loop upward. */
226 if (!fix_loop_placement (from->loop_father, irred_invalidated))
227 continue;
228 target_loop = loop_outer (from->loop_father);
229 if (loop_closed_ssa_invalidated)
230 {
231 basic_block *bbs = get_loop_body (from->loop_father);
232 for (unsigned i = 0; i < from->loop_father->num_nodes; ++i)
233 bitmap_set_bit (loop_closed_ssa_invalidated, bbs[i]->index);
234 free (bbs);
235 }
236 }
237 else
238 {
239 /* Ordinary basic block. */
240 if (!fix_bb_placement (from))
241 continue;
242 target_loop = from->loop_father;
243 if (loop_closed_ssa_invalidated)
244 bitmap_set_bit (loop_closed_ssa_invalidated, from->index);
245 }
246
247 FOR_EACH_EDGE (e, ei, from->succs)
248 {
249 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
250 *irred_invalidated = true;
251 }
252
253 /* Something has changed, insert predecessors into queue. */
254 FOR_EACH_EDGE (e, ei, from->preds)
255 {
256 basic_block pred = e->src;
257 class loop *nca;
258
259 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
260 *irred_invalidated = true;
261
262 if (bitmap_bit_p (in_queue, pred->index))
263 continue;
264
265 /* If it is a subloop, then either it was not moved, or
266 the path up the loop tree from base_loop does not contain
267 it. */
268 nca = find_common_loop (pred->loop_father, base_loop);
269 if (pred->loop_father != base_loop
270 && (nca == base_loop
271 || nca != pred->loop_father))
272 pred = pred->loop_father->header;
273 else if (!flow_loop_nested_p (target_loop, pred->loop_father))
274 {
275 /* If PRED is already higher in the loop hierarchy than the
276 TARGET_LOOP to which we moved FROM, the change of the position
277 of FROM does not affect the position of PRED, so there is no
278 point in processing it. */
279 continue;
280 }
281
282 if (bitmap_bit_p (in_queue, pred->index))
283 continue;
284
285 /* Schedule the basic block. */
286 *qend = pred;
287 qend++;
288 if (qend == qtop)
289 qend = queue;
290 bitmap_set_bit (in_queue, pred->index);
291 }
292 }
293 free (queue);
294}
295
296/* Removes path beginning at edge E, i.e. remove basic blocks dominated by E
297 and update loop structures and dominators. Return true if we were able
298 to remove the path, false otherwise (and nothing is affected then). */
299bool
300remove_path (edge e, bool *irred_invalidated,
301 bitmap loop_closed_ssa_invalidated)
302{
303 edge ae;
304 basic_block *rem_bbs, *bord_bbs, from, bb;
305 vec<basic_block> dom_bbs;
306 int i, nrem, n_bord_bbs;
307 bool local_irred_invalidated = false;
308 edge_iterator ei;
309 class loop *l, *f;
310
311 if (! irred_invalidated)
312 irred_invalidated = &local_irred_invalidated;
313
314 if (!can_remove_branch_p (e))
315 return false;
316
317 /* Keep track of whether we need to update information about irreducible
318 regions. This is the case if the removed area is a part of the
319 irreducible region, or if the set of basic blocks that belong to a loop
320 that is inside an irreducible region is changed, or if such a loop is
321 removed. */
322 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
323 *irred_invalidated = true;
324
325 /* We need to check whether basic blocks are dominated by the edge
326 e, but we only have basic block dominators. This is easy to
327 fix -- when e->dest has exactly one predecessor, this corresponds
328 to blocks dominated by e->dest, if not, split the edge. */
329 if (!single_pred_p (e->dest))
330 e = single_pred_edge (split_edge (e));
331
332 /* It may happen that by removing path we remove one or more loops
333 we belong to. In this case first unloop the loops, then proceed
334 normally. We may assume that e->dest is not a header of any loop,
335 as it now has exactly one predecessor. */
336 for (l = e->src->loop_father; loop_outer (l); l = f)
337 {
338 f = loop_outer (l);
339 if (dominated_by_p (CDI_DOMINATORS, l->latch, e->dest))
340 unloop (l, irred_invalidated, loop_closed_ssa_invalidated);
341 }
342
343 /* Identify the path. */
344 nrem = find_path (e, &rem_bbs);
345
346 n_bord_bbs = 0;
347 bord_bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
348 auto_sbitmap seen (last_basic_block_for_fn (cfun));
349 bitmap_clear (seen);
350
351 /* Find "border" blocks -- i.e. those with a predecessor in the removed path. */
352 for (i = 0; i < nrem; i++)
353 bitmap_set_bit (seen, rem_bbs[i]->index);
354 if (!*irred_invalidated)
355 FOR_EACH_EDGE (ae, ei, e->src->succs)
356 if (ae != e && ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
357 && !bitmap_bit_p (seen, ae->dest->index)
358 && ae->flags & EDGE_IRREDUCIBLE_LOOP)
359 {
360 *irred_invalidated = true;
361 break;
362 }
363
364 for (i = 0; i < nrem; i++)
365 {
366 FOR_EACH_EDGE (ae, ei, rem_bbs[i]->succs)
367 if (ae->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
368 && !bitmap_bit_p (seen, ae->dest->index))
369 {
370 bitmap_set_bit (seen, ae->dest->index);
371 bord_bbs[n_bord_bbs++] = ae->dest;
372
373 if (ae->flags & EDGE_IRREDUCIBLE_LOOP)
374 *irred_invalidated = true;
375 }
376 }
377
378 /* Remove the path. */
379 from = e->src;
380 remove_branch (e);
381 dom_bbs.create (0);
382
383 /* Cancel loops contained in the path. */
384 for (i = 0; i < nrem; i++)
385 if (rem_bbs[i]->loop_father->header == rem_bbs[i])
386 cancel_loop_tree (rem_bbs[i]->loop_father);
387
388 remove_bbs (rem_bbs, nrem);
389 free (rem_bbs);
390
391 /* Find blocks whose dominators may be affected. */
392 bitmap_clear (seen);
393 for (i = 0; i < n_bord_bbs; i++)
394 {
395 basic_block ldom;
396
397 bb = get_immediate_dominator (CDI_DOMINATORS, bord_bbs[i]);
398 if (bitmap_bit_p (seen, bb->index))
399 continue;
400 bitmap_set_bit (seen, bb->index);
401
402 for (ldom = first_dom_son (CDI_DOMINATORS, bb);
403 ldom;
404 ldom = next_dom_son (CDI_DOMINATORS, ldom))
405 if (!dominated_by_p (CDI_DOMINATORS, from, ldom))
406 dom_bbs.safe_push (ldom);
407 }
408
409 /* Recount dominators. */
410 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, true);
411 dom_bbs.release ();
412 free (bord_bbs);
413
414 /* Fix placements of basic blocks inside loops and the placement of
415 loops in the loop tree. */
416 fix_bb_placements (from, irred_invalidated, loop_closed_ssa_invalidated);
417 fix_loop_placements (from->loop_father, irred_invalidated);
418
419 if (local_irred_invalidated
420 && loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
421 mark_irreducible_loops ();
422
423 return true;
424}
425
426/* Creates place for a new LOOP in loops structure of FN. */
427
428void
429place_new_loop (struct function *fn, class loop *loop)
430{
431 loop->num = number_of_loops (fn);
5. Calling 'number_of_loops'
12. Returning from 'number_of_loops'
432 vec_safe_push (loops_for_fn (fn)->larray, loop);
13. Passing value via 1st parameter 'v'
14. Calling 'vec_safe_push<loop *, va_gc>'
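The path ends inside vec_safe_push at vec.h line 814, where a member function of the vec is invoked through loops_for_fn (fn)->larray, a pointer the analyzer tracks as possibly null at that point. The "safe" variants are meant to allocate the vector on first use; a runnable toy sketch of that allocate-on-demand guard, using hypothetical stand-in types rather than GCC's real vec:

#include <vector>

/* Hypothetical stand-in for the loops structure's 'larray' member.  */
struct toy_loops { std::vector<int> *larray = nullptr; };

/* Toy analogue of vec_safe_push: allocate on first use so the member
   call below never goes through a null object pointer.  */
static void
toy_vec_safe_push (std::vector<int> *&v, int obj)
{
  if (!v)
    v = new std::vector<int> ();
  v->push_back (obj);
}

/* Usage: toy_loops l; toy_vec_safe_push (l.larray, 42);  */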
433}
434
435/* Given LOOP structure with filled header and latch, find the body of the
436 corresponding loop and add it to loops tree. Insert the LOOP as a son of
437 outer. */
438
439void
440add_loop (class loop *loop, class loop *outer)
441{
442 basic_block *bbs;
443 int i, n;
444 class loop *subloop;
445 edge e;
446 edge_iterator ei;
447
448 /* Add it to loop structure. */
449 place_new_loop (cfun, loop);
4. Calling 'place_new_loop'
450 flow_loop_tree_node_add (outer, loop);
451
452 /* Find its nodes. */
453 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
454 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun));
455
456 for (i = 0; i < n; i++)
457 {
458 if (bbs[i]->loop_father == outer)
459 {
460 remove_bb_from_loops (bbs[i]);
461 add_bb_to_loop (bbs[i], loop);
462 continue;
463 }
464
465 loop->num_nodes++;
466
467 /* If we find a direct subloop of OUTER, move it to LOOP. */
468 subloop = bbs[i]->loop_father;
469 if (loop_outer (subloop) == outer
470 && subloop->header == bbs[i])
471 {
472 flow_loop_tree_node_remove (subloop);
473 flow_loop_tree_node_add (loop, subloop);
474 }
475 }
476
477 /* Update the information about loop exit edges. */
478 for (i = 0; i < n; i++)
479 {
480 FOR_EACH_EDGE (e, ei, bbs[i]->succs)
481 {
482 rescan_loop_exit (e, false, false);
483 }
484 }
485
486 free (bbs);
487}
488
489/* Scale profile of loop by P. */
490
491void
492scale_loop_frequencies (class loop *loop, profile_probability p)
493{
494 basic_block *bbs;
495
496 bbs = get_loop_body (loop);
497 scale_bbs_frequencies (bbs, loop->num_nodes, p);
498 free (bbs);
499}
500
501/* Scale profile in LOOP by P.
502 If ITERATION_BOUND is non-zero, scale even further if loop is predicted
503 to iterate too many times.
504 Before calling this function, preheader block profile should be already
505 scaled to final count. This is necessary because loop iterations are
506 determined by comparing header edge count to latch edge count and thus
507 they need to be scaled synchronously. */
508
509void
510scale_loop_profile (class loop *loop, profile_probability p,
511 gcov_type iteration_bound)
512{
513 edge e, preheader_e;
514 edge_iterator ei;
515
516 if (dump_file && (dump_flags & TDF_DETAILS))
517 {
518 fprintf (dump_file, ";; Scaling loop %i with scale ",
519 loop->num);
520 p.dump (dump_file);
521 fprintf (dump_file, " bounding iterations to %i\n",
522 (int)iteration_bound);
523 }
524
525 /* Scale the probabilities. */
526 scale_loop_frequencies (loop, p);
527
528 if (iteration_bound == 0)
529 return;
530
531 gcov_type iterations = expected_loop_iterations_unbounded (loop, NULL, true);
532
533 if (dump_file && (dump_flags & TDF_DETAILS))
534 {
535 fprintf (dump_file, ";; guessed iterations after scaling %i\n",
536 (int)iterations);
537 }
538
539 /* See if loop is predicted to iterate too many times. */
540 if (iterations <= iteration_bound)
541 return;
542
543 preheader_e = loop_preheader_edge (loop);
544
545 /* We could handle also loops without preheaders, but bounding is
546 currently used only by optimizers that have preheaders constructed. */
547 gcc_checking_assert (preheader_e);
548 profile_count count_in = preheader_e->count ();
549
550 if (count_in > profile_count::zero ()
551 && loop->header->count.initialized_p ())
552 {
553 profile_count count_delta = profile_count::zero ();
554
555 e = single_exit (loop);
556 if (e)
557 {
558 edge other_e;
559 FOR_EACH_EDGE (other_e, ei, e->src->succs)
560 if (!(other_e->flags & (EDGE_ABNORMAL | EDGE_FAKE))
561 && e != other_e)
562 break;
563
564 /* Probability of exit must be 1/iterations. */
565 count_delta = e->count ();
566 e->probability = profile_probability::always ()
567 .apply_scale (1, iteration_bound);
568 other_e->probability = e->probability.invert ();
569
570 /* In code below we only handle the following two updates. */
571 if (other_e->dest != loop->header
572 && other_e->dest != loop->latch
573 && (dump_file && (dump_flags & TDF_DETAILS)))
574 {
575 fprintf (dump_file, ";; giving up on update of paths from "
576 "exit condition to latch\n");
577 }
578 }
579 else
580 if (dump_file && (dump_flags & TDF_DETAILS))
581 fprintf (dump_file, ";; Loop has multiple exit edges; "
582 "giving up on exit condition update\n");
583
584 /* Roughly speaking we want to reduce the loop body profile by the
585 difference of loop iterations. We however can do better if
586 we look at the actual profile, if it is available. */
587 p = profile_probability::always ();
588
589 count_in = count_in.apply_scale (iteration_bound, 1);
590 p = count_in.probability_in (loop->header->count);
591 if (!(p > profile_probability::never ()))
592 p = profile_probability::very_unlikely ();
593
594 if (p == profile_probability::always ()
595 || !p.initialized_p ())
596 return;
597
598 /* If latch exists, change its count, since we changed
599 probability of exit. Theoretically we should update everything from
600 source of exit edge to latch, but for vectorizer this is enough. */
601 if (loop->latch && loop->latch != e->src)
602 loop->latch->count += count_delta;
603
604 /* Scale the probabilities. */
605 scale_loop_frequencies (loop, p);
606
607 /* Change latch's count back. */
608 if (loop->latch && loop->latch != e->src)
609 loop->latch->count -= count_delta;
610
611 if (dump_file && (dump_flags & TDF_DETAILS))
612 fprintf (dump_file, ";; guessed iterations are now %i\n",
613 (int)expected_loop_iterations_unbounded (loop, NULL, true));
614 }
615}
616
617/* Recompute dominance information for basic blocks outside LOOP. */
618
619static void
620update_dominators_in_loop (class loop *loop)
621{
622 vec<basic_block> dom_bbs = vNULL;
623 basic_block *body;
624 unsigned i;
625
626 auto_sbitmap seen (last_basic_block_for_fn (cfun));
627 bitmap_clear (seen);
628 body = get_loop_body (loop);
629
630 for (i = 0; i < loop->num_nodes; i++)
631 bitmap_set_bit (seen, body[i]->index);
632
633 for (i = 0; i < loop->num_nodes; i++)
634 {
635 basic_block ldom;
636
637 for (ldom = first_dom_son (CDI_DOMINATORS, body[i]);
638 ldom;
639 ldom = next_dom_son (CDI_DOMINATORS, ldom))
640 if (!bitmap_bit_p (seen, ldom->index))
641 {
642 bitmap_set_bit (seen, ldom->index);
643 dom_bbs.safe_push (ldom);
644 }
645 }
646
647 iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false);
648 free (body);
649 dom_bbs.release ();
650}
651
652/* Creates an if region as shown above. CONDITION is used to create
653 the test for the if.
654
655 |
656 | ------------- -------------
657 | | pred_bb | | pred_bb |
658 | ------------- -------------
659 | | |
660 | | | ENTRY_EDGE
661 | | ENTRY_EDGE V
662 | | ====> -------------
663 | | | cond_bb |
664 | | | CONDITION |
665 | | -------------
666 | V / \
667 | ------------- e_false / \ e_true
668 | | succ_bb | V V
669 | ------------- ----------- -----------
670 | | false_bb | | true_bb |
671 | ----------- -----------
672 | \ /
673 | \ /
674 | V V
675 | -------------
676 | | join_bb |
677 | -------------
678 | | exit_edge (result)
679 | V
680 | -----------
681 | | succ_bb |
682 | -----------
683 |
684 */
685
686edge
687create_empty_if_region_on_edge (edge entry_edge, tree condition)
688{
689
690 basic_block cond_bb, true_bb, false_bb, join_bb;
691 edge e_true, e_false, exit_edge;
692 gcond *cond_stmt;
693 tree simple_cond;
694 gimple_stmt_iterator gsi;
695
696 cond_bb = split_edge (entry_edge);
697
698 /* Insert condition in cond_bb. */
699 gsi = gsi_last_bb (cond_bb);
700 simple_cond =
701 force_gimple_operand_gsi (&gsi, condition, true, NULL,
702 false, GSI_NEW_STMT);
703 cond_stmt = gimple_build_cond_from_tree (simple_cond, NULL_TREE, NULL_TREE);
704 gsi = gsi_last_bb (cond_bb);
705 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
706
707 join_bb = split_edge (single_succ_edge (cond_bb));
708
709 e_true = single_succ_edge (cond_bb);
710 true_bb = split_edge (e_true);
711
712 e_false = make_edge (cond_bb, join_bb, 0);
713 false_bb = split_edge (e_false);
714
715 e_true->flags &= ~EDGE_FALLTHRU;
716 e_true->flags |= EDGE_TRUE_VALUE;
717 e_false->flags &= ~EDGE_FALLTHRU;
718 e_false->flags |= EDGE_FALSE_VALUE;
719
720 set_immediate_dominator (CDI_DOMINATORS, cond_bb, entry_edge->src);
721 set_immediate_dominator (CDI_DOMINATORS, true_bb, cond_bb);
722 set_immediate_dominator (CDI_DOMINATORS, false_bb, cond_bb);
723 set_immediate_dominator (CDI_DOMINATORS, join_bb, cond_bb);
724
725 exit_edge = single_succ_edge (join_bb);
726
727 if (single_pred_p (exit_edge->dest))
728 set_immediate_dominator (CDI_DOMINATORS, exit_edge->dest, join_bb);
729
730 return exit_edge;
731}
732
733/* create_empty_loop_on_edge
734 |
735 | - pred_bb - ------ pred_bb ------
736 | | | | iv0 = initial_value |
737 | -----|----- ---------|-----------
738 | | ______ | entry_edge
739 | | entry_edge / | |
740 | | ====> | -V---V- loop_header -------------
741 | V | | iv_before = phi (iv0, iv_after) |
742 | - succ_bb - | ---|-----------------------------
743 | | | | |
744 | ----------- | ---V--- loop_body ---------------
745 | | | iv_after = iv_before + stride |
746 | | | if (iv_before < upper_bound) |
747 | | ---|--------------\--------------
748 | | | \ exit_e
749 | | V \
750 | | - loop_latch - V- succ_bb -
751 | | | | | |
752 | | /------------- -----------
753 | \ ___ /
754
755 Creates an empty loop as shown above, the IV_BEFORE is the SSA_NAME
756 that is used before the increment of IV. IV_BEFORE should be used for
757 adding code to the body that uses the IV. OUTER is the outer loop in
758 which the new loop should be inserted.
759
760 Both INITIAL_VALUE and UPPER_BOUND expressions are gimplified and
761 inserted on the loop entry edge. This implies that this function
762 should be used only when the UPPER_BOUND expression is a loop
763 invariant. */
764
765class loop *
766create_empty_loop_on_edge (edge entry_edge,
767 tree initial_value,
768 tree stride, tree upper_bound,
769 tree iv,
770 tree *iv_before,
771 tree *iv_after,
772 class loop *outer)
773{
774 basic_block loop_header, loop_latch, succ_bb, pred_bb;
775 class loop *loop;
776 gimple_stmt_iterator gsi;
777 gimple_seq stmts;
778 gcond *cond_expr;
779 tree exit_test;
780 edge exit_e;
781
782 gcc_assert (entry_edge && initial_value && stride && upper_bound && iv);
783
784 /* Create header, latch and wire up the loop. */
785 pred_bb = entry_edge->src;
786 loop_header = split_edge (entry_edge);
787 loop_latch = split_edge (single_succ_edge (loop_header));
788 succ_bb = single_succ (loop_latch);
789 make_edge (loop_header, succ_bb, 0);
790 redirect_edge_succ_nodup (single_succ_edge (loop_latch), loop_header);
791
792 /* Set immediate dominator information. */
793 set_immediate_dominator (CDI_DOMINATORS, loop_header, pred_bb);
794 set_immediate_dominator (CDI_DOMINATORS, loop_latch, loop_header);
795 set_immediate_dominator (CDI_DOMINATORS, succ_bb, loop_header);
796
797 /* Initialize a loop structure and put it in a loop hierarchy. */
798 loop = alloc_loop ();
799 loop->header = loop_header;
800 loop->latch = loop_latch;
801 add_loop (loop, outer);
802
803 /* TODO: Fix counts. */
804 scale_loop_frequencies (loop, profile_probability::even ());
805
806 /* Update dominators. */
807 update_dominators_in_loop (loop);
808
809 /* Modify edge flags. */
810 exit_e = single_exit (loop);
811 exit_e->flags = EDGE_LOOP_EXIT | EDGE_FALSE_VALUE;
812 single_pred_edge (loop_latch)->flags = EDGE_TRUE_VALUE;
813
814 /* Construct IV code in loop. */
815 initial_value = force_gimple_operand (initial_value, &stmts, true, iv);
816 if (stmts)
817 {
818 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
819 gsi_commit_edge_inserts ();
820 }
821
822 upper_bound = force_gimple_operand (upper_bound, &stmts, true, NULL);
823 if (stmts)
824 {
825 gsi_insert_seq_on_edge (loop_preheader_edge (loop), stmts);
826 gsi_commit_edge_inserts ();
827 }
828
829 gsi = gsi_last_bb (loop_header);
830 create_iv (initial_value, stride, iv, loop, &gsi, false,
831 iv_before, iv_after);
832
833 /* Insert loop exit condition. */
834 cond_expr = gimple_build_cond
835 (LT_EXPR, *iv_before, upper_bound, NULL_TREE, NULL_TREE);
836
837 exit_test = gimple_cond_lhs (cond_expr);
838 exit_test = force_gimple_operand_gsi (&gsi, exit_test, true, NULL,
839 false, GSI_NEW_STMT);
840 gimple_cond_set_lhs (cond_expr, exit_test);
841 gsi = gsi_last_bb (exit_e->src);
842 gsi_insert_after (&gsi, cond_expr, GSI_NEW_STMT);
843
844 split_block_after_labels (loop_header);
845
846 return loop;
847}
848
849/* Remove the latch edge of a LOOP and update loops to indicate that
850 the LOOP was removed. After this function, original loop latch will
851 have no successor, which caller is expected to fix somehow.
852
853 If this may cause the information about irreducible regions to become
854 invalid, IRRED_INVALIDATED is set to true.
855
856 LOOP_CLOSED_SSA_INVALIDATED, if non-NULL, is a bitmap where we store
857 basic blocks that had non-trivial update on their loop_father. */
858
859void
860unloop (class loop *loop, bool *irred_invalidated,
861 bitmap loop_closed_ssa_invalidated)
862{
863 basic_block *body;
864 class loop *ploop;
865 unsigned i, n;
866 basic_block latch = loop->latch;
867 bool dummy = false;
868
869 if (loop_preheader_edge (loop)->flags & EDGE_IRREDUCIBLE_LOOP)
870 *irred_invalidated = true;
871
872 /* This is relatively straightforward. The dominators are unchanged, as
873 loop header dominates loop latch, so the only thing we have to care of
874 is the placement of loops and basic blocks inside the loop tree. We
875 move them all to the loop->outer, and then let fix_bb_placements do
876 its work. */
877
878 body = get_loop_body (loop);
879 n = loop->num_nodes;
880 for (i = 0; i < n; i++)
881 if (body[i]->loop_father == loop)
882 {
883 remove_bb_from_loops (body[i]);
884 add_bb_to_loop (body[i], loop_outer (loop));
885 }
886 free (body);
887
888 while (loop->inner)
889 {
890 ploop = loop->inner;
891 flow_loop_tree_node_remove (ploop);
892 flow_loop_tree_node_add (loop_outer (loop), ploop);
893 }
894
895 /* Remove the loop and free its data. */
896 delete_loop (loop);
897
898 remove_edge (single_succ_edge (latch));
899
900 /* We do not pass IRRED_INVALIDATED to fix_bb_placements here, as even if
901 there is an irreducible region inside the cancelled loop, the flags will
902 be still correct. */
903 fix_bb_placements (latch, &dummy, loop_closed_ssa_invalidated);
904}
905
906/* Fix placement of superloops of LOOP inside loop tree, i.e. ensure that
907 condition stated in description of fix_loop_placement holds for them.
908 It is used in case when we removed some edges coming out of LOOP, which
909 may cause the right placement of LOOP inside loop tree to change.
910
911 IRRED_INVALIDATED is set to true if a change in the loop structures might
912 invalidate the information about irreducible regions. */
913
914static void
915fix_loop_placements (class loop *loop, bool *irred_invalidated)
916{
917 class loop *outer;
918
919 while (loop_outer (loop))
920 {
921 outer = loop_outer (loop);
922 if (!fix_loop_placement (loop, irred_invalidated))
923 break;
924
925 /* Changing the placement of a loop in the loop tree may alter the
926 validity of condition 2) of the description of fix_bb_placement
927 for its preheader, because the successor is the header and belongs
928 to the loop. So call fix_bb_placements to fix up the placement
929 of the preheader and (possibly) of its predecessors. */
930 fix_bb_placements (loop_preheader_edge (loop)->src,
931 irred_invalidated, NULL);
932 loop = outer;
933 }
934}
935
936/* Duplicate loop bounds and other information we store about
937 the loop into its duplicate. */
938
939void
940copy_loop_info (class loop *loop, class loop *target)
941{
942 gcc_checking_assert (!target->any_upper_bound && !target->any_estimate);
943 target->any_upper_bound = loop->any_upper_bound;
944 target->nb_iterations_upper_bound = loop->nb_iterations_upper_bound;
945 target->any_likely_upper_bound = loop->any_likely_upper_bound;
946 target->nb_iterations_likely_upper_bound
947 = loop->nb_iterations_likely_upper_bound;
948 target->any_estimate = loop->any_estimate;
949 target->nb_iterations_estimate = loop->nb_iterations_estimate;
950 target->estimate_state = loop->estimate_state;
951 target->safelen = loop->safelen;
952 target->simdlen = loop->simdlen;
953 target->constraints = loop->constraints;
954 target->can_be_parallel = loop->can_be_parallel;
955 target->warned_aggressive_loop_optimizations
956 |= loop->warned_aggressive_loop_optimizations;
957 target->dont_vectorize = loop->dont_vectorize;
958 target->force_vectorize = loop->force_vectorize;
959 target->in_oacc_kernels_region = loop->in_oacc_kernels_region;
960 target->finite_p = loop->finite_p;
961 target->unroll = loop->unroll;
962 target->owned_clique = loop->owned_clique;
963}
964
965/* Creates a copy of LOOP as a subloop of TARGET loop, placing the newly
966 created loop into the loops structure. If AFTER is non-null,
967 the new loop is added at AFTER->next; otherwise it is put in front of
968 TARGET's sibling list. */
969class loop *
970duplicate_loop (class loop *loop, class loop *target, class loop *after)
971{
972 class loop *cloop;
973 cloop = alloc_loop ();
974 place_new_loop (cfun, cloop);
975
976 copy_loop_info (loop, cloop);
977
978 /* Mark the new loop as copy of LOOP. */
979 set_loop_copy (loop, cloop);
980
981 /* Add it to target. */
982 flow_loop_tree_node_add (target, cloop, after);
983
984 return cloop;
985}
986
987/* Copies structure of subloops of LOOP into TARGET loop, placing
988 newly created loops into the loop tree at the end of TARGET's sibling
989 list in the original order. */
990void
991duplicate_subloops (class loop *loop, class loop *target)
992{
993 class loop *aloop, *cloop, *tail;
994
995 for (tail = target->inner; tail && tail->next; tail = tail->next)
996 ;
997 for (aloop = loop->inner; aloop; aloop = aloop->next)
998 {
999 cloop = duplicate_loop (aloop, target, tail);
1000 tail = cloop;
1001 gcc_assert (!tail->next);
1002 duplicate_subloops (aloop, cloop);
1003 }
1004}
1005
1006/* Copies structure of subloops of N loops, stored in array COPIED_LOOPS,
1007 into TARGET loop, placing newly created loops into the loop tree, adding
1008 them to TARGET's sibling list at the end, in order. */
1009static void
1010copy_loops_to (class loop **copied_loops, int n, class loop *target)
1011{
1012 class loop *aloop, *tail;
1013 int i;
1014
1015 for (tail = target->inner; tail && tail->next; tail = tail->next)
1016 ;
1017 for (i = 0; i < n; i++)
1018 {
1019 aloop = duplicate_loop (copied_loops[i], target, tail);
1020 tail = aloop;
1021 gcc_assert (!tail->next);
1022 duplicate_subloops (copied_loops[i], aloop);
1023 }
1024}
1025
1026/* Redirects edge E to basic block DEST. */
1027static void
1028loop_redirect_edge (edge e, basic_block dest)
1029{
1030 if (e->dest == dest)
1031 return;
1032
1033 redirect_edge_and_branch_force (e, dest);
1034}
1035
1036/* Check whether LOOP's body can be duplicated. */
1037bool
1038can_duplicate_loop_p (const class loop *loop)
1039{
1040 int ret;
1041 basic_block *bbs = get_loop_body (loop);
1042
1043 ret = can_copy_bbs_p (bbs, loop->num_nodes);
1044 free (bbs);
1045
1046 return ret;
1047}
1048
1049/* Duplicates body of LOOP to given edge E NDUPL times. Takes care of updating
1050 loop structure and dominators (order of inner subloops is retained).
1051 E's destination must be LOOP header for this to work, i.e. it must be entry
1052 or latch edge of this loop; these are unique, as the loops must have
1053 preheaders for this function to work correctly (in case E is latch, the
1054 function unrolls the loop, if E is entry edge, it peels the loop). Store
1055 edges created by copying ORIG edge from copies corresponding to set bits in
1056 WONT_EXIT bitmap (bit 0 corresponds to original LOOP body, the other copies
1057 are numbered in order given by control flow through them) into TO_REMOVE
1058 array. Returns false if duplication is
1059 impossible. */
1060
1061bool
1062duplicate_loop_body_to_header_edge (class loop *loop, edge e,
1063 unsigned int ndupl, sbitmap wont_exit,
1064 edge orig, vec<edge> *to_remove, int flags)
1065{
1066 class loop *target, *aloop;
1067 class loop **orig_loops;
1068 unsigned n_orig_loops;
1069 basic_block header = loop->header, latch = loop->latch;
1070 basic_block *new_bbs, *bbs, *first_active;
1071 basic_block new_bb, bb, first_active_latch = NULL;
1072 edge ae, latch_edge;
1073 edge spec_edges[2], new_spec_edges[2];
1074 const int SE_LATCH = 0;
1075 const int SE_ORIG = 1;
1076 unsigned i, j, n;
1077 int is_latch = (latch == e->src);
1078 profile_probability *scale_step = NULL;
1079 profile_probability scale_main = profile_probability::always ();
1080 profile_probability scale_act = profile_probability::always ();
1081 profile_count after_exit_num = profile_count::zero (),
1082 after_exit_den = profile_count::zero ();
1083 bool scale_after_exit = false;
1084 int add_irreducible_flag;
1085 basic_block place_after;
1086 bitmap bbs_to_scale = NULL;
1087 bitmap_iterator bi;
1088
1089 gcc_assert (e->dest == loop->header);
1090 gcc_assert (ndupl > 0);
1091
1092 if (orig)
1093 {
1094 /* Orig must be edge out of the loop. */
1095 gcc_assert (flow_bb_inside_loop_p (loop, orig->src));
1096 gcc_assert (!flow_bb_inside_loop_p (loop, orig->dest));
1097 }
1098
1099 n = loop->num_nodes;
1100 bbs = get_loop_body_in_dom_order (loop);
1101 gcc_assert (bbs[0] == loop->header);
1102 gcc_assert (bbs[n - 1] == loop->latch);
1103
1104 /* Check whether duplication is possible. */
1105 if (!can_copy_bbs_p (bbs, loop->num_nodes))
1106 {
1107 free (bbs);
1108 return false;
1109 }
1110 new_bbs = XNEWVEC (basic_block, loop->num_nodes);
1111
1112 /* In case we are doing loop peeling and the loop is in the middle of
1113 irreducible region, the peeled copies will be inside it too. */
1114 add_irreducible_flag = e->flags & EDGE_IRREDUCIBLE_LOOP;
1115 gcc_assert (!is_latch || !add_irreducible_flag);
1116
1117 /* Find edge from latch. */
1118 latch_edge = loop_latch_edge (loop);
1119
1120 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1121 {
1122 /* Calculate coefficients by that we have to scale counts
1123 of duplicated loop bodies. */
1124 profile_count count_in = header->count;
1125 profile_count count_le = latch_edge->count ();
1126 profile_count count_out_orig = orig ? orig->count () : count_in - count_le;
1127 profile_probability prob_pass_thru = count_le.probability_in (count_in);
1128 profile_probability prob_pass_wont_exit =
1129 (count_le + count_out_orig).probability_in (count_in);
1130
1131 if (orig && orig->probability.initialized_p ()
1132 && !(orig->probability == profile_probability::always ()))
1133 {
1134 /* The blocks that are dominated by a removed exit edge ORIG have
1135 frequencies scaled by this. */
1136 if (orig->count ().initialized_p ())
1137 {
1138 after_exit_num = orig->src->count;
1139 after_exit_den = after_exit_num - orig->count ();
1140 scale_after_exit = true;
1141 }
1142 bbs_to_scale = BITMAP_ALLOC (NULL);
1143 for (i = 0; i < n; i++)
1144 {
1145 if (bbs[i] != orig->src
1146 && dominated_by_p (CDI_DOMINATORS, bbs[i], orig->src))
1147 bitmap_set_bit (bbs_to_scale, i);
1148 }
1149 }
1150
1151 scale_step = XNEWVEC (profile_probability, ndupl);
1152
1153 for (i = 1; i <= ndupl; i++)
1154 scale_step[i - 1] = bitmap_bit_p (wont_exit, i)
1155 ? prob_pass_wont_exit
1156 : prob_pass_thru;
1157
1158 /* Complete peeling is special as the probability of exit in last
1159 copy becomes 1. */
1160 if (flags & DLTHE_FLAG_COMPLETTE_PEEL)
1161 {
1162 profile_count wanted_count = e->count ();
1163
1164 gcc_assert (!is_latch);
1165 /* First copy has count of incoming edge. Each subsequent
1166 count should be reduced by prob_pass_wont_exit. Caller
1167 should've managed the flags so all except for the original loop
1168 have wont_exit set. */
1169 scale_act = wanted_count.probability_in (count_in);
1170 /* Now simulate the duplication adjustments and compute header
1171 frequency of the last copy. */
1172 for (i = 0; i < ndupl; i++)
1173 wanted_count = wanted_count.apply_probability (scale_step [i]);
1174 scale_main = wanted_count.probability_in (count_in);
1175 }
1176 /* Here we insert loop bodies inside the loop itself (for loop unrolling).
1177 First iteration will be original loop followed by duplicated bodies.
1178 It is necessary to scale down the original so we get right overall
1179 number of iterations. */
1180 else if (is_latch)
1181 {
1182 profile_probability prob_pass_main = bitmap_bit_p (wont_exit, 0)
1183 ? prob_pass_wont_exit
1184 : prob_pass_thru;
1185 profile_probability p = prob_pass_main;
1186 profile_count scale_main_den = count_in;
1187 for (i = 0; i < ndupl; i++)
1188 {
1189 scale_main_den += count_in.apply_probability (p);
1190 p = p * scale_step[i];
1191 }
1192 /* If original loop is executed COUNT_IN times, the unrolled
1193 loop will account SCALE_MAIN_DEN times. */
1194 scale_main = count_in.probability_in (scale_main_den);
1195 scale_act = scale_main * prob_pass_main;
1196 }
1197 else
1198 {
1199 profile_count preheader_count = e->count ();
1200 for (i = 0; i < ndupl; i++)
1201 scale_main = scale_main * scale_step[i];
1202 scale_act = preheader_count.probability_in (count_in);
1203 }
1204 }
1205
1206 /* Loop the new bbs will belong to. */
1207 target = e->src->loop_father;
1208
1209 /* Original loops. */
1210 n_orig_loops = 0;
1211 for (aloop = loop->inner; aloop; aloop = aloop->next)
1212 n_orig_loops++;
1213 orig_loops = XNEWVEC (class loop *, n_orig_loops);
1214 for (aloop = loop->inner, i = 0; aloop; aloop = aloop->next, i++)
1215 orig_loops[i] = aloop;
1216
1217 set_loop_copy (loop, target);
1218
1219 first_active = XNEWVEC (basic_block, n);
1220 if (is_latch)
1221 {
1222 memcpy (first_active, bbs, n * sizeof (basic_block));
1223 first_active_latch = latch;
1224 }
1225
1226 spec_edges[SE_ORIG] = orig;
1227 spec_edges[SE_LATCH] = latch_edge;
1228
1229 place_after = e->src;
1230 for (j = 0; j < ndupl; j++)
1231 {
1232 /* Copy loops. */
1233 copy_loops_to (orig_loops, n_orig_loops, target);
1234
1235 /* Copy bbs. */
1236 copy_bbs (bbs, n, new_bbs, spec_edges, 2, new_spec_edges, loop,
1237 place_after, true);
1238 place_after = new_spec_edges[SE_LATCH]->src;
1239
1240 if (flags & DLTHE_RECORD_COPY_NUMBER)
1241 for (i = 0; i < n; i++)
1242 {
1243 gcc_assert (!new_bbs[i]->aux);
1244 new_bbs[i]->aux = (void *)(size_t)(j + 1);
1245 }
1246
1247 /* Note whether the blocks and edges belong to an irreducible loop. */
1248 if (add_irreducible_flag)
1249 {
1250 for (i = 0; i < n; i++)
1251 new_bbs[i]->flags |= BB_DUPLICATED;
1252 for (i = 0; i < n; i++)
1253 {
1254 edge_iterator ei;
1255 new_bb = new_bbs[i];
1256 if (new_bb->loop_father == target)
1257 new_bb->flags |= BB_IRREDUCIBLE_LOOP;
1258
1259 FOR_EACH_EDGE (ae, ei, new_bb->succs)
1260 if ((ae->dest->flags & BB_DUPLICATED)
1261 && (ae->src->loop_father == target
1262 || ae->dest->loop_father == target))
1263 ae->flags |= EDGE_IRREDUCIBLE_LOOP;
1264 }
1265 for (i = 0; i < n; i++)
1266 new_bbs[i]->flags &= ~BB_DUPLICATED;
1267 }
1268
1269 /* Redirect the special edges. */
1270 if (is_latch)
1271 {
1272 redirect_edge_and_branch_force (latch_edge, new_bbs[0]);
1273 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1274 loop->header);
1275 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], latch);
1276 latch = loop->latch = new_bbs[n - 1];
1277 e = latch_edge = new_spec_edges[SE_LATCH];
1278 }
1279 else
1280 {
1281 redirect_edge_and_branch_force (new_spec_edges[SE_LATCH],
1282 loop->header);
1283 redirect_edge_and_branch_force (e, new_bbs[0]);
1284 set_immediate_dominator (CDI_DOMINATORS, new_bbs[0], e->src);
1285 e = new_spec_edges[SE_LATCH];
1286 }
1287
1288 /* Record exit edge in this copy. */
1289 if (orig && bitmap_bit_p (wont_exit, j + 1))
1290 {
1291 if (to_remove)
1292 to_remove->safe_push (new_spec_edges[SE_ORIG]);
1293 force_edge_cold (new_spec_edges[SE_ORIG], true);
1294
1295 /* Scale the frequencies of the blocks dominated by the exit. */
1296 if (bbs_to_scale && scale_after_exit)
1297 {
1298 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1299 scale_bbs_frequencies_profile_count (new_bbs + i, 1, after_exit_num,
1300 after_exit_den);
1301 }
1302 }
1303
1304 /* Record the first copy in the control flow order if it is not
1305 the original loop (i.e. in case of peeling). */
1306 if (!first_active_latch)
1307 {
1308 memcpy (first_active, new_bbs, n * sizeof (basic_block));
1309 first_active_latch = new_bbs[n - 1];
1310 }
1311
1312 /* Set counts and frequencies. */
1313 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1314 {
1315 scale_bbs_frequencies (new_bbs, n, scale_act);
1316 scale_act = scale_act * scale_step[j];
1317 }
1318 }
1319 free (new_bbs);
1320 free (orig_loops);
1321
1322 /* Record the exit edge in the original loop body, and update the frequencies. */
1323 if (orig && bitmap_bit_p (wont_exit, 0))
1324 {
1325 if (to_remove)
1326 to_remove->safe_push (orig);
1327 force_edge_cold (orig, true);
1328
1329 /* Scale the frequencies of the blocks dominated by the exit. */
1330 if (bbs_to_scale && scale_after_exit)
1331 {
1332 EXECUTE_IF_SET_IN_BITMAP (bbs_to_scale, 0, i, bi)
1333 scale_bbs_frequencies_profile_count (bbs + i, 1, after_exit_num,
1334 after_exit_den);
1335 }
1336 }
1337
1338 /* Update the original loop. */
1339 if (!is_latch)
1340 set_immediate_dominator (CDI_DOMINATORS, e->dest, e->src);
1341 if (flags & DLTHE_FLAG_UPDATE_FREQ)
1342 {
1343 scale_bbs_frequencies (bbs, n, scale_main);
1344 free (scale_step);
1345 }
1346
1347 /* Update dominators of outer blocks if affected. */
1348 for (i = 0; i < n; i++)
1349 {
1350 basic_block dominated, dom_bb;
1351 unsigned j;
1352
1353 bb = bbs[i];
1354 bb->aux = 0;
1355
1356 auto_vec<basic_block> dom_bbs = get_dominated_by (CDI_DOMINATORS, bb);
1357 FOR_EACH_VEC_ELT (dom_bbs, j, dominated)
1358 {
1359 if (flow_bb_inside_loop_p (loop, dominated))
1360 continue;
1361 dom_bb = nearest_common_dominator (
1362 CDI_DOMINATORS, first_active[i], first_active_latch);
1363 set_immediate_dominator (CDI_DOMINATORS, dominated, dom_bb);
1364 }
1365 }
1366 free (first_active);
1367
1368 free (bbs);
1369 BITMAP_FREE (bbs_to_scale);
1370
1371 return true;
1372}
1373
1374/* A callback for make_forwarder_block, to redirect all edges except for
1375 MFB_KJ_EDGE to the entry part. E is the edge for which we should decide
1376 whether to redirect it. */
1377
1378edge mfb_kj_edge;
1379bool
1380mfb_keep_just (edge e)
1381{
1382 return e != mfb_kj_edge;
1383}
1384
1385/* True when a candidate preheader BLOCK has predecessors from LOOP. */
1386
1387static bool
1388has_preds_from_loop (basic_block block, class loop *loop)
1389{
1390 edge e;
1391 edge_iterator ei;
1392
1393 FOR_EACH_EDGE (e, ei, block->preds)
1394 if (e->src->loop_father == loop)
1395 return true;
1396 return false;
1397}
1398
1399/* Creates a pre-header for a LOOP. Returns newly created block. Unless
1400 CP_SIMPLE_PREHEADERS is set in FLAGS, we only force LOOP to have single
1401 entry; otherwise we also force preheader block to have only one successor.
1402 When CP_FALLTHRU_PREHEADERS is set in FLAGS, we force the preheader block
1403 to be a fallthru predecessor to the loop header and to have only
1404 predecessors from outside of the loop.
1405 The function also updates dominators. */
1406
1407basic_block
1408create_preheader (class loop *loop, int flags)
1409{
1410 edge e;
1411 basic_block dummy;
1412 int nentry = 0;
1413 bool irred = false;
1414 bool latch_edge_was_fallthru;
1415 edge one_succ_pred = NULL, single_entry = NULL;
1416 edge_iterator ei;
1417
1418 FOR_EACH_EDGE (e, ei, loop->header->preds)
1419 {
1420 if (e->src == loop->latch)
1421 continue;
1422 irred |= (e->flags & EDGE_IRREDUCIBLE_LOOP) != 0;
1423 nentry++;
1424 single_entry = e;
1425 if (single_succ_p (e->src))
1426 one_succ_pred = e;
1427 }
1428 gcc_assert (nentry);
1429 if (nentry == 1)
1430 {
1431 bool need_forwarder_block = false;
1432
1433 /* We do not allow entry block to be the loop preheader, since we
1434 cannot emit code there. */
1435 if (single_entry->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1436 need_forwarder_block = true;
1437 else
1438 {
1439 /* If we want simple preheaders, also force the preheader to have
1440 just a single successor and a normal edge. */
1441 if ((flags & CP_SIMPLE_PREHEADERS)
1442 && ((single_entry->flags & EDGE_COMPLEX)
1443 || !single_succ_p (single_entry->src)))
1444 need_forwarder_block = true;
1445 /* If we want fallthru preheaders, also create forwarder block when
1446 preheader ends with a jump or has predecessors from loop. */
1447 else if ((flags & CP_FALLTHRU_PREHEADERS)
1448 && (JUMP_P (BB_END (single_entry->src))
1449 || has_preds_from_loop (single_entry->src, loop)))
1450 need_forwarder_block = true;
1451 }
1452 if (! need_forwarder_block)
1453 return NULL;
1454 }
1455
1456 mfb_kj_edge = loop_latch_edge (loop);
1457 latch_edge_was_fallthru = (mfb_kj_edge->flags & EDGE_FALLTHRU) != 0;
1458 if (nentry == 1
1459 && ((flags & CP_FALLTHRU_PREHEADERS) == 0
1460 || (single_entry->flags & EDGE_CROSSING) == 0))
1461 dummy = split_edge (single_entry);
1462 else
1463 {
1464 edge fallthru = make_forwarder_block (loop->header, mfb_keep_just, NULL);
1465 dummy = fallthru->src;
1466 loop->header = fallthru->dest;
1467 }
1468
1469 /* Try to be clever in placing the newly created preheader. The idea is to
1470 avoid breaking any "fallthruness" relationship between blocks.
1471
1472 The preheader was created just before the header and all incoming edges
1473 to the header were redirected to the preheader, except the latch edge.
1474 So the only problematic case is when this latch edge was a fallthru
1475 edge: it is not anymore after the preheader creation so we have broken
1476 the fallthruness. We're therefore going to look for a better place. */
1477 if (latch_edge_was_fallthru)
1478 {
1479 if (one_succ_pred)
1480 e = one_succ_pred;
1481 else
1482 e = EDGE_PRED (dummy, 0);
1483
1484 move_block_after (dummy, e->src);
1485 }
1486
1487 if (irred)
1488 {
1489 dummy->flags |= BB_IRREDUCIBLE_LOOP;
1490 single_succ_edge (dummy)->flags |= EDGE_IRREDUCIBLE_LOOP;
1491 }
1492
1493 if (dump_file)
1494 fprintf (dump_file, "Created preheader block for loop %i\n",
1495 loop->num);
1496
1497 if (flags & CP_FALLTHRU_PREHEADERS)
1498 gcc_assert ((single_succ_edge (dummy)->flags & EDGE_FALLTHRU)
1499 && !JUMP_P (BB_END (dummy)));
1500
1501 return dummy;
1502}
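The flags above compose; as a usage sketch (not from this file -- the loop pointer and flag choice are illustrative), a pass that needs a dedicated preheader might call:

/* Sketch: ensure LOOP has a preheader with a single successor.
   create_preheader returns the new block, or NULL when LOOP
   already had a suitable single entry.  */
basic_block ph = create_preheader (loop, CP_SIMPLE_PREHEADERS);
if (ph && dump_file)
  fprintf (dump_file, "preheader is bb %i\n", ph->index);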
1503
1504/* Create preheaders for each loop; for meaning of FLAGS see create_preheader. */
1505
1506void
1507create_preheaders (int flags)
1508{
1509 if (!current_loops)
1510 return;
1511
1512 for (auto loop : loops_list (cfun, 0))
1513 create_preheader (loop, flags);
1514 loops_state_set (LOOPS_HAVE_PREHEADERS);
1515}
1516
1517/* Forces all loop latches to have only a single successor. */
1518
1519void
1520force_single_succ_latches (void)
1521{
1522 edge e;
1523
1524 for (auto loop : loops_list (cfun, 0))
1525 {
1526 if (loop->latch != loop->header && single_succ_p (loop->latch))
1527 continue;
1528
1529 e = find_edge (loop->latch, loop->header);
1530 gcc_checking_assert (e != NULL);
1531
1532 split_edge (e);
1533 }
1534 loops_state_set (LOOPS_HAVE_SIMPLE_LATCHES);
1535}
1536
1537/* This function is called from loop_version. It splits the entry edge
1538 of the loop we want to version, adds the versioning condition, and
1539 adjusts the edges to the two versions of the loop appropriately.
1540 E is an incoming edge. Returns the basic block containing the
1541 condition.
1542
1543 --- edge e ---- > [second_head]
1544
1545 Split it and insert new conditional expression and adjust edges.
1546
1547 --- edge e ---> [cond expr] ---> [first_head]
1548 |
1549 +---------> [second_head]
1550
1551 THEN_PROB is the probability of then branch of the condition.
1552 ELSE_PROB is the probability of else branch. Note that they may both be
1553 REG_BR_PROB_BASE when the condition is IFN_LOOP_VECTORIZED or
1554 IFN_LOOP_DIST_ALIAS. */
1555
1556static basic_block
1557lv_adjust_loop_entry_edge (basic_block first_head, basic_block second_head,
1558 edge e, void *cond_expr,
1559 profile_probability then_prob,
1560 profile_probability else_prob)
1561{
1562 basic_block new_head = NULL;
1563 edge e1;
1564
1565 gcc_assert (e->dest == second_head);
1566
1567 /* Split edge 'e'. This will create a new basic block, where we can
1568 insert conditional expr. */
1569 new_head = split_edge (e);
1570
1571 lv_add_condition_to_bb (first_head, second_head, new_head,
1572 cond_expr);
1573
1574 /* Don't set EDGE_TRUE_VALUE in RTL mode, as it's invalid there. */
1575 e = single_succ_edge (new_head);
1576 e1 = make_edge (new_head, first_head,
1577 current_ir_type () == IR_GIMPLE ? EDGE_TRUE_VALUE : 0);
1578 e1->probability = then_prob;
1579 e->probability = else_prob;
1580
1581 set_immediate_dominator (CDI_DOMINATORS, first_head, new_head);
1582 set_immediate_dominator (CDI_DOMINATORS, second_head, new_head);
1583
1584 /* Adjust loop header phi nodes. */
1585 lv_adjust_loop_header_phi (first_head, second_head, new_head, e1);
1586
1587 return new_head;
1588}
1589
1590/* Main entry point for Loop Versioning transformation.
1591
1592 Given a condition and a loop, this transformation creates
1593 -if (condition) { loop_copy1 } else { loop_copy2 },
1594 where loop_copy1 is the loop transformed in one way, and loop_copy2
1595 is the loop transformed in another way (or unchanged). COND_EXPR
1596 may be a run time test for things that were not resolved by static
1597 analysis (overlapping ranges (anti-aliasing), alignment, etc.).
1598
1599 If non-NULL, CONDITION_BB is set to the basic block containing the
1600 condition.
1601
1602 THEN_PROB is the probability of the then edge of the if. THEN_SCALE
1603 is the ratio by which the frequencies in the original loop should
1604 be scaled. ELSE_SCALE is the ratio by which the frequencies in the
1605 new loop should be scaled.
1606
1607 If PLACE_AFTER is true, we place the new loop after LOOP in the
1608 instruction stream, otherwise it is placed before LOOP. */
1609
1610class loop *
1611loop_version (class loop *loop,
1612 void *cond_expr, basic_block *condition_bb,
1613 profile_probability then_prob, profile_probability else_prob,
1614 profile_probability then_scale, profile_probability else_scale,
1615 bool place_after)
1616{
1617 basic_block first_head, second_head;
1618 edge entry, latch_edge;
1619 int irred_flag;
1620 class loop *nloop;
1621 basic_block cond_bb;
1622
1623 /* Record entry and latch edges for the loop. */
1624 entry = loop_preheader_edge (loop);
1625 irred_flag = entry->flags & EDGE_IRREDUCIBLE_LOOP;
1626 entry->flags &= ~EDGE_IRREDUCIBLE_LOOP;
1627
1628 /* Note down head of loop as first_head. */
1629 first_head = entry->dest;
1630
1631 /* 1) Duplicate loop on the entry edge. */
1632 if (!cfg_hook_duplicate_loop_body_to_header_edge (loop, entry, 1, NULL, NULL,
1: Assuming the condition is false
2: Taking false branch
1633 NULL, 0))
1634 {
1635 entry->flags |= irred_flag;
1636 return NULL;
1637 }
1638
1639 /* 2) loopify the duplicated new loop. */
1640 latch_edge = single_succ_edge (get_bb_copy (loop->latch));
1641 nloop = alloc_loop ();
1642 class loop *outer = loop_outer (latch_edge->dest->loop_father);
1643 edge new_header_edge = single_pred_edge (get_bb_copy (loop->header));
1644 nloop->header = new_header_edge->dest;
1645 nloop->latch = latch_edge->src;
1646 loop_redirect_edge (latch_edge, nloop->header);
1647
1648 /* Compute new loop. */
1649 add_loop (nloop, outer);
3: Calling 'add_loop'
1650 copy_loop_info (loop, nloop);
1651 set_loop_copy (loop, nloop);
1652
1653 /* loopify redirected latch_edge. Update its PENDING_STMTS. */
1654 lv_flush_pending_stmts (latch_edge);
1655
1656 /* After duplication entry edge now points to new loop head block.
1657 Note down new head as second_head. */
1658 second_head = entry->dest;
1659
1660 /* 3) Split loop entry edge and insert new block with cond expr. */
1661 cond_bb = lv_adjust_loop_entry_edge (first_head, second_head,
1662 entry, cond_expr, then_prob, else_prob);
1663 if (condition_bb)
1664 *condition_bb = cond_bb;
1665
1666 if (!cond_bb)
1667 {
1668 entry->flags |= irred_flag;
1669 return NULL;
1670 }
1671
1672 /* Add cond_bb to appropriate loop. */
1673 if (cond_bb->loop_father)
1674 remove_bb_from_loops (cond_bb);
1675 add_bb_to_loop (cond_bb, outer);
1676
1677 /* 4) Scale the original loop and new loop frequency. */
1678 scale_loop_frequencies (loop, then_scale);
1679 scale_loop_frequencies (nloop, else_scale);
1680 update_dominators_in_loop (loop);
1681 update_dominators_in_loop (nloop);
1682
1683 /* Adjust irreducible flag. */
1684 if (irred_flag)
1685 {
1686 cond_bb->flags |= BB_IRREDUCIBLE_LOOP;
1687 loop_preheader_edge (loop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1688 loop_preheader_edge (nloop)->flags |= EDGE_IRREDUCIBLE_LOOP;
1689 single_pred_edge (cond_bb)->flags |= EDGE_IRREDUCIBLE_LOOP;
1690 }
1691
1692 if (place_after)
1693 {
1694 basic_block *bbs = get_loop_body_in_dom_order (nloop), after;
1695 unsigned i;
1696
1697 after = loop->latch;
1698
1699 for (i = 0; i < nloop->num_nodes; i++)
1700 {
1701 move_block_after (bbs[i], after);
1702 after = bbs[i];
1703 }
1704 free (bbs);
1705 }
1706
1707 /* At this point condition_bb is the loop preheader with two successors,
1708 first_head and second_head. Make sure that the loop preheader has only
1709 one successor. */
1710 split_edge (loop_preheader_edge (loop));
1711 split_edge (loop_preheader_edge (nloop));
1712
1713 return nloop;
1714}
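A hypothetical caller (sketch only: COND stands for a condition built elsewhere, and the even 50/50 probabilities are illustrative) could use the function as follows:

/* Version LOOP under COND; on success NLOOP is the new copy and
   COND_BB holds the versioning condition (sketch).  */
basic_block cond_bb;
class loop *nloop
  = loop_version (loop, cond, &cond_bb,
                  profile_probability::even (), /* then_prob */
                  profile_probability::even (), /* else_prob */
                  profile_probability::even (), /* then_scale */
                  profile_probability::even (), /* else_scale */
                  true);                        /* place_after */
if (!nloop)
  return;   /* Versioning failed.  */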

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h

1/* Natural loop functions
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#ifndef GCC_CFGLOOP_H
21#define GCC_CFGLOOP_H
22
23#include "cfgloopmanip.h"
24
25/* Structure to hold decision about unrolling/peeling. */
26enum lpt_dec
27{
28 LPT_NONE,
29 LPT_UNROLL_CONSTANT,
30 LPT_UNROLL_RUNTIME,
31 LPT_UNROLL_STUPID
32};
33
34struct GTY (()) lpt_decision {
35 enum lpt_dec decision;
36 unsigned times;
37};
38
39/* The type of extend applied to an IV. */
40enum iv_extend_code
41{
42 IV_SIGN_EXTEND,
43 IV_ZERO_EXTEND,
44 IV_UNKNOWN_EXTEND
45};
46
47/* The structure describing a bound on number of iterations of a loop. */
48
49class GTY ((chain_next ("%h.next"))) nb_iter_bound {
50public:
51 /* The statement STMT is executed at most ... */
52 gimple *stmt;
53
54 /* ... BOUND + 1 times (BOUND must be an unsigned constant).
55 The + 1 is added for the following reasons:
56
57 a) 0 would otherwise be unused, while we would need to care more about
58 overflows (as MAX + 1 is sometimes produced as the estimate on number
59 of executions of STMT).
60 b) it is consistent with the result of number_of_iterations_exit. */
61 widest_int bound;
62
63 /* True if, after executing the statement BOUND + 1 times, we will
64 leave the loop; that is, all the statements after it are executed at most
65 BOUND times. */
66 bool is_exit;
67
68 /* The next bound in the list. */
69 class nb_iter_bound *next;
70};
71
72/* Description of the loop exit. */
73
74struct GTY ((for_user)) loop_exit {
75 /* The exit edge. */
76 edge e;
77
78 /* Previous and next exit in the list of the exits of the loop. */
79 struct loop_exit *prev;
80 struct loop_exit *next;
81
82 /* Next element in the list of loops from that E exits. */
83 struct loop_exit *next_e;
84};
85
86struct loop_exit_hasher : ggc_ptr_hash<loop_exit>
87{
88 typedef edge compare_type;
89
90 static hashval_t hash (loop_exit *);
91 static bool equal (loop_exit *, edge);
92 static void remove (loop_exit *);
93};
94
95typedef class loop *loop_p;
96
97/* An integer estimate of the number of iterations. ESTIMATE_STATE
98 describes the state of the estimation. */
99enum loop_estimation
100{
101 /* Estimate was not computed yet. */
102 EST_NOT_COMPUTED,
103 /* Estimate is ready. */
104 EST_AVAILABLE,
105 EST_LAST
106};
107
108/* The structure describing non-overflow control induction variable for
109 loop's exit edge. */
110struct GTY ((chain_next ("%h.next"))) control_iv {
111 tree base;
112 tree step;
113 struct control_iv *next;
114};
115
116/* Structure to hold information for each natural loop. */
117class GTY ((chain_next ("%h.next"))) loop {
118public:
119 /* Index into loops array. Note indices will never be reused after loop
120 is destroyed. */
121 int num;
122
123 /* Number of loop insns. */
124 unsigned ninsns;
125
126 /* Basic block of loop header. */
127 basic_block header;
128
129 /* Basic block of loop latch. */
130 basic_block latch;
131
132 /* For loop unrolling/peeling decision. */
133 struct lpt_decision lpt_decision;
134
135 /* Average number of executed insns per iteration. */
136 unsigned av_ninsns;
137
138 /* Number of blocks contained within the loop. */
139 unsigned num_nodes;
140
141 /* Superloops of the loop, starting with the outermost loop. */
142 vec<loop_p, va_gc> *superloops;
143
144 /* The first inner (child) loop or NULL if innermost loop. */
145 class loop *inner;
146
147 /* Link to the next (sibling) loop. */
148 class loop *next;
149
150 /* Auxiliary info specific to a pass. */
151 PTR GTY ((skip (""))) aux;
152
153 /* The number of times the latch of the loop is executed. This can be an
154 INTEGER_CST, or a symbolic expression representing the number of
155 iterations like "N - 1", or a COND_EXPR containing the runtime
156 conditions under which the number of iterations is non zero.
157
158 Don't access this field directly: number_of_latch_executions
159 computes and caches the computed information in this field. */
160 tree nb_iterations;
161
162 /* An integer guaranteed to be greater or equal to nb_iterations. Only
163 valid if any_upper_bound is true. */
164 widest_int nb_iterations_upper_bound;
165
166 widest_int nb_iterations_likely_upper_bound;
167
168 /* An integer giving an estimate on nb_iterations. Unlike
169 nb_iterations_upper_bound, there is no guarantee that it is at least
170 nb_iterations. */
171 widest_int nb_iterations_estimate;
172
173 /* If > 0, an integer, where the user asserted that for any
174 I in [ 0, nb_iterations ) and for any J in
175 [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
176 of the loop can be safely evaluated concurrently. */
177 int safelen;
178
179 /* Preferred vectorization factor for the loop if non-zero. */
180 int simdlen;
181
182 /* Constraints are generally set by consumers and affect certain
183 semantics of niter analyzer APIs. Currently the APIs affected are
184 number_of_iterations_exit* functions and their callers. One typical
185 use case of constraints is to vectorize a possibly infinite loop:
186
187 1) Compute niter->assumptions by calling niter analyzer API and
188 record it as possible condition for loop versioning.
189 2) Clear buffered result of niter/scev analyzer.
190 3) Set constraint LOOP_C_FINITE assuming the loop is finite.
191 4) Analyze data references. Since data reference analysis depends
192 on niter/scev analyzer, the point is that niter/scev analysis
193 is done under circumstance of LOOP_C_FINITE constraint.
194 5) Version the loop with niter->assumptions computed in step 1).
195 6) Vectorize the versioned loop in which niter->assumptions is
196 checked to be true.
197 7) Update constraints in versioned loops so that niter analyzer
198 in following passes can use it.
199
200 Note consumers are usually the loop optimizers and it is consumers'
201 responsibility to set/clear constraints correctly. Failing to do
202 that might result in hard-to-track-down bugs in niter/scev consumers. */
203 unsigned constraints;
204
205 /* An integer estimate of the number of iterations. ESTIMATE_STATE
206 describes the state of the estimation. */
207 ENUM_BITFIELD(loop_estimation) estimate_state : 8;
208
209 unsigned any_upper_bound : 1;
210 unsigned any_estimate : 1;
211 unsigned any_likely_upper_bound : 1;
212
213 /* True if the loop can be parallel. */
214 unsigned can_be_parallel : 1;
215
216 /* True if -Waggressive-loop-optimizations warned about this loop
217 already. */
218 unsigned warned_aggressive_loop_optimizations : 1;
219
220 /* True if this loop should never be vectorized. */
221 unsigned dont_vectorize : 1;
222
223 /* True if we should try harder to vectorize this loop. */
224 unsigned force_vectorize : 1;
225
226 /* True if the loop is part of an oacc kernels region. */
227 unsigned in_oacc_kernels_region : 1;
228
229 /* True if the loop is known to be finite. This is a localized
230 flag_finite_loops or similar pragmas state. */
231 unsigned finite_p : 1;
232
233 /* The number of times to unroll the loop. 0 means no information given,
234 just do what we always do. A value of 1 means do not unroll the loop.
235 A value of USHRT_MAX means unroll with no specific unrolling factor.
236 Other values means unroll with the given unrolling factor. */
237 unsigned short unroll;
238
239 /* If this loop was inlined the main clique of the callee which does
240 not need remapping when copying the loop body. */
241 unsigned short owned_clique;
242
243 /* For SIMD loops, this is a unique identifier of the loop, referenced
244 by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
245 builtins. */
246 tree simduid;
247
248 /* In loop optimization, it's common to generate loops from the original
249 loop. This field records the index of the original loop, which can be
250 used to track the original loop from newly generated loops. This can
251 be done by calling get_loop (cfun, orig_loop_num). Note that the
252 original loop could be destroyed for various reasons and thus no longer
253 exist, in which case get_loop returns a NULL pointer. When that
254 happens, this field should not be used and needs to be cleared
255 whenever possible. */
256 int orig_loop_num;
257
258 /* Upper bound on number of iterations of a loop. */
259 class nb_iter_bound *bounds;
260
261 /* Non-overflow control ivs of a loop. */
262 struct control_iv *control_ivs;
263
264 /* Head of the cyclic list of the exits of the loop. */
265 struct loop_exit *exits;
266
267 /* Number of iteration analysis data for RTL. */
268 class niter_desc *simple_loop_desc;
269
270 /* For sanity checking during loop fixup we record here the former
271 loop header for loops marked for removal. Note that this prevents
272 the basic-block from being collected but its index can still be
273 reused. */
274 basic_block former_header;
275};
276
277/* Set if the loop is known to be infinite. */
278#define LOOP_C_INFINITE (1 << 0)
279/* Set if the loop is known to be finite without any assumptions. */
280#define LOOP_C_FINITE (1 << 1)
281
282/* Set C to the LOOP constraint. */
283static inline void
284loop_constraint_set (class loop *loop, unsigned c)
285{
286 loop->constraints |= c;
287}
288
289/* Clear C from the LOOP constraint. */
290static inline void
291loop_constraint_clear (class loop *loop, unsigned c)
292{
293 loop->constraints &= ~c;
294}
295
296/* Check if C is set in the LOOP constraint. */
297static inline bool
298loop_constraint_set_p (class loop *loop, unsigned c)
299{
300 return (loop->constraints & c) == c;
301}
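For illustration (a sketch following step 3) of the constraints comment in class loop above), a niter consumer that temporarily assumes finiteness could write:

/* Assume LOOP is finite while running niter/scev queries, then
   drop the constraint again (illustrative only).  */
loop_constraint_set (loop, LOOP_C_FINITE);
gcc_assert (loop_constraint_set_p (loop, LOOP_C_FINITE));
/* ... niter/scev analysis under the LOOP_C_FINITE constraint ...  */
loop_constraint_clear (loop, LOOP_C_FINITE);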
302
303/* Flags for state of loop structure. */
304enum
305{
306 LOOPS_HAVE_PREHEADERS = 1,
307 LOOPS_HAVE_SIMPLE_LATCHES = 2,
308 LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
309 LOOPS_HAVE_RECORDED_EXITS = 8,
310 LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
311 LOOP_CLOSED_SSA = 32,
312 LOOPS_NEED_FIXUP = 64,
313 LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
314};
315
316#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
317 | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
318#define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
319
320/* Structure to hold CFG information about natural loops within a function. */
321struct GTY (()) loops {
322 /* State of loops. */
323 int state;
324
325 /* Array of the loops. */
326 vec<loop_p, va_gc> *larray;
327
328 /* Maps edges to the list of their descriptions as loop exits. Edges
329 whose sources or destinations have loop_father == NULL (which may
330 happen during the cfg manipulations) should not appear in EXITS. */
331 hash_table<loop_exit_hasher> *GTY(()) exits;
332
333 /* Pointer to root of loop hierarchy tree. */
334 class loop *tree_root;
335};
336
337/* Loop recognition. */
338bool bb_loop_header_p (basic_block);
339void init_loops_structure (struct function *, struct loops *, unsigned);
340extern struct loops *flow_loops_find (struct loops *);
341extern void disambiguate_loops_with_multiple_latches (void);
342extern void flow_loops_free (struct loops *);
343extern void flow_loops_dump (FILE *,
344 void (*)(const class loop *, FILE *, int), int);
345extern void flow_loop_dump (const class loop *, FILE *,
346 void (*)(const class loop *, FILE *, int), int);
347class loop *alloc_loop (void);
348extern void flow_loop_free (class loop *);
349int flow_loop_nodes_find (basic_block, class loop *);
350unsigned fix_loop_structure (bitmap changed_bbs);
351bool mark_irreducible_loops (void);
352void release_recorded_exits (function *);
353void record_loop_exits (void);
354void rescan_loop_exit (edge, bool, bool);
355void sort_sibling_loops (function *);
356
357/* Loop data structure manipulation/querying. */
358extern void flow_loop_tree_node_add (class loop *, class loop *,
359 class loop * = NULL);
360extern void flow_loop_tree_node_remove (class loop *);
361extern bool flow_loop_nested_p (const class loop *, const class loop *);
362extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block);
363extern class loop * find_common_loop (class loop *, class loop *);
364class loop *superloop_at_depth (class loop *, unsigned);
365struct eni_weights;
366extern int num_loop_insns (const class loop *);
367extern int average_num_loop_insns (const class loop *);
368extern unsigned get_loop_level (const class loop *);
369extern bool loop_exit_edge_p (const class loop *, const_edge);
370extern bool loop_exits_to_bb_p (class loop *, basic_block);
371extern bool loop_exits_from_bb_p (class loop *, basic_block);
372extern void mark_loop_exit_edges (void);
373extern dump_user_location_t get_loop_location (class loop *loop);
374
375/* Loops & cfg manipulation. */
376extern basic_block *get_loop_body (const class loop *);
377extern unsigned get_loop_body_with_size (const class loop *, basic_block *,
378 unsigned);
379extern basic_block *get_loop_body_in_dom_order (const class loop *);
380extern basic_block *get_loop_body_in_bfs_order (const class loop *);
381extern basic_block *get_loop_body_in_custom_order (const class loop *,
382 int (*) (const void *, const void *));
383extern basic_block *get_loop_body_in_custom_order (const class loop *, void *,
384 int (*) (const void *, const void *, void *));
385
386extern auto_vec<edge> get_loop_exit_edges (const class loop *, basic_block * = NULL);
387extern edge single_exit (const class loop *);
388extern edge single_likely_exit (class loop *loop, const vec<edge> &);
389extern unsigned num_loop_branches (const class loop *);
390
391extern edge loop_preheader_edge (const class loop *);
392extern edge loop_latch_edge (const class loop *);
393
394extern void add_bb_to_loop (basic_block, class loop *);
395extern void remove_bb_from_loops (basic_block);
396
397extern void cancel_loop_tree (class loop *);
398extern void delete_loop (class loop *);
399
400
401extern void verify_loop_structure (void);
402
403/* Loop analysis. */
404extern bool just_once_each_iteration_p (const class loop *, const_basic_block);
405gcov_type expected_loop_iterations_unbounded (const class loop *,
406 bool *read_profile_p = NULL, bool by_profile_only = false);
407extern unsigned expected_loop_iterations (class loop *);
408extern rtx doloop_condition_get (rtx_insn *);
409
410void mark_loop_for_removal (loop_p);
411
412/* Induction variable analysis. */
413
414/* The description of an induction variable. Things are a bit complicated
415 due to the need to handle subregs and extends. The value of the object described
416 by it can be obtained as follows (all computations are done in extend_mode):
417
418 Value in i-th iteration is
419 delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
420
421 If first_special is true, the value in the first iteration is
422 delta + mult * base
423
424 If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
425 subreg_{mode} (base + i * step)
426
427 The get_iv_value function can be used to obtain these expressions.
428
429 ??? Add a third mode field that would specify the mode in which the inner
430 computation is done, which would enable it to be different from the
431 outer one? */
432
433class rtx_iv
434{
435public:
436 /* Its base and step (mode of base and step is supposed to be extend_mode,
437 see the description above). */
438 rtx base, step;
439
440 /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
441 or IV_UNKNOWN_EXTEND). */
442 enum iv_extend_code extend;
443
444 /* Operations applied in the extended mode. */
445 rtx delta, mult;
446
447 /* The mode it is extended to. */
448 scalar_int_mode extend_mode;
449
450 /* The mode the variable iterates in. */
451 scalar_int_mode mode;
452
453 /* Whether the first iteration needs to be handled specially. */
454 unsigned first_special : 1;
455};
456
457/* The description of an exit from the loop and of the number of iterations
458 till we take the exit. */
459
460class GTY(()) niter_desc
461{
462public:
463 /* The edge out of the loop. */
464 edge out_edge;
465
466 /* The other edge leading from the condition. */
467 edge in_edge;
468
469 /* True if we are able to say anything about number of iterations of the
470 loop. */
471 bool simple_p;
472
473 /* True if the loop iterates a constant number of times. */
474 bool const_iter;
475
476 /* Number of iterations if constant. */
477 uint64_t niter;
478
479 /* Assumptions under which the rest of the information is valid. */
480 rtx assumptions;
481
482 /* Assumptions under which the loop ends before reaching the latch,
483 even if value of niter_expr says otherwise. */
484 rtx noloop_assumptions;
485
486 /* Condition under which the loop is infinite. */
487 rtx infinite;
488
489 /* Whether the comparison is signed. */
490 bool signed_p;
491
492 /* The mode in which niter_expr should be computed. */
493 scalar_int_mode mode;
494
495 /* The number of iterations of the loop. */
496 rtx niter_expr;
497};
498
499extern void iv_analysis_loop_init (class loop *);
500extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
501extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *);
502extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx,
503 class rtx_iv *);
504extern rtx get_iv_value (class rtx_iv *, rtx);
505extern bool biv_p (rtx_insn *, scalar_int_mode, rtx);
506extern void iv_analysis_done (void);
507
508extern class niter_desc *get_simple_loop_desc (class loop *loop);
509extern void free_simple_loop_desc (class loop *loop);
510
511static inline class niter_desc *
512simple_loop_desc (class loop *loop)
513{
514 return loop->simple_loop_desc;
515}
516
517/* Accessors for the loop structures. */
518
519/* Returns the loop with index NUM from FN's loop tree. */
520
521static inline class loop *
522get_loop (struct function *fn, unsigned num)
523{
524 return (*loops_for_fn (fn)->larray)[num];
525}
526
527/* Returns the number of superloops of LOOP. */
528
529static inline unsigned
530loop_depth (const class loop *loop)
531{
532 return vec_safe_length (loop->superloops);
533}
534
535/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
536 loop. */
537
538static inline class loop *
539loop_outer (const class loop *loop)
540{
541 unsigned n = vec_safe_length (loop->superloops);
542
543 if (n == 0)
544 return NULL;
545
546 return (*loop->superloops)[n - 1];
547}
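Together, loop_depth and loop_outer let a client walk outward through the superloop chain; a minimal sketch:

/* Dump the nest from LOOP outwards (sketch); loop_outer returns
   NULL once the outermost loop has been visited.  */
for (class loop *l = loop; l; l = loop_outer (l))
  fprintf (stderr, "loop %d at depth %u\n", l->num, loop_depth (l));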
548
549/* Returns true if LOOP has at least one exit edge. */
550
551static inline bool
552loop_has_exit_edges (const class loop *loop)
553{
554 return loop->exits->next->e != NULL;
555}
556
557/* Returns the list of loops in FN. */
558
559inline vec<loop_p, va_gc> *
560get_loops (struct function *fn)
561{
562 struct loops *loops = loops_for_fn (fn);
563 if (!loops)
564 return NULL;
565
566 return loops->larray;
567}
568
569/* Returns the number of loops in FN (including the removed
570 ones and the fake loop that forms the root of the loop tree). */
571
572static inline unsigned
573number_of_loops (struct function *fn)
574{
575 struct loops *loops = loops_for_fn (fn);
576 if (!loops)
6: Assuming 'loops' is non-null
7: Taking false branch
577 return 0;
578
579 return vec_safe_length (loops->larray);
8: Calling 'vec_safe_length<loop *, va_gc>'
11: Returning from 'vec_safe_length<loop *, va_gc>'
580}
581
582/* Returns true if state of the loops satisfies all properties
583 described by FLAGS. */
584
585static inline bool
586loops_state_satisfies_p (function *fn, unsigned flags)
587{
588 return (loops_for_fn (fn)->state & flags) == flags;
589}
590
591static inline bool
592loops_state_satisfies_p (unsigned flags)
593{
594 return loops_state_satisfies_p (cfun, flags);
595}
596
597/* Sets FLAGS to the loops state. */
598
599static inline void
600loops_state_set (function *fn, unsigned flags)
601{
602 loops_for_fn (fn)->state |= flags;
603}
604
605static inline void
606loops_state_set (unsigned flags)
607{
608 loops_state_set (cfun, flags);
609}
610
611/* Clears FLAGS from the loops state. */
612
613static inline void
614loops_state_clear (function *fn, unsigned flags)
615{
616 loops_for_fn (fn)->state &= ~flags;
617}
618
619static inline void
620loops_state_clear (unsigned flags)
621{
622 if (!current_loops)
623 return;
624 loops_state_clear (cfun, flags);
625}
626
627/* Check loop structure invariants, if internal consistency checks are
628 enabled. */
629
630static inline void
631checking_verify_loop_structure (void)
632{
633 /* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups.
634
635 The loop optimizers should never make changes to the CFG which
636 require loop fixups. But the low level CFG manipulation code may
637 set the flag conservatively.
638
639 Go ahead and clear the flag here. That avoids the assert inside
640 VERIFY_LOOP_STRUCTURE, and if there is an inconsistency in the loop
641 structures VERIFY_LOOP_STRUCTURE will detect it.
642
643 This also avoids the compile-time cost of excessive fixups. */
644 loops_state_clear (LOOPS_NEED_FIXUP);
645 if (flag_checking)
646 verify_loop_structure ();
647}
648
649/* Loop iterators. */
650
651/* Flags for loop iteration. */
652
653enum li_flags
654{
655 LI_INCLUDE_ROOT = 1, /* Include the fake root of the loop tree. */
656 LI_FROM_INNERMOST = 2, /* Iterate over the loops in the reverse order,
657 starting from innermost ones. */
658 LI_ONLY_INNERMOST = 4 /* Iterate only over innermost loops. */
659};
660
661/* Provide the functionality of std::as_const to support range-based for
662 to use const iterator. (We can't use std::as_const itself because it's
663 a C++17 feature.) */
664template <typename T>
665constexpr const T &
666as_const (T &t)
667{
668 return t;
669}
670
671/* A list for visiting loops, which contains the loop numbers instead of
672 the loop pointers. If the loop ROOT is given (non-null), the visiting
673 starts from it; otherwise it starts from the tree_root of
674 loops_for_fn (FN). The scope is restricted to function FN and
675 the visiting order is specified by FLAGS. */
676
677class loops_list
678{
679public:
680 loops_list (function *fn, unsigned flags, class loop *root = nullptr);
681
682 template <typename T> class Iter
683 {
684 public:
685 Iter (const loops_list &l, unsigned idx) : list (l), curr_idx (idx)
686 {
687 fill_curr_loop ();
688 }
689
690 T operator* () const { return curr_loop; }
691
692 Iter &
693 operator++ ()
694 {
695 if (curr_idx < list.to_visit.length ())
696 {
697 /* Bump the index and fill a new one. */
698 curr_idx++;
699 fill_curr_loop ();
700 }
701 else
702 gcc_assert (!curr_loop);
703
704 return *this;
705 }
706
707 bool
708 operator!= (const Iter &rhs) const
709 {
710 return this->curr_idx != rhs.curr_idx;
711 }
712
713 private:
714 /* Fill the current loop starting from the current index. */
715 void fill_curr_loop ();
716
717 /* Reference to the loop list to visit. */
718 const loops_list &list;
719
720 /* The current index in the list to visit. */
721 unsigned curr_idx;
722
723 /* The loop implied by the current index. */
724 class loop *curr_loop;
725 };
726
727 using iterator = Iter<class loop *>;
728 using const_iterator = Iter<const class loop *>;
729
730 iterator
731 begin ()
732 {
733 return iterator (*this, 0);
734 }
735
736 iterator
737 end ()
738 {
739 return iterator (*this, to_visit.length ());
740 }
741
742 const_iterator
743 begin () const
744 {
745 return const_iterator (*this, 0);
746 }
747
748 const_iterator
749 end () const
750 {
751 return const_iterator (*this, to_visit.length ());
752 }
753
754private:
755 /* Walk loop tree starting from ROOT as the visiting order specified
756 by FLAGS. */
757 void walk_loop_tree (class loop *root, unsigned flags);
758
759 /* The function we are visiting. */
760 function *fn;
761
762 /* The list of loops to visit. */
763 auto_vec<int, 16> to_visit;
764};
765
766/* Starting from the current index CURR_IDX (inclusive), find an index
767 that stands for a valid loop and record that loop in CURR_LOOP;
768 if we can't find one, set CURR_LOOP to null. */
769
770template <typename T>
771inline void
772loops_list::Iter<T>::fill_curr_loop ()
773{
774 int anum;
775
776 while (this->list.to_visit.iterate (this->curr_idx, &anum))
777 {
778 class loop *loop = get_loop (this->list.fn, anum);
779 if (loop)
780 {
781 curr_loop = loop;
782 return;
783 }
784 this->curr_idx++;
785 }
786
787 curr_loop = nullptr;
788}
789
790/* Set up the list of loops to visit according to the specified
791 function scope FN and iteration order FLAGS. If ROOT is
792 not null, the visiting starts from it; otherwise it
793 starts from the tree_root of loops_for_fn (FN). */
794
795inline loops_list::loops_list (function *fn, unsigned flags, class loop *root)
796{
797 struct loops *loops = loops_for_fn (fn);
798 gcc_assert (!root || loops);
799
800 /* Check that mutually exclusive flags do not co-exist. */
801 unsigned checked_flags = LI_ONLY_INNERMOST | LI_FROM_INNERMOST;
802 gcc_assert ((flags & checked_flags) != checked_flags);
803
804 this->fn = fn;
805 if (!loops)
806 return;
807
808 class loop *tree_root = root ? root : loops->tree_root;
809
810 this->to_visit.reserve_exact (number_of_loops (fn));
811
812 /* When root is tree_root of loops_for_fn (fn) and the visiting
813 order is LI_ONLY_INNERMOST, we would like to use linear
814 search here since it has a more stable bound than
815 walk_loop_tree. */
816 if (flags & LI_ONLY_INNERMOST && tree_root == loops->tree_root)
817 {
818 gcc_assert (tree_root->num == 0);
819 if (tree_root->inner == NULL)
820 {
821 if (flags & LI_INCLUDE_ROOT)
822 this->to_visit.quick_push (0);
823
824 return;
825 }
826
827 class loop *aloop;
828 unsigned int i;
829 for (i = 1; vec_safe_iterate (loops->larray, i, &aloop); i++)
830 if (aloop != NULL && aloop->inner == NULL)
831 this->to_visit.quick_push (aloop->num);
832 }
833 else
834 walk_loop_tree (tree_root, flags);
835}
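With this constructor in place, traversal reduces to a range-based for, as already used by create_preheaders in cfgloopmanip.c above; the flag choice here is illustrative:

/* Visit only the innermost loops of the current function (sketch).  */
for (auto loop : loops_list (cfun, LI_ONLY_INNERMOST))
  if (dump_file)
    fprintf (dump_file, "innermost loop %d\n", loop->num);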
836
837/* The properties of the target. */
838struct target_cfgloop {
839 /* Number of available registers. */
840 unsigned x_target_avail_regs;
841
842 /* Number of available registers that are call-clobbered. */
843 unsigned x_target_clobbered_regs;
844
845 /* Number of registers reserved for temporary expressions. */
846 unsigned x_target_res_regs;
847
848 /* The cost for register when there still is some reserve, but we are
849 approaching the number of available registers. */
850 unsigned x_target_reg_cost[2];
851
852 /* The cost for register when we need to spill. */
853 unsigned x_target_spill_cost[2];
854};
855
856extern struct target_cfgloop default_target_cfgloop;
857#if SWITCHABLE_TARGET
858extern struct target_cfgloop *this_target_cfgloop;
859#else
860#define this_target_cfgloop (&default_target_cfgloop)
861#endif
862
863#define target_avail_regs \
864 (this_target_cfgloop->x_target_avail_regs)
865#define target_clobbered_regs \
866 (this_target_cfgloop->x_target_clobbered_regs)
867#define target_res_regs \
868 (this_target_cfgloop->x_target_res_regs)
869#define target_reg_cost \
870 (this_target_cfgloop->x_target_reg_cost)
871#define target_spill_cost \
872 (this_target_cfgloop->x_target_spill_cost)
873
874/* Register pressure estimation for induction variable optimizations & loop
875 invariant motion. */
876extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
877extern void init_set_costs (void);
878
879/* Loop optimizer initialization. */
880extern void loop_optimizer_init (unsigned);
881extern void loop_optimizer_finalize (function *, bool = false);
882inline void
883loop_optimizer_finalize ()
884{
885 loop_optimizer_finalize (cfun);
886}
887
888/* Optimization passes. */
889enum
890{
891 UAP_UNROLL = 1, /* Enables unrolling of loops if it seems profitable. */
892 UAP_UNROLL_ALL = 2 /* Enables unrolling of all loops. */
893};
894
895extern void doloop_optimize_loops (void);
896extern void move_loop_invariants (void);
897extern auto_vec<basic_block> get_loop_hot_path (const class loop *loop);
898
899/* Returns the outermost loop of the loop nest that contains LOOP. */
900static inline class loop *
901loop_outermost (class loop *loop)
902{
903 unsigned n = vec_safe_length (loop->superloops);
904
905 if (n <= 1)
906 return loop;
907
908 return (*loop->superloops)[1];
909}
910
911extern void record_niter_bound (class loop *, const widest_int &, bool, bool);
912extern HOST_WIDE_INT get_estimated_loop_iterations_int (class loop *);
913extern HOST_WIDE_INT get_max_loop_iterations_int (const class loop *);
914extern HOST_WIDE_INT get_likely_max_loop_iterations_int (class loop *);
915extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit);
916extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit);
917extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit);
918extern int bb_loop_depth (const_basic_block);
919
920/* Converts VAL to widest_int. */
921
922static inline widest_int
923gcov_type_to_wide_int (gcov_type val)
924{
925 HOST_WIDE_INT a[2];
926
927 a[0] = (unsigned HOST_WIDE_INT) val;
928 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
929 the size of the type. */
930 val >>= HOST_BITS_PER_WIDE_INT - 1;
931 val >>= 1;
932 a[1] = (unsigned HOST_WIDE_INT) val;
933
934 return widest_int::from_array (a, 2);
935}
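The split shift matters: when gcov_type is 64 bits wide, a single shift by HOST_BITS_PER_WIDE_INT would shift by the full bit-width, which is undefined in C++. A small sketch of the effect, assuming a 64-bit arithmetic right shift:

/* Sketch: sign-extend into the high half without a full-width shift.  */
gcov_type val = -2;                  /* a[0] would get 0xfffffffffffffffe.  */
val >>= HOST_BITS_PER_WIDE_INT - 1;  /* val is now -1 (all sign bits).  */
val >>= 1;                           /* Still -1, so a[1] gets all ones.  */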
936#endif /* GCC_CFGLOOP_H */

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h

1/* Vector API for GNU compiler.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3 Contributed by Nathan Sidwell <nathan@codesourcery.com>
4 Re-implemented in C++ by Diego Novillo <dnovillo@google.com>
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 3, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
21
22#ifndef GCC_VEC_H
23#define GCC_VEC_H
24
25/* Some gen* files have no ggc support as the header file gtype-desc.h is
26 missing. Provide these definitions in case ggc.h has not been included.
27 This is not a problem because any code that runs before gengtype is built
28 will never need to use GC vectors. */
29
30extern void ggc_free (void *);
31extern size_t ggc_round_alloc_size (size_t requested_size);
32extern void *ggc_realloc (void *, size_t MEM_STAT_DECL);
33
34/* Templated vector type and associated interfaces.
35
36 The interface functions are typesafe and use inline functions,
37 sometimes backed by out-of-line generic functions. The vectors are
38 designed to interoperate with the GTY machinery.
39
40 There are both 'index' and 'iterate' accessors. The index accessor
41 is implemented by operator[]. The iterator returns a boolean
42 iteration condition and updates the iteration variable passed by
43 reference. Because the iterator will be inlined, the address-of
44 can be optimized away.
45
46 Each operation that increases the number of active elements is
47 available in 'quick' and 'safe' variants. The former presumes that
48 there is sufficient allocated space for the operation to succeed
49 (it dies if there is not). The latter will reallocate the
50 vector, if needed. Reallocation causes an exponential increase in
51 vector size. If you know you will be adding N elements, it would
52 be more efficient to use the reserve operation before adding the
53 elements with the 'quick' operation. This will ensure there are at
54 least as many slots as you ask for; the allocation will increase
55 exponentially if there are too few spare slots. If you want to reserve
56 a specific number of slots, but do not want the exponential increase
57 (for instance, you know this is the last allocation), use the
58 reserve_exact operation. You can also create a vector of a
59 specific size from the get-go.
60
61 You should prefer the push and pop operations, as they append and
62 remove from the end of the vector. If you need to remove several
63 items in one go, use the truncate operation. The insert and remove
64 operations allow you to change elements in the middle of the
65 vector. There are two remove operations, one which preserves the
66 element ordering 'ordered_remove', and one which does not
67 'unordered_remove'. The latter function copies the end element
68 into the removed slot, rather than invoking a memmove operation. The
69 'lower_bound' function will determine where to place an item in the
70 array using insert that will maintain sorted order.
71
72 Vectors are template types with three arguments: the type of the
73 elements in the vector, the allocation strategy, and the physical
74 layout to use
75
76 Four allocation strategies are supported:
77
78 - Heap: allocation is done using malloc/free. This is the
79 default allocation strategy.
80
81 - GC: allocation is done using ggc_alloc/ggc_free.
82
83 - GC atomic: same as GC with the exception that the elements
84 themselves are assumed to be of an atomic type that does
85 not need to be garbage collected. This means that marking
86 routines do not need to traverse the array marking the
87 individual elements. This increases the performance of
88 GC activities.
89
90 Two physical layouts are supported:
91
92 - Embedded: The vector is structured using the trailing array
93 idiom. The last member of the structure is an array of size
94 1. When the vector is initially allocated, a single memory
95 block is created to hold the vector's control data and the
96 array of elements. These vectors cannot grow without
97 reallocation (see discussion on embeddable vectors below).
98
99 - Space efficient: The vector is structured as a pointer to an
100 embedded vector. This is the default layout. It means that
101 vectors occupy a single word of storage before initial
102 allocation. Vectors are allowed to grow (the internal
103 pointer is reallocated but the main vector instance does not
104 need to relocate).
105
106 The type, allocation and layout are specified when the vector is
107 declared.
108
109 If you need to directly manipulate a vector, then the 'address'
110 accessor will return the address of the start of the vector. Also
111 the 'space' predicate will tell you whether there is spare capacity
112 in the vector. You will not normally need to use these two functions.
113
114 Notes on the different layout strategies
115
116 * Embeddable vectors (vec<T, A, vl_embed>)
117
118 These vectors are suitable to be embedded in other data
119 structures so that they can be pre-allocated in a contiguous
120 memory block.
121
122 Embeddable vectors are implemented using the trailing array
123 idiom, thus they are not resizeable without changing the address
124 of the vector object itself. This means you cannot have
125 variables or fields of embeddable vector type -- always use a
126 pointer to a vector. The one exception is the final field of a
127 structure, which could be a vector type.
128
129 You will have to use the embedded_size & embedded_init calls to
130 create such objects, and they will not be resizeable (so the
131 'safe' allocation variants are not available).
132
133 Properties of embeddable vectors:
134
135 - The whole vector and control data are allocated in a single
136 contiguous block. It uses the trailing-vector idiom, so
137 allocation must reserve enough space for all the elements
138 in the vector plus its control data.
139 - The vector cannot be re-allocated.
140 - The vector cannot grow nor shrink.
141 - No indirections needed for access/manipulation.
142 - It requires 2 words of storage (prior to vector allocation).
143
144
145 * Space efficient vector (vec<T, A, vl_ptr>)
146
147 These vectors can grow dynamically and are allocated together
148 with their control data. They are suited to be included in data
149 structures. Prior to initial allocation, they only take a single
150 word of storage.
151
152 These vectors are implemented as a pointer to embeddable vectors.
153 The semantics allow for this pointer to be NULL to represent
154 empty vectors. This way, empty vectors occupy minimal space in
155 the structure containing them.
156
157 Properties:
158
159 - The whole vector and control data are allocated in a single
160 contiguous block.
161 - The whole vector may be re-allocated.
162 - Vector data may grow and shrink.
163 - Access and manipulation requires a pointer test and
164 indirection.
165 - It requires 1 word of storage (prior to vector allocation).
166
167 An example of their use would be,
168
169 struct my_struct {
170 // A space-efficient vector of tree pointers in GC memory.
171 vec<tree, va_gc, vl_ptr> v;
172 };
173
174 struct my_struct *s;
175
176 if (s->v.length ()) { we have some contents }
177 s->v.safe_push (decl); // append some decl onto the end
178 for (ix = 0; s->v.iterate (ix, &elt); ix++)
179 { do something with elt }
180*/
181
182/* Support function for statistics. */
183extern void dump_vec_loc_statistics (void);
184
185/* Hashtable mapping vec addresses to descriptors. */
186extern htab_t vec_mem_usage_hash;
187
188/* Control data for vectors. This contains the number of allocated
189 and used slots inside a vector. */
190
191struct vec_prefix
192{
193 /* FIXME - These fields should be private, but we need to cater to
194 compilers that have stricter notions of PODness for types. */
195
196 /* Memory allocation support routines in vec.c. */
197 void register_overhead (void *, size_t, size_t CXX_MEM_STAT_INFO);
198 void release_overhead (void *, size_t, size_t, bool CXX_MEM_STAT_INFO);
199 static unsigned calculate_allocation (vec_prefix *, unsigned, bool);
200 static unsigned calculate_allocation_1 (unsigned, unsigned);
201
202 /* Note that vec_prefix should be a base class for vec, but we use
203 offsetof() on vector fields of tree structures (e.g.,
204 tree_binfo::base_binfos), and offsetof only supports base types.
205
206 To compensate, we make vec_prefix a field inside vec and make
207 vec a friend class of vec_prefix so it can access its fields. */
208 template <typename, typename, typename> friend struct vec;
209
210 /* The allocator types also need access to our internals. */
211 friend struct va_gc;
212 friend struct va_gc_atomic;
213 friend struct va_heap;
214
215 unsigned m_alloc : 31;
216 unsigned m_using_auto_storage : 1;
217 unsigned m_num;
218};
219
220/* Calculate the number of slots to reserve for a vector, making sure that
221 RESERVE slots are free. If EXACT, grow exactly, otherwise grow
222 exponentially. PFX is the control data for the vector. */
223
224inline unsigned
225vec_prefix::calculate_allocation (vec_prefix *pfx, unsigned reserve,
226 bool exact)
227{
228 if (exact)
229 return (pfx ? pfx->m_num : 0) + reserve;
230 else if (!pfx)
231 return MAX (4, reserve);
232 return calculate_allocation_1 (pfx->m_alloc, pfx->m_num + reserve);
233}
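A worked example of this policy (values derived from the code above; the vector contents are hypothetical):

/* Sketch of calculate_allocation results:
     pfx == NULL,      reserve == 2, exact   =>  0 + 2      = 2 slots
     pfx == NULL,      reserve == 2, !exact  =>  MAX (4, 2) = 4 slots
     pfx->m_num == 10, reserve == 3, exact   =>  10 + 3     = 13 slots
     pfx->m_num == 10, reserve == 3, !exact  =>  calculate_allocation_1
                       (pfx->m_alloc, 13), i.e. exponential growth.  */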
234
235template<typename, typename, typename> struct vec;
236
237/* Valid vector layouts
238
239 vl_embed - Embeddable vector that uses the trailing array idiom.
240 vl_ptr - Space efficient vector that uses a pointer to an
241 embeddable vector. */
242struct vl_embed { };
243struct vl_ptr { };
244
245
246/* Types of supported allocations
247
248 va_heap - Allocation uses malloc/free.
249 va_gc - Allocation uses ggc_alloc.
250 va_gc_atomic - Same as GC, but individual elements of the array
251 do not need to be marked during collection. */
252
253/* Allocator type for heap vectors. */
254struct va_heap
255{
256 /* Heap vectors are frequently regular instances, so use the vl_ptr
257 layout for them. */
258 typedef vl_ptr default_layout;
259
260 template<typename T>
261 static void reserve (vec<T, va_heap, vl_embed> *&, unsigned, bool
262 CXX_MEM_STAT_INFO);
263
264 template<typename T>
265 static void release (vec<T, va_heap, vl_embed> *&);
266};
267
268
269/* Allocator for heap memory. Ensure there are at least RESERVE free
270 slots in V. If EXACT is true, grow exactly, else grow
271 exponentially. As a special case, if the vector had not been
272 allocated and RESERVE is 0, no vector will be created. */
273
274template<typename T>
275inline void
276va_heap::reserve (vec<T, va_heap, vl_embed> *&v, unsigned reserve, bool exact
277 MEM_STAT_DECL)
278{
279 size_t elt_size = sizeof (T);
280 unsigned alloc
281 = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
282 gcc_checking_assert (alloc);
283
284 if (GATHER_STATISTICS && v)
285 v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
286 v->allocated (), false);
287
288 size_t size = vec<T, va_heap, vl_embed>::embedded_size (alloc);
289 unsigned nelem = v ? v->length () : 0;
290 v = static_cast <vec<T, va_heap, vl_embed> *> (xrealloc (v, size));
291 v->embedded_init (alloc, nelem);
292
293 if (GATHER_STATISTICS)
294 v->m_vecpfx.register_overhead (v, alloc, elt_size PASS_MEM_STAT);
295}
296
297
298#if GCC_VERSION >= 4007
299#pragma GCC diagnostic push
300#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
301#endif
302
303/* Free the heap space allocated for vector V. */
304
305template<typename T>
306void
307va_heap::release (vec<T, va_heap, vl_embed> *&v)
308{
309 size_t elt_size = sizeof (T);
310 if (v == NULL)
311 return;
312
313 if (GATHER_STATISTICS)
314 v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
315 v->allocated (), true);
316 ::free (v);
317 v = NULL;
318}
319
320#if GCC_VERSION >= 4007
321#pragma GCC diagnostic pop
322#endif
323
324/* Allocator type for GC vectors. Notice that we need the structure
325 declaration even if GC is not enabled. */
326
327struct va_gc
328{
329 /* Use vl_embed as the default layout for GC vectors. Due to GTY
330 limitations, GC vectors must always be pointers, so it is more
331 efficient to use a pointer to the vl_embed layout, rather than
332 using a pointer to a pointer as would be the case with vl_ptr. */
333 typedef vl_embed default_layout;
334
335 template<typename T, typename A>
336 static void reserve (vec<T, A, vl_embed> *&, unsigned, bool
337 CXX_MEM_STAT_INFO);
338
339 template<typename T, typename A>
340 static void release (vec<T, A, vl_embed> *&v);
341};
342
343
344/* Free GC memory used by V and reset V to NULL. */
345
346template<typename T, typename A>
347inline void
348va_gc::release (vec<T, A, vl_embed> *&v)
349{
350 if (v)
351 ::ggc_free (v);
352 v = NULL;
353}
354
355
356/* Allocator for GC memory. Ensure there are at least RESERVE free
357 slots in V. If EXACT is true, grow exactly, else grow
358 exponentially. As a special case, if the vector had not been
359 allocated and RESERVE is 0, no vector will be created. */
360
361template<typename T, typename A>
362void
363va_gc::reserve (vec<T, A, vl_embed> *&v, unsigned reserve, bool exact
364 MEM_STAT_DECL)
365{
366 unsigned alloc
367 = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
19.1: 'v' is non-null
20: '?' condition is true
368 if (!alloc)
21: Assuming 'alloc' is 0
22: Taking true branch
369 {
370 ::ggc_free (v);
371 v = NULL;
23: Null pointer value stored to field 'larray'
372 return;
373 }
374
375 /* Calculate the amount of space we want. */
376 size_t size = vec<T, A, vl_embed>::embedded_size (alloc);
377
378 /* Ask the allocator how much space it will really give us. */
379 size = ::ggc_round_alloc_size (size);
380
381 /* Adjust the number of slots accordingly. */
382 size_t vec_offset = sizeof (vec_prefix);
383 size_t elt_size = sizeof (T);
384 alloc = (size - vec_offset) / elt_size;
385
386 /* And finally, recalculate the amount of space we ask for. */
387 size = vec_offset + alloc * elt_size;
388
389 unsigned nelem = v ? v->length () : 0;
390 v = static_cast <vec<T, A, vl_embed> *> (::ggc_realloc (v, size
391 PASS_MEM_STAT));
392 v->embedded_init (alloc, nelem);
393}
394
395
396/* Allocator type for GC vectors. This is for vectors of types that are
397 atomic w.r.t. collection, so allocation and deallocation are
398 completely inherited from va_gc. */
399struct va_gc_atomic : va_gc
400{
401};
402
403
404/* Generic vector template. Default values for A and L indicate the
405 most commonly used strategies.
406
407 FIXME - Ideally, they would all be vl_ptr to encourage using regular
408 instances for vectors, but the existing GTY machinery is limited
409 in that it can only deal with GC objects that are pointers
410 themselves.
411
412 This means that vector operations that need to deal with
413 potentially NULL pointers must be provided as free
414 functions (see the vec_safe_* functions above). */
415template<typename T,
416 typename A = va_heap,
417 typename L = typename A::default_layout>
418struct GTY((user)) vec
419{
420};
421
422/* Allow C++11 range-based 'for' to work directly on vec<T>*. */
423template<typename T, typename A, typename L>
424T* begin (vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
425template<typename T, typename A, typename L>
426T* end (vec<T,A,L> *v) { return v ? v->end () : nullptr; }
427template<typename T, typename A, typename L>
428const T* begin (const vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
429template<typename T, typename A, typename L>
430const T* end (const vec<T,A,L> *v) { return v ? v->end () : nullptr; }
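These overloads make a possibly-NULL vector pointer directly iterable; a NULL vector simply yields an empty range. A sketch (the vector and the debug_slim call are illustrative):

/* Range-based for over a vec pointer: begin () and end () both
   return nullptr when the vector is NULL, so the body never runs.  */
vec<tree, va_gc> *decls = NULL;
for (tree t : decls)
  debug_slim (t);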
431
432/* Generic vec<> debug helpers.
433
434 These need to be instantiated for each vec<TYPE> used throughout
435 the compiler like this:
436
437 DEFINE_DEBUG_VEC (TYPE)
438
439 The reason we have a debug_helper() is that GDB can't
440 disambiguate a plain call to debug(some_vec); it must be called
441 like debug<TYPE>(some_vec). */
442
443template<typename T>
444void
445debug_helper (vec<T> &ref)
446{
447 unsigned i;
448 for (i = 0; i < ref.length (); ++i)
449 {
450 fprintf (stderr, "[%d] = ", i);
451 debug_slim (ref[i]);
452 fputc ('\n', stderr);
453 }
454}
455
456/* We need a separate va_gc variant here because default template
457 arguments for functions cannot be used in C++98.  Once this
458 restriction is removed, this variant should be folded into the
459 above debug_helper. */
460
461template<typename T>
462void
463debug_helper (vec<T, va_gc> &ref)
464{
465 unsigned i;
466 for (i = 0; i < ref.length (); ++i)
467 {
468 fprintf (stderr, "[%d] = ", i);
469 debug_slim (ref[i]);
470 fputc ('\n', stderr);
471 }
472}
473
474/* Macro to define debug(vec<T>) and debug(vec<T, va_gc>) helper
475 functions for a type T. */
476
477#define DEFINE_DEBUG_VEC(T) \
478 template void debug_helper (vec<T> &); \
479 template void debug_helper (vec<T, va_gc> &); \
480 /* Define the vec<T> debug functions. */ \
481 DEBUG_FUNCTION void \
482 debug (vec<T> &ref) \
483 { \
484 debug_helper <T> (ref); \
485 } \
486 DEBUG_FUNCTION void \
487 debug (vec<T> *ptr) \
488 { \
489 if (ptr) \
490 debug (*ptr); \
491 else \
492 fprintf (stderr, "<nil>\n"); \
493 } \
494 /* Define the vec<T, va_gc> debug functions. */ \
495 DEBUG_FUNCTION void \
496 debug (vec<T, va_gc> &ref) \
497 { \
498 debug_helper <T> (ref); \
499 } \
500 DEBUG_FUNCTION void \
501 debug (vec<T, va_gc> *ptr) \
502 { \
503 if (ptr) \
504 debug (*ptr); \
505 else \
506 fprintf (stderr, "<nil>\n"); \
507 }
508
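/* Illustrative sketch: a translation unit wanting GDB-callable debug
   routines for, say, vec<loop_p> would instantiate them once at file
   scope:

     DEFINE_DEBUG_VEC (loop_p)

   after which "call debug (my_vec)" works from the debugger for both
   references and pointers.  This assumes a debug_slim (loop_p)
   overload exists; loop_p is used here only as an example type.  */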
509/* Default-construct N elements in DST. */
510
511template <typename T>
512inline void
513vec_default_construct (T *dst, unsigned n)
514{
515#ifdef BROKEN_VALUE_INITIALIZATION
516 /* Versions of GCC before 4.4 sometimes leave certain objects
517 uninitialized when value initialized, though if the type has
518 user defined default ctor, that ctor is invoked. As a workaround
519 perform clearing first and then the value initialization, which
520 fixes the case when value initialization doesn't initialize due to
521 the bugs and should initialize to all zeros, but still allows
522 vectors for types with user defined default ctor that initializes
523 some or all elements to non-zero. If T has no user defined
524 default ctor and some non-static data members have user defined
525 default ctors that initialize to non-zero the workaround will
526 still not work properly; in that case we just need to provide
527 user defined default ctor. */
528 memset (dst, '\0', sizeof (T) * n);
529#endif
530 for ( ; n; ++dst, --n)
531 ::new (static_cast<void*>(dst)) T ();
532}
533
534/* Copy-construct N elements in DST from *SRC. */
535
536template <typename T>
537inline void
538vec_copy_construct (T *dst, const T *src, unsigned n)
539{
540 for ( ; n; ++dst, ++src, --n)
541 ::new (static_cast<void*>(dst)) T (*src);
542}
543
544/* Type to provide zero-initialized values for vec<T, A, L>. This is
545 used to provide nil initializers for vec instances. Since vec must
546 be a trivially copyable type that can be copied by memcpy and zeroed
547 out by memset, it must have defaulted default and copy ctor and copy
548 assignment. To initialize a vec either use value initialization
549 (e.g., vec() or vec v{ };) or assign it the value vNULL. This isn't
550 needed for file-scope and function-local static vectors, which are
551 zero-initialized by default. */
552struct vnull { };
553constexpr vnull vNULL{ };
554
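/* Illustrative sketch: the equivalent ways of creating an empty
   vector described above.

     vec<int> a = vNULL;    // from the vnull tag
     vec<int> b { };        // value initialization
     static vec<int> c;     // statics are zero-initialized anyway  */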
555
556/* Embeddable vector. These vectors are suitable to be embedded
557 in other data structures so that they can be pre-allocated in a
558 contiguous memory block.
559
560 Embeddable vectors are implemented using the trailing array idiom,
561 thus they are not resizeable without changing the address of the
562 vector object itself. This means you cannot have variables or
563 fields of embeddable vector type -- always use a pointer to a
564 vector. The one exception is the final field of a structure, which
565 could be a vector type.
566
567 You will have to use the embedded_size & embedded_init calls to
568 create such objects, and they will not be resizeable (so the 'safe'
569 allocation variants are not available).
570
571 Properties:
572
573 - The whole vector and control data are allocated in a single
574 contiguous block. It uses the trailing-vector idiom, so
575 allocation must reserve enough space for all the elements
576 in the vector plus its control data.
577 - The vector cannot be re-allocated.
578 - The vector cannot grow nor shrink.
579 - No indirections needed for access/manipulation.
580 - It requires 2 words of storage (prior to vector allocation). */
581
582template<typename T, typename A>
583struct GTY((user)) vec<T, A, vl_embed>
584{
585public:
586 unsigned allocated (void) const { return m_vecpfx.m_alloc; }
587 unsigned length (void) const { return m_vecpfx.m_num; }
588 bool is_empty (void) const { return m_vecpfx.m_num == 0; }
589 T *address (void) { return m_vecdata; }
590 const T *address (void) const { return m_vecdata; }
591 T *begin () { return address (); }
592 const T *begin () const { return address (); }
593 T *end () { return address () + length (); }
594 const T *end () const { return address () + length (); }
595 const T &operator[] (unsigned) const;
596 T &operator[] (unsigned);
597 T &last (void);
598 bool space (unsigned) const;
599 bool iterate (unsigned, T *) const;
600 bool iterate (unsigned, T **) const;
601 vec *copy (ALONE_CXX_MEM_STAT_INFO) const;
602 void splice (const vec &);
603 void splice (const vec *src);
604 T *quick_push (const T &);
605 T &pop (void);
606 void truncate (unsigned);
607 void quick_insert (unsigned, const T &);
608 void ordered_remove (unsigned);
609 void unordered_remove (unsigned);
610 void block_remove (unsigned, unsigned);
611 void qsort (int (*) (const void *, const void *));
612 void sort (int (*) (const void *, const void *, void *), void *);
613 void stablesort (int (*) (const void *, const void *, void *), void *);
614 T *bsearch (const void *key, int (*compar)(const void *, const void *));
615 T *bsearch (const void *key,
616 int (*compar)(const void *, const void *, void *), void *);
617 unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
618 bool contains (const T &search) const;
619 static size_t embedded_size (unsigned);
620 void embedded_init (unsigned, unsigned = 0, unsigned = 0);
621 void quick_grow (unsigned len);
622 void quick_grow_cleared (unsigned len);
623
624 /* vec class can access our internal data and functions. */
625 template <typename, typename, typename> friend struct vec;
626
627 /* The allocator types also need access to our internals. */
628 friend struct va_gc;
629 friend struct va_gc_atomic;
630 friend struct va_heap;
631
632 /* FIXME - These fields should be private, but we need to cater to
633 compilers that have stricter notions of PODness for types. */
634 vec_prefix m_vecpfx;
635 T m_vecdata[1];
636};
637
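/* Illustrative sketch (hypothetical struct, not from GCC): embedding
   an embeddable vector as the final member and allocating the whole
   object in one block via embedded_size/embedded_init, as described
   above.

     struct bundle
     {
       int kind;
       vec<int, va_heap, vl_embed> elts;   // must be the last field
     };

     size_t sz = offsetof (bundle, elts)
                 + vec<int, va_heap, vl_embed>::embedded_size (n);
     bundle *b = (bundle *) xmalloc (sz);
     b->elts.embedded_init (n);   // room for n elements, length 0  */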
638
639/* Convenience wrapper functions to use when dealing with pointers to
640 embedded vectors. Some functionality for these vectors must be
641 provided via free functions for these reasons:
642
643 1- The pointer may be NULL (e.g., before initial allocation).
644
645 2- When the vector needs to grow, it must be reallocated, so
646 the pointer will change its value.
647
648 Because of limitations with the current GC machinery, all vectors
649 in GC memory *must* be pointers. */
650
651
652/* If V has no room for NELEMS elements, return false.  Otherwise,
653 return true. */
654template<typename T, typename A>
655inline bool
656vec_safe_space (const vec<T, A, vl_embed> *v, unsigned nelems)
657{
658 return v ? v->space (nelems) : nelems == 0;
659}
660
661
662/* If V is NULL, return 0. Otherwise, return V->length(). */
663template<typename T, typename A>
664inline unsigned
665vec_safe_length (const vec<T, A, vl_embed> *v)
666{
667 return v ? v->length () : 0;
9: Assuming 'v' is non-null
10: '?' condition is true
668}
669
670
671/* If V is NULL, return NULL. Otherwise, return V->address(). */
672template<typename T, typename A>
673inline T *
674vec_safe_address (vec<T, A, vl_embed> *v)
675{
676 return v ? v->address () : NULL;
677}
678
679
680/* If V is NULL, return true. Otherwise, return V->is_empty(). */
681template<typename T, typename A>
682inline bool
683vec_safe_is_empty (vec<T, A, vl_embed> *v)
684{
685 return v ? v->is_empty () : true;
686}
687
688/* If V does not have space for NELEMS elements, call
689 V->reserve(NELEMS, EXACT). */
690template<typename T, typename A>
691inline bool
692vec_safe_reserve (vec<T, A, vl_embed> *&v, unsigned nelems, bool exact = false
693 CXX_MEM_STAT_INFO)
694{
695 bool extend = nelems ? !vec_safe_space (v, nelems) : false;
15.1: 'nelems' is 1
16: '?' condition is true
17: Assuming the condition is true
696 if (extend)
17.1: 'extend' is true
18: Taking true branch
697 A::reserve (v, nelems, exact PASS_MEM_STAT);
19: Calling 'va_gc::reserve'
24: Returning from 'va_gc::reserve'
698 return extend;
699}
700
701template<typename T, typename A>
702inline bool
703vec_safe_reserve_exact (vec<T, A, vl_embed> *&v, unsigned nelems
704 CXX_MEM_STAT_INFO)
705{
706 return vec_safe_reserve (v, nelems, true PASS_MEM_STAT);
707}
708
709
710/* Allocate GC memory for V with space for NELEMS slots. If NELEMS
711 is 0, V is initialized to NULL. */
712
713template<typename T, typename A>
714inline void
715vec_alloc (vec<T, A, vl_embed> *&v, unsigned nelems CXX_MEM_STAT_INFO)
716{
717 v = NULL;
718 vec_safe_reserve (v, nelems, false PASS_MEM_STAT);
719}
720
721
722/* Free the GC memory allocated by vector V and set it to NULL. */
723
724template<typename T, typename A>
725inline void
726vec_free (vec<T, A, vl_embed> *&v)
727{
728 A::release (v);
729}
730
731
732/* Grow V to length LEN. Allocate it, if necessary. */
733template<typename T, typename A>
734inline void
735vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len,
736 bool exact = false CXX_MEM_STAT_INFO)
737{
738 unsigned oldlen = vec_safe_length (v);
739 gcc_checking_assert (len >= oldlen);
740 vec_safe_reserve (v, len - oldlen, exact PASS_MEM_STAT);
741 v->quick_grow (len);
742}
743
744
745/* If V is NULL, allocate it. Call V->safe_grow_cleared(LEN). */
746template<typename T, typename A>
747inline void
748vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len,
749 bool exact = false CXX_MEM_STAT_INFO)
750{
751 unsigned oldlen = vec_safe_length (v);
752 vec_safe_grow (v, len, exact PASS_MEM_STAT);
753 vec_default_construct (v->address () + oldlen, len - oldlen);
754}
755
756
757/* Assume V is not NULL. */
758
759template<typename T>
760inline void
761vec_safe_grow_cleared (vec<T, va_heap, vl_ptr> *&v,
762 unsigned len, bool exact = false CXX_MEM_STAT_INFO)
763{
764 v->safe_grow_cleared (len, exact PASS_MEM_STAT);
765}
766
767/* If V does not have space for NELEMS elements, call
768 V->reserve(NELEMS, EXACT). */
769
770template<typename T>
771inline bool
772vec_safe_reserve (vec<T, va_heap, vl_ptr> *&v, unsigned nelems, bool exact = false
773 CXX_MEM_STAT_INFO)
774{
775 return v->reserve (nelems, exact);
776}
777
778
779/* If V is NULL return false, otherwise return V->iterate(IX, PTR). */
780template<typename T, typename A>
781inline bool
782vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T **ptr)
783{
784 if (v)
785 return v->iterate (ix, ptr);
786 else
787 {
788 *ptr = 0;
789 return false;
790 }
791}
792
793template<typename T, typename A>
794inline bool
795vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T *ptr)
796{
797 if (v)
798 return v->iterate (ix, ptr);
799 else
800 {
801 *ptr = 0;
802 return false;
803 }
804}
805
806
807/* If V has no room for one more element, reallocate it. Then call
808 V->quick_push(OBJ). */
809template<typename T, typename A>
810inline T *
811vec_safe_push (vec<T, A, vl_embed> *&v, const T &obj CXX_MEM_STAT_INFO)
812{
813 vec_safe_reserve (v, 1, false PASS_MEM_STAT);
15: Calling 'vec_safe_reserve<loop *, va_gc>'
25: Returning from 'vec_safe_reserve<loop *, va_gc>'
814 return v->quick_push (obj);
26: Called C++ object pointer is null
815}
816
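/* Note on the analyzer trace above (summary, not source text): the
   path assumes vec_safe_reserve was entered with a non-null V and
   NELEMS == 1, yet also assumes calculate_allocation returned 0
   inside va_gc::reserve, which frees V and stores NULL back through
   the reference; the subsequent v->quick_push then appears to
   dereference a null pointer.  For a nonzero NELEMS,
   calculate_allocation does not return 0, so the two assumptions
   cannot hold together; this looks like an infeasible path rather
   than a real bug.  */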
817
818/* If V has no room for one more element, reallocate it. Then call
819 V->quick_insert(IX, OBJ). */
820template<typename T, typename A>
821inline void
822vec_safe_insert (vec<T, A, vl_embed> *&v, unsigned ix, const T &obj
823 CXX_MEM_STAT_INFO)
824{
825 vec_safe_reserve (v, 1, false PASS_MEM_STAT);
826 v->quick_insert (ix, obj);
827}
828
829
830/* If V is NULL, do nothing. Otherwise, call V->truncate(SIZE). */
831template<typename T, typename A>
832inline void
833vec_safe_truncate (vec<T, A, vl_embed> *v, unsigned size)
834{
835 if (v)
836 v->truncate (size);
837}
838
839
840/* If SRC is not NULL, return a pointer to a copy of it. */
841template<typename T, typename A>
842inline vec<T, A, vl_embed> *
843vec_safe_copy (vec<T, A, vl_embed> *src CXX_MEM_STAT_INFO)
844{
845 return src ? src->copy (ALONE_PASS_MEM_STAT) : NULL;
846}
847
848/* Copy the elements from SRC to the end of DST as if by memcpy.
849 Reallocate DST, if necessary. */
850template<typename T, typename A>
851inline void
852vec_safe_splice (vec<T, A, vl_embed> *&dst, const vec<T, A, vl_embed> *src
853 CXX_MEM_STAT_INFO)
854{
855 unsigned src_len = vec_safe_length (src);
856 if (src_len)
857 {
858 vec_safe_reserve_exact (dst, vec_safe_length (dst) + src_len
859 PASS_MEM_STAT);
860 dst->splice (*src);
861 }
862}
863
864/* Return true if SEARCH is an element of V. Note that this is O(N) in the
865 size of the vector and so should be used with care. */
866
867template<typename T, typename A>
868inline bool
869vec_safe_contains (vec<T, A, vl_embed> *v, const T &search)
870{
871 return v ? v->contains (search) : false;
872}
873
874/* Index into vector. Return the IX'th element. IX must be in the
875 domain of the vector. */
876
877template<typename T, typename A>
878inline const T &
879vec<T, A, vl_embed>::operator[] (unsigned ix) const
880{
881 gcc_checking_assert (ix < m_vecpfx.m_num);
882 return m_vecdata[ix];
883}
884
885template<typename T, typename A>
886inline T &
887vec<T, A, vl_embed>::operator[] (unsigned ix)
888{
889 gcc_checking_assert (ix < m_vecpfx.m_num);
890 return m_vecdata[ix];
891}
892
893
894/* Get the final element of the vector, which must not be empty. */
895
896template<typename T, typename A>
897inline T &
898vec<T, A, vl_embed>::last (void)
899{
900 gcc_checking_assert (m_vecpfx.m_num > 0);
901 return (*this)[m_vecpfx.m_num - 1];
902}
903
904
905/* If this vector has space for NELEMS additional entries, return
906 true. You usually only need to use this if you are doing your
907 own vector reallocation, for instance on an embedded vector. This
908 returns true in exactly the same circumstances that vec::reserve
909 will. */
910
911template<typename T, typename A>
912inline bool
913vec<T, A, vl_embed>::space (unsigned nelems) const
914{
915 return m_vecpfx.m_alloc - m_vecpfx.m_num >= nelems;
916}
917
918
919/* Return iteration condition and update PTR to point to the IX'th
920 element of this vector. Use this to iterate over the elements of a
921 vector as follows,
922
923 for (ix = 0; vec<T, A>::iterate (v, ix, &ptr); ix++)
924 continue; */
925
926template<typename T, typename A>
927inline bool
928vec<T, A, vl_embed>::iterate (unsigned ix, T *ptr) const
929{
930 if (ix < m_vecpfx.m_num)
931 {
932 *ptr = m_vecdata[ix];
933 return true;
934 }
935 else
936 {
937 *ptr = 0;
938 return false;
939 }
940}
941
942
943/* Return iteration condition and update *PTR to point to the
944 IX'th element of this vector. Use this to iterate over the
945 elements of a vector as follows,
946
947 for (ix = 0; v->iterate (ix, &ptr); ix++)
948 continue;
949
950 This variant is for vectors of objects. */
951
952template<typename T, typename A>
953inline bool
954vec<T, A, vl_embed>::iterate (unsigned ix, T **ptr) const
955{
956 if (ix < m_vecpfx.m_num)
957 {
958 *ptr = CONST_CAST (T *, &m_vecdata[ix]);
959 return true;
960 }
961 else
962 {
963 *ptr = 0;
964 return false;
965 }
966}
967
968
969/* Return a pointer to a copy of this vector. */
970
971template<typename T, typename A>
972inline vec<T, A, vl_embed> *
973vec<T, A, vl_embed>::copy (ALONE_MEM_STAT_DECL) const
974{
975 vec<T, A, vl_embed> *new_vec = NULL;
976 unsigned len = length ();
977 if (len)
978 {
979 vec_alloc (new_vec, len PASS_MEM_STAT);
980 new_vec->embedded_init (len, len);
981 vec_copy_construct (new_vec->address (), m_vecdata, len);
982 }
983 return new_vec;
984}
985
986
987/* Copy the elements from SRC to the end of this vector as if by memcpy.
988 The vector must have sufficient headroom available. */
989
990template<typename T, typename A>
991inline void
992vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> &src)
993{
994 unsigned len = src.length ();
995 if (len)
996 {
997 gcc_checking_assert (space (len));
998 vec_copy_construct (end (), src.address (), len);
999 m_vecpfx.m_num += len;
1000 }
1001}
1002
1003template<typename T, typename A>
1004inline void
1005vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> *src)
1006{
1007 if (src)
1008 splice (*src);
1009}
1010
1011
1012/* Push OBJ (a new element) onto the end of the vector. There must be
1013 sufficient space in the vector. Return a pointer to the slot
1014 where OBJ was inserted. */
1015
1016template<typename T, typename A>
1017inline T *
1018vec<T, A, vl_embed>::quick_push (const T &obj)
1019{
1020 gcc_checking_assert (space (1));
1021 T *slot = &m_vecdata[m_vecpfx.m_num++];
1022 *slot = obj;
1023 return slot;
1024}
1025
1026
1027/* Pop and return the last element off the end of the vector. */
1028
1029template<typename T, typename A>
1030inline T &
1031vec<T, A, vl_embed>::pop (void)
1032{
1033 gcc_checking_assert (length () > 0);
1034 return m_vecdata[--m_vecpfx.m_num];
1035}
1036
1037
1038/* Set the length of the vector to SIZE. The new length must be less
1039 than or equal to the current length. This is an O(1) operation. */
1040
1041template<typename T, typename A>
1042inline void
1043vec<T, A, vl_embed>::truncate (unsigned size)
1044{
1045 gcc_checking_assert (length () >= size);
1046 m_vecpfx.m_num = size;
1047}
1048
1049
1050/* Insert an element, OBJ, at the IXth position of this vector. There
1051 must be sufficient space. */
1052
1053template<typename T, typename A>
1054inline void
1055vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
1056{
1057 gcc_checking_assert (length () < allocated ());
1058 gcc_checking_assert (ix <= length ());
1059 T *slot = &m_vecdata[ix];
1060 memmove (slot + 1, slot, (m_vecpfx.m_num++ - ix) * sizeof (T));
1061 *slot = obj;
1062}
1063
1064
1065/* Remove an element from the IXth position of this vector. Ordering of
1066 remaining elements is preserved. This is an O(N) operation due to
1067 memmove. */
1068
1069template<typename T, typename A>
1070inline void
1071vec<T, A, vl_embed>::ordered_remove (unsigned ix)
1072{
1073 gcc_checking_assert (ix < length ());
1074 T *slot = &m_vecdata[ix];
1075 memmove (slot, slot + 1, (--m_vecpfx.m_num - ix) * sizeof (T));
1076}
1077
1078
1079/* Remove elements in [START, END) from VEC for which COND holds. Ordering of
1080 remaining elements is preserved. This is an O(N) operation. */
1081
1082#define VEC_ORDERED_REMOVE_IF_FROM_TO(vec, read_index, write_index, \
1083 elem_ptr, start, end, cond) \
1084 { \
1085 gcc_assert ((end) <= (vec).length ()); \
1086 for (read_index = write_index = (start); read_index < (end); \
1087 ++read_index) \
1088 { \
1089 elem_ptr = &(vec)[read_index]; \
1090 bool remove_p = (cond); \
1091 if (remove_p) \
1092 continue; \
1093 \
1094 if (read_index != write_index) \
1095 (vec)[write_index] = (vec)[read_index]; \
1096 \
1097 write_index++; \
1098 } \
1099 \
1100 if (read_index - write_index > 0) \
1101 (vec).block_remove (write_index, read_index - write_index); \
1102 }
1103
1104
1105/* Remove elements from VEC for which COND holds. Ordering of remaining
1106 elements is preserved. This is an O(N) operation. */
1107
1108#define VEC_ORDERED_REMOVE_IF(vec, read_index, write_index, elem_ptr, \
1109 cond) \
1110 VEC_ORDERED_REMOVE_IF_FROM_TO ((vec), read_index, write_index, \
1111 elem_ptr, 0, (vec).length (), (cond))
1113/* Remove an element from the IXth position of this vector. Ordering of
1114 remaining elements is destroyed. This is an O(1) operation. */
1115
1116template<typename T, typename A>
1117inline void
1118vec<T, A, vl_embed>::unordered_remove (unsigned ix)
1119{
1120 gcc_checking_assert (ix < length ());
1121 m_vecdata[ix] = m_vecdata[--m_vecpfx.m_num];
1122}
1123
1124
1125/* Remove LEN elements starting at the IXth. Ordering is retained.
1126 This is an O(N) operation due to memmove. */
1127
1128template<typename T, typename A>
1129inline void
1130vec<T, A, vl_embed>::block_remove (unsigned ix, unsigned len)
1131{
1132 gcc_checking_assert (ix + len <= length ());
1133 T *slot = &m_vecdata[ix];
1134 m_vecpfx.m_num -= len;
1135 memmove (slot, slot + len, (m_vecpfx.m_num - ix) * sizeof (T));
1136}
1137
1138
1139/* Sort the contents of this vector with qsort. CMP is the comparison
1140 function to pass to qsort. */
1141
1142template<typename T, typename A>
1143inline void
1144vec<T, A, vl_embed>::qsort (int (*cmp) (const void *, const void *))
1145{
1146 if (length () > 1)
1147 gcc_qsort (address (), length (), sizeof (T), cmp);
1148}
1149
1150/* Sort the contents of this vector with qsort. CMP is the comparison
1151 function to pass to qsort. */
1152
1153template<typename T, typename A>
1154inline void
1155vec<T, A, vl_embed>::sort (int (*cmp) (const void *, const void *, void *),
1156 void *data)
1157{
1158 if (length () > 1)
1159 gcc_sort_r (address (), length (), sizeof (T), cmp, data);
1160}
1161
1162/* Sort the contents of this vector with gcc_stablesort_r. CMP is the
1163 comparison function to pass to qsort. */
1164
1165template<typename T, typename A>
1166inline void
1167vec<T, A, vl_embed>::stablesort (int (*cmp) (const void *, const void *,
1168 void *), void *data)
1169{
1170 if (length () > 1)
1171 gcc_stablesort_r (address (), length (), sizeof (T), cmp, data);
1172}
1173
1174/* Search the contents of the sorted vector with a binary search.
1175 CMP is the comparison function to pass to bsearch. */
1176
1177template<typename T, typename A>
1178inline T *
1179vec<T, A, vl_embed>::bsearch (const void *key,
1180 int (*compar) (const void *, const void *))
1181{
1182 const void *base = this->address ();
1183 size_t nmemb = this->length ();
1184 size_t size = sizeof (T);
1185 /* The following is a copy of glibc stdlib-bsearch.h. */
1186 size_t l, u, idx;
1187 const void *p;
1188 int comparison;
1189
1190 l = 0;
1191 u = nmemb;
1192 while (l < u)
1193 {
1194 idx = (l + u) / 2;
1195 p = (const void *) (((const char *) base) + (idx * size));
1196 comparison = (*compar) (key, p);
1197 if (comparison < 0)
1198 u = idx;
1199 else if (comparison > 0)
1200 l = idx + 1;
1201 else
1202 return (T *)const_cast<void *>(p);
1203 }
1204
1205 return NULL;
1206}
1207
1208/* Search the contents of the sorted vector with a binary search.
1209 CMP is the comparison function to pass to bsearch. */
1210
1211template<typename T, typename A>
1212inline T *
1213vec<T, A, vl_embed>::bsearch (const void *key,
1214 int (*compar) (const void *, const void *,
1215 void *), void *data)
1216{
1217 const void *base = this->address ();
1218 size_t nmemb = this->length ();
1219 size_t size = sizeof (T);
1220 /* The following is a copy of glibc stdlib-bsearch.h. */
1221 size_t l, u, idx;
1222 const void *p;
1223 int comparison;
1224
1225 l = 0;
1226 u = nmemb;
1227 while (l < u)
1228 {
1229 idx = (l + u) / 2;
1230 p = (const void *) (((const char *) base) + (idx * size));
1231 comparison = (*compar) (key, p, data);
1232 if (comparison < 0)
1233 u = idx;
1234 else if (comparison > 0)
1235 l = idx + 1;
1236 else
1237 return (T *)const_cast<void *>(p);
1238 }
1239
1240 return NULL;
1241}
1242
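/* Illustrative sketch: a bsearch-style comparator and lookup on a
   sorted vec<int> (placeholder names).

     static int
     cmp_int (const void *a, const void *b)
     {
       return *(const int *) a - *(const int *) b;
     }

     int key = 42;
     int *slot = v->bsearch (&key, cmp_int);   // NULL when absent  */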
1243/* Return true if SEARCH is an element of V. Note that this is O(N) in the
1244 size of the vector and so should be used with care. */
1245
1246template<typename T, typename A>
1247inline bool
1248vec<T, A, vl_embed>::contains (const T &search) const
1249{
1250 unsigned int len = length ();
1251 for (unsigned int i = 0; i < len; i++)
1252 if ((*this)[i] == search)
1253 return true;
1254
1255 return false;
1256}
1257
1258/* Find and return the first position in which OBJ could be inserted
1259 without changing the ordering of this vector. LESSTHAN is a
1260 function that returns true if the first argument is strictly less
1261 than the second. */
1262
1263template<typename T, typename A>
1264unsigned
1265vec<T, A, vl_embed>::lower_bound (T obj, bool (*lessthan)(const T &, const T &))
1266 const
1267{
1268 unsigned int len = length ();
1269 unsigned int half, middle;
1270 unsigned int first = 0;
1271 while (len > 0)
1272 {
1273 half = len / 2;
1274 middle = first;
1275 middle += half;
1276 T middle_elem = (*this)[middle];
1277 if (lessthan (middle_elem, obj))
1278 {
1279 first = middle;
1280 ++first;
1281 len = len - half - 1;
1282 }
1283 else
1284 len = half;
1285 }
1286 return first;
1287}
1288
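/* Illustrative sketch: keeping a vector sorted by inserting each new
   element at the position lower_bound reports (placeholder names;
   assumes the vector has headroom for quick_insert).

     static bool int_lt (const int &a, const int &b) { return a < b; }

     unsigned pos = v->lower_bound (x, int_lt);
     v->quick_insert (pos, x);  */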
1289
1290/* Return the number of bytes needed to embed an instance of an
1291 embeddable vec inside another data structure.
1292
1293 Use these methods to determine the required size and initialization
1294 of a vector V of type T embedded within another structure (as the
1295 final member):
1296
1297 size_t vec<T, A, vl_embed>::embedded_size (unsigned alloc);
1298 void v->embedded_init (unsigned alloc, unsigned num);
1299
1300 These allow the caller to perform the memory allocation. */
1301
1302template<typename T, typename A>
1303inline size_t
1304vec<T, A, vl_embed>::embedded_size (unsigned alloc)
1305{
1306 struct alignas (T) U { char data[sizeof (T)]; };
1307 typedef vec<U, A, vl_embed> vec_embedded;
1308 typedef typename std::conditional<std::is_standard_layout<T>::value,
1309 vec, vec_embedded>::type vec_stdlayout;
1310 static_assert (sizeof (vec_stdlayout) == sizeof (vec), "");
1311 static_assert (alignof (vec_stdlayout) == alignof (vec), "");
1312 return offsetof (vec_stdlayout, m_vecdata) + alloc * sizeof (T);
1313}
1314
1315
1316/* Initialize the vector to contain room for ALLOC elements and
1317 NUM active elements. */
1318
1319template<typename T, typename A>
1320inline void
1321vec<T, A, vl_embed>::embedded_init (unsigned alloc, unsigned num, unsigned aut)
1322{
1323 m_vecpfx.m_alloc = alloc;
1324 m_vecpfx.m_using_auto_storage = aut;
1325 m_vecpfx.m_num = num;
1326}
1327
1328
1329/* Grow the vector to a specific length. LEN must be as long or longer than
1330 the current length. The new elements are uninitialized. */
1331
1332template<typename T, typename A>
1333inline void
1334vec<T, A, vl_embed>::quick_grow (unsigned len)
1335{
1336 gcc_checking_assert (length () <= len && len <= m_vecpfx.m_alloc);
1337 m_vecpfx.m_num = len;
1338}
1339
1340
1341/* Grow the vector to a specific length. LEN must be as long or longer than
1342 the current length. The new elements are initialized to zero. */
1343
1344template<typename T, typename A>
1345inline void
1346vec<T, A, vl_embed>::quick_grow_cleared (unsigned len)
1347{
1348 unsigned oldlen = length ();
1349 size_t growby = len - oldlen;
1350 quick_grow (len);
1351 if (growby != 0)
1352 vec_default_construct (address () + oldlen, growby);
1353}
1354
1355/* Garbage collection support for vec<T, A, vl_embed>. */
1356
1357template<typename T>
1358void
1359gt_ggc_mx (vec<T, va_gc> *v)
1360{
1361 extern void gt_ggc_mx (T &);
1362 for (unsigned i = 0; i < v->length (); i++)
1363 gt_ggc_mx ((*v)[i]);
1364}
1365
1366template<typename T>
1367void
1368gt_ggc_mx (vec<T, va_gc_atomic, vl_embed> *v ATTRIBUTE_UNUSED)
1369{
1370 /* Nothing to do. Vectors of atomic types wrt GC do not need to
1371 be traversed. */
1372}
1373
1374
1375/* PCH support for vec<T, A, vl_embed>. */
1376
1377template<typename T, typename A>
1378void
1379gt_pch_nx (vec<T, A, vl_embed> *v)
1380{
1381 extern void gt_pch_nx (T &);
1382 for (unsigned i = 0; i < v->length (); i++)
1383 gt_pch_nx ((*v)[i]);
1384}
1385
1386template<typename T, typename A>
1387void
1388gt_pch_nx (vec<T *, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
1389{
1390 for (unsigned i = 0; i < v->length (); i++)
1391 op (&((*v)[i]), cookie);
1392}
1393
1394template<typename T, typename A>
1395void
1396gt_pch_nx (vec<T, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
1397{
1398 extern void gt_pch_nx (T *, gt_pointer_operator, void *);
1399 for (unsigned i = 0; i < v->length (); i++)
1400 gt_pch_nx (&((*v)[i]), op, cookie);
1401}
1402
1403
1404/* Space efficient vector. These vectors can grow dynamically and are
1405 allocated together with their control data. They are suited to be
1406 included in data structures. Prior to initial allocation, they
1407 only take a single word of storage.
1408
1409 These vectors are implemented as a pointer to an embeddable vector.
1410 The semantics allow for this pointer to be NULL to represent empty
1411 vectors. This way, empty vectors occupy minimal space in the
1412 structure containing them.
1413
1414 Properties:
1415
1416 - The whole vector and control data are allocated in a single
1417 contiguous block.
1418 - The whole vector may be re-allocated.
1419 - Vector data may grow and shrink.
1420 - Access and manipulation requires a pointer test and
1421 indirection.
1422 - It requires 1 word of storage (prior to vector allocation).
1423
1424
1425 Limitations:
1426
1427 These vectors must be PODs because they are stored in unions.
1428 (http://en.wikipedia.org/wiki/Plain_old_data_structures).
1429 As long as we use C++03, we cannot have constructors nor
1430 destructors in classes that are stored in unions. */
1431
1432template<typename T, size_t N = 0>
1433class auto_vec;
1434
1435template<typename T>
1436struct vec<T, va_heap, vl_ptr>
1437{
1438public:
1439 /* Default ctors to ensure triviality. Use value-initialization
1440 (e.g., vec() or vec v{ };) or vNULL to create a zero-initialized
1441 instance. */
1442 vec () = default;
1443 vec (const vec &) = default;
1444 /* Initialization from the generic vNULL. */
1445 vec (vnull): m_vec () { }
1446 /* Same as default ctor: vec storage must be released manually. */
1447 ~vec () = default;
1448
1449 /* Defaulted same as copy ctor. */
1450 vec& operator= (const vec &) = default;
1451
1452 /* Prevent implicit conversion from auto_vec. Use auto_vec::to_vec()
1453 instead. */
1454 template <size_t N>
1455 vec (auto_vec<T, N> &) = delete;
1456
1457 template <size_t N>
1458 void operator= (auto_vec<T, N> &) = delete;
1459
1460 /* Memory allocation and deallocation for the embedded vector.
1461 Needed because we cannot have proper ctors/dtors defined. */
1462 void create (unsigned nelems CXX_MEM_STAT_INFO);
1463 void release (void);
1464
1465 /* Vector operations. */
1466 bool exists (void) const
1467 { return m_vec != NULL; }
1468
1469 bool is_empty (void) const
1470 { return m_vec ? m_vec->is_empty () : true; }
1471
1472 unsigned length (void) const
1473 { return m_vec ? m_vec->length () : 0; }
1474
1475 T *address (void)
1476 { return m_vec ? m_vec->m_vecdata : NULL; }
1477
1478 const T *address (void) const
1479 { return m_vec ? m_vec->m_vecdata : NULL; }
1480
1481 T *begin () { return address (); }
1482 const T *begin () const { return address (); }
1483 T *end () { return begin () + length (); }
1484 const T *end () const { return begin () + length (); }
1485 const T &operator[] (unsigned ix) const
1486 { return (*m_vec)[ix]; }
1487
1488 bool operator!=(const vec &other) const
1489 { return !(*this == other); }
1490
1491 bool operator==(const vec &other) const
1492 { return address () == other.address (); }
1493
1494 T &operator[] (unsigned ix)
1495 { return (*m_vec)[ix]; }
1496
1497 T &last (void)
1498 { return m_vec->last (); }
1499
1500 bool space (int nelems) const
1501 { return m_vec ? m_vec->space (nelems) : nelems == 0; }
1502
1503 bool iterate (unsigned ix, T *p) const;
1504 bool iterate (unsigned ix, T **p) const;
1505 vec copy (ALONE_CXX_MEM_STAT_INFO) const;
1506 bool reserve (unsigned, bool = false CXX_MEM_STAT_INFO);
1507 bool reserve_exact (unsigned CXX_MEM_STAT_INFO);
1508 void splice (const vec &);
1509 void safe_splice (const vec & CXX_MEM_STAT_INFO);
1510 T *quick_push (const T &);
1511 T *safe_push (const T &CXX_MEM_STAT_INFO);
1512 T &pop (void);
1513 void truncate (unsigned);
1514 void safe_grow (unsigned, bool = false CXX_MEM_STAT_INFO);
1515 void safe_grow_cleared (unsigned, bool = false CXX_MEM_STAT_INFO);
1516 void quick_grow (unsigned);
1517 void quick_grow_cleared (unsigned);
1518 void quick_insert (unsigned, const T &);
1519 void safe_insert (unsigned, const T & CXX_MEM_STAT_INFO);
1520 void ordered_remove (unsigned);
1521 void unordered_remove (unsigned);
1522 void block_remove (unsigned, unsigned);
1523 void qsort (int (*) (const void *, const void *));
1524 void sort (int (*) (const void *, const void *, void *), void *);
1525 void stablesort (int (*) (const void *, const void *, void *), void *);
1526 T *bsearch (const void *key, int (*compar)(const void *, const void *));
1527 T *bsearch (const void *key,
1528 int (*compar)(const void *, const void *, void *), void *);
1529 unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
1530 bool contains (const T &search) const;
1531 void reverse (void);
1532
1533 bool using_auto_storage () const;
1534
1535 /* FIXME - This field should be private, but we need to cater to
1536 compilers that have stricter notions of PODness for types. */
1537 vec<T, va_heap, vl_embed> *m_vec;
1538};
1539
1540
1541/* auto_vec is a subclass of vec that automatically manages creating and
1542 releasing the internal vector.  If N is nonzero then it has N elements of
1543 internal storage.  The default is no internal storage; you probably only
1544 want internal storage for vectors on the stack, because if the vector
1545 grows beyond the internal storage, that space is wasted.
1546 */
1547template<typename T, size_t N /* = 0 */>
1548class auto_vec : public vec<T, va_heap>
1549{
1550public:
1551 auto_vec ()
1552 {
1553 m_auto.embedded_init (MAX (N, 2), 0, 1);
1554 this->m_vec = &m_auto;
1555 }
1556
1557 auto_vec (size_t s CXX_MEM_STAT_INFO)
1558 {
1559 if (s > N)
1560 {
1561 this->create (s PASS_MEM_STAT);
1562 return;
1563 }
1564
1565 m_auto.embedded_init (MAX (N, 2), 0, 1);
1566 this->m_vec = &m_auto;
1567 }
1568
1569 ~auto_vec ()
1570 {
1571 this->release ();
1572 }
1573
1574 /* Explicitly convert to the base class. There is no conversion
1575 from a const auto_vec because a copy of the returned vec can
1576 be used to modify *THIS.
1577 This is a legacy function not to be used in new code. */
1578 vec<T, va_heap> to_vec_legacy () {
1579 return *static_cast<vec<T, va_heap> *>(this);
1580 }
1581
1582private:
1583 vec<T, va_heap, vl_embed> m_auto;
1584 T m_data[MAX (N - 1, 1)];
1585};
1586
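/* Illustrative sketch: a stack-local vector with 8 elements of inline
   storage; the heap is only touched once a ninth element is pushed,
   and storage is released by the destructor.

     auto_vec<int, 8> stack_v;
     for (int i = 0; i < n; ++i)   // n is a placeholder
       stack_v.safe_push (i);  */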
1587/* auto_vec is a subclass of vec whose storage is released when it is
1588 destroyed. */
1589template<typename T>
1590class auto_vec<T, 0> : public vec<T, va_heap>
1591{
1592public:
1593 auto_vec () { this->m_vec = NULL; }
1594 auto_vec (size_t n CXX_MEM_STAT_INFO) { this->create (n PASS_MEM_STAT); }
1595 ~auto_vec () { this->release (); }
1596
1597 auto_vec (vec<T, va_heap>&& r)
1598 {
1599 gcc_assert (!r.using_auto_storage ());
1600 this->m_vec = r.m_vec;
1601 r.m_vec = NULL;
1602 }
1603
1604 auto_vec (auto_vec<T> &&r)
1605 {
1606 gcc_assert (!r.using_auto_storage ());
1607 this->m_vec = r.m_vec;
1608 r.m_vec = NULL;
1609 }
1610
1611 auto_vec& operator= (vec<T, va_heap>&& r)
1612 {
1613 if (this == &r)
1614 return *this;
1615
1616 gcc_assert (!r.using_auto_storage ());
1617 this->release ();
1618 this->m_vec = r.m_vec;
1619 r.m_vec = NULL;
1620 return *this;
1621 }
1622
1623 auto_vec& operator= (auto_vec<T> &&r)
1624 {
1625 if (this == &r)
1626 return *this;
1627
1628 gcc_assert (!r.using_auto_storage ());
1629 this->release ();
1630 this->m_vec = r.m_vec;
1631 r.m_vec = NULL;
1632 return *this;
1633 }
1634
1635 /* Explicitly convert to the base class. There is no conversion
1636 from a const auto_vec because a copy of the returned vec can
1637 be used to modify *THIS.
1638 This is a legacy function not to be used in new code. */
1639 vec<T, va_heap> to_vec_legacy () {
1640 return *static_cast<vec<T, va_heap> *>(this);
1641 }
1642
1643 // You probably don't want to copy a vector, so these are deleted to prevent
1644 // unintentional use. If you really need a copy of the vector's contents you
1645 // can use copy ().
1646 auto_vec(const auto_vec &) = delete;
1647 auto_vec &operator= (const auto_vec &) = delete;
1648};
1649
1650
1651/* Allocate heap memory for pointer V and create the internal vector
1652 with space for NELEMS elements. If NELEMS is 0, the internal
1653 vector is initialized to empty. */
1654
1655template<typename T>
1656inline void
1657vec_alloc (vec<T> *&v, unsigned nelems CXX_MEM_STAT_INFO)
1658{
1659 v = new vec<T>;
1660 v->create (nelems PASS_MEM_STAT);
1661}
1662
1663
1664/* A subclass of auto_vec <char *> that frees all of its elements on
1665 deletion. */
1666
1667class auto_string_vec : public auto_vec <char *>
1668{
1669 public:
1670 ~auto_string_vec ();
1671};
1672
1673/* A subclass of auto_vec <T *> that deletes all of its elements on
1674 destruction.
1675
1676 This is a crude way for a vec to "own" the objects it points to
1677 and clean up automatically.
1678
1679 For example, no attempt is made to delete elements when an item
1680 within the vec is overwritten.
1681
1682 We can't rely on gnu::unique_ptr within a container,
1683 since we can't rely on move semantics in C++98. */
1684
1685template <typename T>
1686class auto_delete_vec : public auto_vec <T *>
1687{
1688 public:
1689 auto_delete_vec () {}
1690 auto_delete_vec (size_t s) : auto_vec <T *> (s) {}
1691
1692 ~auto_delete_vec ();
1693
1694private:
1695 DISABLE_COPY_AND_ASSIGN (auto_delete_vec);
1696};
1697
1698/* Conditionally allocate heap memory for VEC and its internal vector. */
1699
1700template<typename T>
1701inline void
1702vec_check_alloc (vec<T, va_heap> *&vec, unsigned nelems CXX_MEM_STAT_INFO)
1703{
1704 if (!vec)
1705 vec_alloc (vec, nelems PASS_MEM_STAT);
1706}
1707
1708
1709/* Free the heap memory allocated by vector V and set it to NULL. */
1710
1711template<typename T>
1712inline void
1713vec_free (vec<T> *&v)
1714{
1715 if (v == NULL)
1716 return;
1717
1718 v->release ();
1719 delete v;
1720 v = NULL;
1721}
1722
1723
1724/* Return iteration condition and update PTR to point to the IX'th
1725 element of this vector. Use this to iterate over the elements of a
1726 vector as follows,
1727
1728 for (ix = 0; v.iterate (ix, &ptr); ix++)
1729 continue; */
1730
1731template<typename T>
1732inline bool
1733vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T *ptr) const
1734{
1735 if (m_vec)
1736 return m_vec->iterate (ix, ptr);
1737 else
1738 {
1739 *ptr = 0;
1740 return false;
1741 }
1742}
1743
1744
1745/* Return iteration condition and update *PTR to point to the
1746 IX'th element of this vector. Use this to iterate over the
1747 elements of a vector as follows,
1748
1749 for (ix = 0; v->iterate (ix, &ptr); ix++)
1750 continue;
1751
1752 This variant is for vectors of objects. */
1753
1754template<typename T>
1755inline bool
1756vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T **ptr) const
1757{
1758 if (m_vec)
1759 return m_vec->iterate (ix, ptr);
1760 else
1761 {
1762 *ptr = 0;
1763 return false;
1764 }
1765}
1766
1767
1768/* Convenience macro for forward iteration. */
1769#define FOR_EACH_VEC_ELT(V, I, P) \
1770 for (I = 0; (V).iterate ((I), &(P)); ++(I))
1771
1772#define FOR_EACH_VEC_SAFE_ELT(V, I, P) \
1773 for (I = 0; vec_safe_iterate ((V), (I), &(P)); ++(I))
1774
1775/* Likewise, but start from FROM rather than 0. */
1776#define FOR_EACH_VEC_ELT_FROM(V, I, P, FROM) \
1777 for (I = (FROM); (V).iterate ((I), &(P)); ++(I))
1778
1779/* Convenience macro for reverse iteration. */
1780#define FOR_EACH_VEC_ELT_REVERSE(V, I, P) \
1781 for (I = (V).length () - 1; \
1782 (V).iterate ((I), &(P)); \
1783 (I)--)
1784
1785#define FOR_EACH_VEC_SAFE_ELT_REVERSE(V, I, P) \
1786 for (I = vec_safe_length (V) - 1; \
1787 vec_safe_iterate ((V), (I), &(P)); \
1788 (I)--)
1789
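/* Illustrative sketch: typical iteration with the macros above, over
   a vector value and over a possibly-NULL GC vector pointer
   (placeholder names throughout).

     unsigned i;
     loop *lp;
     FOR_EACH_VEC_ELT (my_loops, i, lp)
       handle (lp);

     FOR_EACH_VEC_SAFE_ELT (gc_loops, i, lp)   // tolerates NULL
       handle (lp);  */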
1790/* auto_string_vec's dtor, freeing all contained strings, automatically
1791 chaining up to ~auto_vec <char *>, which frees the internal buffer. */
1792
1793inline
1794auto_string_vec::~auto_string_vec ()
1795{
1796 int i;
1797 char *str;
1798 FOR_EACH_VEC_ELT (*this, i, str)
1799 free (str);
1800}
1801
1802/* auto_delete_vec's dtor, deleting all contained items, automatically
1803 chaining up to ~auto_vec <T*>, which frees the internal buffer. */
1804
1805template <typename T>
1806inline
1807auto_delete_vec<T>::~auto_delete_vec ()
1808{
1809 int i;
1810 T *item;
1811 FOR_EACH_VEC_ELT (*this, i, item)
1812 delete item;
1813}
1814
1815
1816/* Return a copy of this vector. */
1817
1818template<typename T>
1819inline vec<T, va_heap, vl_ptr>
1820vec<T, va_heap, vl_ptr>::copy (ALONE_MEM_STAT_DECL) const
1821{
1822 vec<T, va_heap, vl_ptr> new_vec{ };
1823 if (length ())
1824 new_vec.m_vec = m_vec->copy (ALONE_PASS_MEM_STAT);
1825 return new_vec;
1826}
1827
1828
1829/* Ensure that the vector has at least RESERVE slots available (if
1830 EXACT is false), or exactly RESERVE slots available (if EXACT is
1831 true).
1832
1833 This may create additional headroom if EXACT is false.
1834
1835 Note that this can cause the embedded vector to be reallocated.
1836 Returns true iff reallocation actually occurred. */
1837
1838template<typename T>
1839inline bool
1840vec<T, va_heap, vl_ptr>::reserve (unsigned nelems, bool exact MEM_STAT_DECL)
1841{
1842 if (space (nelems))
1843 return false;
1844
1845 /* For now play a game with va_heap::reserve to hide our auto storage if any,
1846 this is necessary because it doesn't have enough information to know the
1847 embedded vector is in auto storage, and so should not be freed. */
1848 vec<T, va_heap, vl_embed> *oldvec = m_vec;
1849 unsigned int oldsize = 0;
1850 bool handle_auto_vec = m_vec && using_auto_storage ();
1851 if (handle_auto_vec)
1852 {
1853 m_vec = NULL;
1854 oldsize = oldvec->length ();
1855 nelems += oldsize;
1856 }
1857
1858 va_heap::reserve (m_vec, nelems, exact PASS_MEM_STAT);
1859 if (handle_auto_vec)
1860 {
1861 vec_copy_construct (m_vec->address (), oldvec->address (), oldsize);
1862 m_vec->m_vecpfx.m_num = oldsize;
1863 }
1864
1865 return true;
1866}
1867
1868
1869/* Ensure that this vector has exactly NELEMS slots available. This
1870 will not create additional headroom. Note this can cause the
1871 embedded vector to be reallocated. Returns true iff reallocation
1872 actually occurred. */
1873
1874template<typename T>
1875inline bool
1876vec<T, va_heap, vl_ptr>::reserve_exact (unsigned nelems MEM_STAT_DECL)
1877{
1878 return reserve (nelems, true PASS_MEM_STAT);
1879}
1880
1881
1882/* Create the internal vector and reserve NELEMS for it. This is
1883 exactly like vec::reserve, but the internal vector is
1884 unconditionally allocated from scratch. The old one, if it
1885 existed, is lost. */
1886
1887template<typename T>
1888inline void
1889vec<T, va_heap, vl_ptr>::create (unsigned nelems MEM_STAT_DECL)
1890{
1891 m_vec = NULL;
1892 if (nelems > 0)
1893 reserve_exact (nelems PASS_MEM_STAT);
1894}
1895
1896
1897/* Free the memory occupied by the embedded vector. */
1898
1899template<typename T>
1900inline void
1901vec<T, va_heap, vl_ptr>::release (void)
1902{
1903 if (!m_vec)
1904 return;
1905
1906 if (using_auto_storage ())
1907 {
1908 m_vec->m_vecpfx.m_num = 0;
1909 return;
1910 }
1911
1912 va_heap::release (m_vec);
1913}
1914
1915/* Copy the elements from SRC to the end of this vector as if by memcpy.
1916 SRC and this vector must be allocated with the same memory
1917 allocation mechanism. This vector is assumed to have sufficient
1918 headroom available. */
1919
1920template<typename T>
1921inline void
1922vec<T, va_heap, vl_ptr>::splice (const vec<T, va_heap, vl_ptr> &src)
1923{
1924 if (src.length ())
1925 m_vec->splice (*(src.m_vec));
1926}
1927
1928
1929/* Copy the elements in SRC to the end of this vector as if by memcpy.
1930 SRC and this vector must be allocated with the same mechanism.
1931 If there is not enough headroom in this vector, it will be reallocated
1932 as needed. */
1933
1934template<typename T>
1935inline void
1936vec<T, va_heap, vl_ptr>::safe_splice (const vec<T, va_heap, vl_ptr> &src
1937 MEM_STAT_DECL)
1938{
1939 if (src.length ())
1940 {
1941 reserve_exact (src.length ());
1942 splice (src);
1943 }
1944}
1945
1946
1947/* Push OBJ (a new element) onto the end of the vector. There must be
1948 sufficient space in the vector. Return a pointer to the slot
1949 where OBJ was inserted. */
1950
1951template<typename T>
1952inline T *
1953vec<T, va_heap, vl_ptr>::quick_push (const T &obj)
1954{
1955 return m_vec->quick_push (obj);
1956}
1957
1958
1959/* Push a new element OBJ onto the end of this vector. Reallocates
1960 the embedded vector, if needed. Return a pointer to the slot where
1961 OBJ was inserted. */
1962
1963template<typename T>
1964inline T *
1965vec<T, va_heap, vl_ptr>::safe_push (const T &obj MEM_STAT_DECL)
1966{
1967 reserve (1, false PASS_MEM_STAT);
1968 return quick_push (obj);
1969}
1970
1971
1972/* Pop and return the last element off the end of the vector. */
1973
1974template<typename T>
1975inline T &
1976vec<T, va_heap, vl_ptr>::pop (void)
1977{
1978 return m_vec->pop ();
1979}
1980
1981
1982/* Set the length of the vector to SIZE. The new length must be less
1983 than or equal to the current length. This is an O(1) operation. */
1984
1985template<typename T>
1986inline void
1987vec<T, va_heap, vl_ptr>::truncate (unsigned size)
1988{
1989 if (m_vec)
1990 m_vec->truncate (size);
1991 else
1992 gcc_checking_assert (size == 0);
1993}
1994
1995
1996/* Grow the vector to a specific length. LEN must be as long or
1997 longer than the current length. The new elements are
1998 uninitialized. Reallocate the internal vector, if needed. */
1999
2000template<typename T>
2001inline void
2002vec<T, va_heap, vl_ptr>::safe_grow (unsigned len, bool exact MEM_STAT_DECL)
2003{
2004 unsigned oldlen = length ();
2005 gcc_checking_assert (oldlen <= len);
2006 reserve (len - oldlen, exact PASS_MEM_STAT);
2007 if (m_vec)
2008 m_vec->quick_grow (len);
2009 else
2010 gcc_checking_assert (len == 0);
2011}
2012
2013
2014/* Grow the embedded vector to a specific length. LEN must be as
2015 long or longer than the current length. The new elements are
2016 initialized to zero. Reallocate the internal vector, if needed. */
2017
2018template<typename T>
2019inline void
2020vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len, bool exact
2021 MEM_STAT_DECL)
2022{
2023 unsigned oldlen = length ();
2024 size_t growby = len - oldlen;
2025 safe_grow (len, exact PASS_MEM_STAT);
2026 if (growby != 0)
2027 vec_default_construct (address () + oldlen, growby);
2028}
2029
2030
2031/* Same as vec::safe_grow but without reallocation of the internal vector.
2032 If the vector cannot be extended, a runtime assertion will be triggered. */
2033
2034template<typename T>
2035inline void
2036vec<T, va_heap, vl_ptr>::quick_grow (unsigned len)
2037{
2038 gcc_checking_assert (m_vec);
2039 m_vec->quick_grow (len);
2040}
2041
2042
2043/* Same as vec::quick_grow_cleared but without reallocation of the
2044 internal vector. If the vector cannot be extended, a runtime
2045 assertion will be triggered. */
2046
2047template<typename T>
2048inline void
2049vec<T, va_heap, vl_ptr>::quick_grow_cleared (unsigned len)
2050{
2051 gcc_checking_assert (m_vec);
2052 m_vec->quick_grow_cleared (len);
2053}
2054
2055
2056/* Insert an element, OBJ, at the IXth position of this vector. There
2057 must be sufficient space. */
2058
2059template<typename T>
2060inline void
2061vec<T, va_heap, vl_ptr>::quick_insert (unsigned ix, const T &obj)
2062{
2063 m_vec->quick_insert (ix, obj);
2064}
2065
2066
2067/* Insert an element, OBJ, at the IXth position of the vector.
2068 Reallocate the embedded vector, if necessary. */
2069
2070template<typename T>
2071inline void
2072vec<T, va_heap, vl_ptr>::safe_insert (unsigned ix, const T &obj MEM_STAT_DECL)
2073{
2074 reserve (1, false PASS_MEM_STAT);
2075 quick_insert (ix, obj);
2076}
2077
2078
2079/* Remove an element from the IXth position of this vector. Ordering of
2080 remaining elements is preserved. This is an O(N) operation due to
2081 a memmove. */
2082
2083template<typename T>
2084inline void
2085vec<T, va_heap, vl_ptr>::ordered_remove (unsigned ix)
2086{
2087 m_vec->ordered_remove (ix);
2088}
2089
2090
2091/* Remove an element from the IXth position of this vector. Ordering
2092 of remaining elements is destroyed. This is an O(1) operation. */
2093
2094template<typename T>
2095inline void
2096vec<T, va_heap, vl_ptr>::unordered_remove (unsigned ix)
2097{
2098 m_vec->unordered_remove (ix);
2099}
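
/* Sketch (not part of vec.h) contrasting the two removal flavors:

     vec<int> v = vNULL;
     for (int i = 0; i < 4; i++)
       v.safe_push (i);		// v = {0, 1, 2, 3}
     v.ordered_remove (1);	// v = {0, 2, 3}: order kept, O(N)
     v.unordered_remove (0);	// v = {3, 2}: last element moved in, O(1)
     v.release ();
*/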
2100
2101
2102/* Remove LEN elements starting at the IXth. Ordering is retained.
2103 This is an O(N) operation due to memmove. */
2104
2105template<typename T>
2106inline void
2107vec<T, va_heap, vl_ptr>::block_remove (unsigned ix, unsigned len)
2108{
2109 m_vec->block_remove (ix, len);
2110}
2111
2112
2113/* Sort the contents of this vector with qsort. CMP is the comparison
2114 function to pass to qsort. */
2115
2116template<typename T>
2117inline void
2118vec<T, va_heap, vl_ptr>::qsort (int (*cmp) (const void *, const void *))
2119{
2120 if (m_vec)
2121 m_vec->qsort (cmp);
2122}
2123
2124/* Sort the contents of this vector with qsort. CMP is the comparison
2125 function to pass to qsort and DATA is passed through to CMP. */
2126
2127template<typename T>
2128inline void
2129vec<T, va_heap, vl_ptr>::sort (int (*cmp) (const void *, const void *,
2130 void *), void *data)
2131{
2132 if (m_vec)
2133 m_vec->sort (cmp, data);
2134}
2135
2136/* Sort the contents of this vector with gcc_stablesort_r. CMP is the
2137 comparison function and DATA is passed through to it unchanged. */
2138
2139template<typename T>
2140inline void
2141vec<T, va_heap, vl_ptr>::stablesort (int (*cmp) (const void *, const void *,
2142 void *), void *data)
2143{
2144 if (m_vec)
2145 m_vec->stablesort (cmp, data);
2146}
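
/* Sketch (not part of vec.h): a typical two-argument comparator for
   qsort; sort and stablesort take a third pass-through DATA argument
   instead.  Written to avoid overflow on subtraction:

     static int
     int_cmp (const void *pa, const void *pb)
     {
       int a = *(const int *) pa, b = *(const int *) pb;
       return (a > b) - (a < b);
     }

     v.qsort (int_cmp);
*/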
2147
2148/* Search the contents of the sorted vector with a binary search.
2149 CMP is the comparison function to pass to bsearch. */
2150
2151template<typename T>
2152inline T *
2153vec<T, va_heap, vl_ptr>::bsearch (const void *key,
2154 int (*cmp) (const void *, const void *))
2155{
2156 if (m_vec)
2157 return m_vec->bsearch (key, cmp);
2158 return NULL;
2159}
2160
2161/* Search the contents of the sorted vector with a binary search.
2162 CMP is the comparison function to pass to bsearch. */
2163
2164template<typename T>
2165inline T *
2166vec<T, va_heap, vl_ptr>::bsearch (const void *key,
2167 int (*cmp) (const void *, const void *,
2168 void *), void *data)
2169{
2170 if (m_vec)
2171 return m_vec->bsearch (key, cmp, data);
2172 return NULL;
2173}
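
/* Sketch (not part of vec.h): bsearch requires a vector already sorted
   with a compatible comparator; the key is CMP's first argument:

     v.qsort (int_cmp);		// int_cmp as sketched above
     int key = 2;
     int *hit = v.bsearch (&key, int_cmp);	// NULL when absent
*/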
2174
2175
2176/* Find and return the first position in which OBJ could be inserted
2177 without changing the ordering of this vector. LESSTHAN is a
2178 function that returns true if the first argument is strictly less
2179 than the second. */
2180
2181template<typename T>
2182inline unsigned
2183vec<T, va_heap, vl_ptr>::lower_bound (T obj,
2184 bool (*lessthan)(const T &, const T &))
2185 const
2186{
2187 return m_vec ? m_vec->lower_bound (obj, lessthan) : 0;
2188}
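
/* Sketch (not part of vec.h): lower_bound plus safe_insert keeps a
   vector sorted as elements arrive:

     static bool
     int_less (const int &a, const int &b) { return a < b; }

     unsigned pos = v.lower_bound (5, int_less);
     v.safe_insert (pos, 5);	// insertion point preserves the order
*/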
2189
2190/* Return true if SEARCH is an element of this vector. Note that this
2191 is O(N) in the size of the vector and so should be used with care. */
2192
2193template<typename T>
2194inline bool
2195vec<T, va_heap, vl_ptr>::contains (const T &search) const
2196{
2197 return m_vec ? m_vec->contains (search) : false;
2198}
2199
2200/* Reverse the contents of the vector. */
2201
2202template<typename T>
2203inline void
2204vec<T, va_heap, vl_ptr>::reverse (void)
2205{
2206 unsigned l = length ();
2207 T *ptr = address ();
2208
2209 for (unsigned i = 0; i < l / 2; i++)
2210 std::swap (ptr[i], ptr[l - i - 1]);
2211}
2212
2213template<typename T>
2214inline bool
2215vec<T, va_heap, vl_ptr>::using_auto_storage () const
2216{
2217 return m_vec ? m_vec->m_vecpfx.m_using_auto_storage : false;
2218}
2219
2220/* Release VEC and call release of all element vectors. */
2221
2222template<typename T>
2223inline void
2224release_vec_vec (vec<vec<T> > &vec)
2225{
2226 for (unsigned i = 0; i < vec.length (); i++)
2227 vec[i].release ();
2228
2229 vec.release ();
2230}
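
/* Sketch (not part of vec.h): each inner vector owns its own storage,
   so releasing only the outer vector would leak:

     vec<vec<int> > vv = vNULL;
     vec<int> inner = vNULL;
     inner.safe_push (1);
     vv.safe_push (inner);
     release_vec_vec (vv);	// frees the inner vectors, then VV
*/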
2231
2232// Provide a subset of the std::span functionality. (We can't use std::span
2233// itself because it's a C++20 feature.)
2234//
2235// In addition, provide an invalid value that is distinct from all valid
2236// sequences (including the empty sequence). This can be used to return
2237// failure without having to use std::optional.
2238//
2239// There is no operator bool because it would be ambiguous whether it is
2240// testing for a valid value or an empty sequence.
2241template<typename T>
2242class array_slice
2243{
2244 template<typename OtherT> friend class array_slice;
2245
2246public:
2247 using value_type = T;
2248 using iterator = T *;
2249 using const_iterator = const T *;
2250
2251 array_slice () : m_base (nullptr), m_size (0) {}
2252
2253 template<typename OtherT>
2254 array_slice (array_slice<OtherT> other)
2255 : m_base (other.m_base), m_size (other.m_size) {}
2256
2257 array_slice (iterator base, unsigned int size)
2258 : m_base (base), m_size (size) {}
2259
2260 template<size_t N>
2261 array_slice (T (&array)[N]) : m_base (array), m_size (N) {}
2262
2263 template<typename OtherT>
2264 array_slice (const vec<OtherT> &v)
2265 : m_base (v.address ()), m_size (v.length ()) {}
2266
2267 iterator begin () { return m_base; }
2268 iterator end () { return m_base + m_size; }
2269
2270 const_iterator begin () const { return m_base; }
2271 const_iterator end () const { return m_base + m_size; }
2272
2273 value_type &front ();
2274 value_type &back ();
2275 value_type &operator[] (unsigned int i);
2276
2277 const value_type &front () const;
2278 const value_type &back () const;
2279 const value_type &operator[] (unsigned int i) const;
2280
2281 size_t size () const { return m_size; }
2282 size_t size_bytes () const { return m_size * sizeof (T); }
2283 bool empty () const { return m_size == 0; }
2284
2285 // An invalid array_slice that represents a failed operation. This is
2286 // distinct from an empty slice, which is a valid result in some contexts.
2287 static array_slice invalid () { return { nullptr, ~0U }; }
2288
2289 // True if the slice is valid, false if it is an invalid slice like
 // the one returned by invalid ().
2290 bool is_valid () const { return m_base || m_size == 0; }
2291
2292private:
2293 iterator m_base;
2294 unsigned int m_size;
2295};
2296
2297template<typename T>
2298inline typename array_slice<T>::value_type &
2299array_slice<T>::front ()
2300{
2301 gcc_checking_assert (m_size);
2302 return m_base[0];
2303}
2304
2305template<typename T>
2306inline const typename array_slice<T>::value_type &
2307array_slice<T>::front () const
2308{
2309 gcc_checking_assert (m_size);
2310 return m_base[0];
2311}
2312
2313template<typename T>
2314inline typename array_slice<T>::value_type &
2315array_slice<T>::back ()
2316{
2317 gcc_checking_assert (m_size);
2318 return m_base[m_size - 1];
2319}
2320
2321template<typename T>
2322inline const typename array_slice<T>::value_type &
2323array_slice<T>::back () const
2324{
2325 gcc_checking_assert (m_size);
2326 return m_base[m_size - 1];
2327}
2328
2329template<typename T>
2330inline typename array_slice<T>::value_type &
2331array_slice<T>::operator[] (unsigned int i)
2332{
2333 gcc_checking_assert (i < m_size);
2334 return m_base[i];
2335}
2336
2337template<typename T>
2338inline const typename array_slice<T>::value_type &
2339array_slice<T>::operator[] (unsigned int i) const
2340{
2341 gcc_checking_assert (i < m_size);
2342 return m_base[i];
2343}
2344
2345template<typename T>
2346array_slice<T>
2347make_array_slice (T *base, unsigned int size)
2348{
2349 return array_slice<T> (base, size);
2350}
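
/* Sketch (not part of vec.h): make_array_slice deduces T, and a failed
   result from invalid () is distinguishable from a merely empty slice,
   which is still valid:

     int buf[3] = { 1, 2, 3 };
     array_slice<int> s = make_array_slice (buf, 3);
     gcc_assert (s.is_valid () && s.size () == 3 && s.back () == 3);

     array_slice<int> bad = array_slice<int>::invalid ();
     gcc_assert (!bad.is_valid ());
*/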
2351
2352#if (GCC_VERSION >= 3000)
2353# pragma GCC poison m_vec m_vecpfx m_vecdata
2354#endif
2355
2356#endif // GCC_VEC_H