Bug Summary

File: build/gcc/cfgloop.c
Warning: line 288, column 5
Called C++ object pointer is null

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-unknown-linux-gnu -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name cfgloop.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -fno-rounding-math -mconstructor-aliases -munwind-tables -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/13.0.0 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. 
-I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../include/c++/11/backward -internal-isystem /usr/lib64/clang/13.0.0/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/11/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-error=format-diag -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2021-11-20-133755-20252-1/report-m_Hsla.plist -x c++ /home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c

1/* Natural loop discovery code for GNU compiler.
2 Copyright (C) 2000-2021 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "rtl.h"
25#include "tree.h"
26#include "gimple.h"
27#include "cfghooks.h"
28#include "gimple-ssa.h"
29#include "diagnostic-core.h"
30#include "cfganal.h"
31#include "cfgloop.h"
32#include "gimple-iterator.h"
33#include "dumpfile.h"
34
35static void flow_loops_cfg_dump (FILE *);
36
37/* Dump loop related CFG information. */
38
39static void
40flow_loops_cfg_dump (FILE *file)
41{
42 basic_block bb;
43
44 if (!file)
45 return;
46
47 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
48 {
49 edge succ;
50 edge_iterator ei;
51
52 fprintf (file, ";; %d succs { ", bb->index);
53 FOR_EACH_EDGE (succ, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(succ)); ei_next (&(ei)))
54 fprintf (file, "%d ", succ->dest->index);
55 fprintf (file, "}\n");
56 }
57}
58
59/* Return nonzero if the nodes of LOOP are a subset of OUTER. */
60
61bool
62flow_loop_nested_p (const class loop *outer, const class loop *loop)
63{
64 unsigned odepth = loop_depth (outer);
65
66 return (loop_depth (loop) > odepth
67 && (*loop->superloops)[odepth] == outer);
68}
69
70/* Returns the loop such that LOOP is nested DEPTH (indexed from zero)
71 loops within LOOP. */
72
73class loop *
74superloop_at_depth (class loop *loop, unsigned depth)
75{
76 unsigned ldepth = loop_depth (loop);
77
78 gcc_assert (depth <= ldepth)((void)(!(depth <= ldepth) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 78, __FUNCTION__), 0 : 0))
;
79
80 if (depth == ldepth)
81 return loop;
82
83 return (*loop->superloops)[depth];
84}
85
86/* Returns the list of the latch edges of LOOP. */
87
88static vec<edge>
89get_loop_latch_edges (const class loop *loop)
90{
91 edge_iterator ei;
92 edge e;
93 vec<edge> ret = vNULL;
94
95 FOR_EACH_EDGE (e, ei, loop->header->preds)for ((ei) = ei_start_1 (&((loop->header->preds))); ei_cond
((ei), &(e)); ei_next (&(ei)))
96 {
97 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header))
98 ret.safe_push (e);
99 }
100
101 return ret;
102}
103
104/* Dump the loop information specified by LOOP to the stream FILE
105 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
106
107void
108flow_loop_dump (const class loop *loop, FILE *file,
109 void (*loop_dump_aux) (const class loop *, FILE *, int),
110 int verbose)
111{
112 basic_block *bbs;
113 unsigned i;
114 vec<edge> latches;
115 edge e;
116
117 if (! loop || ! loop->header)
118 return;
119
120 fprintf (file, ";;\n;; Loop %d\n", loop->num);
121
122 fprintf (file, ";; header %d, ", loop->header->index);
123 if (loop->latch)
124 fprintf (file, "latch %d\n", loop->latch->index);
125 else
126 {
127 fprintf (file, "multiple latches:");
128 latches = get_loop_latch_edges (loop);
129 FOR_EACH_VEC_ELT (latches, i, e)for (i = 0; (latches).iterate ((i), &(e)); ++(i))
130 fprintf (file, " %d", e->src->index);
131 latches.release ();
132 fprintf (file, "\n");
133 }
134
135 fprintf (file, ";; depth %d, outer %ld\n",
136 loop_depth (loop), (long) (loop_outer (loop)
137 ? loop_outer (loop)->num : -1));
138
139 if (loop->latch)
140 {
141 bool read_profile_p;
142 gcov_type nit = expected_loop_iterations_unbounded (loop, &read_profile_p);
143 if (read_profile_p && !loop->any_estimate)
144 fprintf (file, ";; profile-based iteration count: %" PRIu64"l" "u" "\n",
145 (uint64_t) nit);
146 }
147
148 fprintf (file, ";; nodes:");
149 bbs = get_loop_body (loop);
150 for (i = 0; i < loop->num_nodes; i++)
151 fprintf (file, " %d", bbs[i]->index);
152 free (bbs);
153 fprintf (file, "\n");
154
155 if (loop_dump_aux)
156 loop_dump_aux (loop, file, verbose);
157}
158
159/* Dump the loop information about loops to the stream FILE,
160 using auxiliary dump callback function LOOP_DUMP_AUX if non null. */
161
162void
163flow_loops_dump (FILE *file, void (*loop_dump_aux) (const class loop *, FILE *, int), int verbose)
164{
165 if (!current_loops((cfun + 0)->x_current_loops) || ! file)
166 return;
167
168 fprintf (file, ";; %d loops found\n", number_of_loops (cfun(cfun + 0)));
169
170 for (auto loop : loops_list (cfun(cfun + 0), LI_INCLUDE_ROOT))
171 {
172 flow_loop_dump (loop, file, loop_dump_aux, verbose);
173 }
174
175 if (verbose)
176 flow_loops_cfg_dump (file);
177}
178
179/* Free data allocated for LOOP. */
180
181void
182flow_loop_free (class loop *loop)
183{
184 struct loop_exit *exit, *next;
185
186 vec_free (loop->superloops);
187
188 /* Break the list of the loop exit records. They will be freed when the
189 corresponding edge is rescanned or removed, and this avoids
190 accessing the (already released) head of the list stored in the
191 loop structure. */
192 for (exit = loop->exits->next; exit != loop->exits; exit = next)
193 {
194 next = exit->next;
195 exit->next = exit;
196 exit->prev = exit;
197 }
198
199 ggc_free (loop->exits);
200 ggc_free (loop);
201}
202
203/* Free all the memory allocated for LOOPS. */
204
205void
206flow_loops_free (struct loops *loops)
207{
208 if (loops->larray)
209 {
210 unsigned i;
211 loop_p loop;
212
213 /* Free the loop descriptors. */
214 FOR_EACH_VEC_SAFE_ELT (loops->larray, i, loop)for (i = 0; vec_safe_iterate ((loops->larray), (i), &(
loop)); ++(i))
215 {
216 if (!loop)
217 continue;
218
219 flow_loop_free (loop);
220 }
221
222 vec_free (loops->larray);
223 }
224}
225
226/* Find the nodes contained within the LOOP with header HEADER.
227 Return the number of nodes within the loop. */
228
229int
230flow_loop_nodes_find (basic_block header, class loop *loop)
231{
232 vec<basic_block> stack = vNULL;
233 int num_nodes = 1;
234 edge latch;
235 edge_iterator latch_ei;
236
237 header->loop_father = loop;
238
239 FOR_EACH_EDGE (latch, latch_ei, loop->header->preds)for ((latch_ei) = ei_start_1 (&((loop->header->preds
))); ei_cond ((latch_ei), &(latch)); ei_next (&(latch_ei
)))
240 {
241 if (latch->src->loop_father == loop
242 || !dominated_by_p (CDI_DOMINATORS, latch->src, loop->header))
243 continue;
244
245 num_nodes++;
246 stack.safe_push (latch->src);
247 latch->src->loop_father = loop;
248
249 while (!stack.is_empty ())
250 {
251 basic_block node;
252 edge e;
253 edge_iterator ei;
254
255 node = stack.pop ();
256
257 FOR_EACH_EDGE (e, ei, node->preds)for ((ei) = ei_start_1 (&((node->preds))); ei_cond ((ei
), &(e)); ei_next (&(ei)))
258 {
259 basic_block ancestor = e->src;
260
261 if (ancestor->loop_father != loop)
262 {
263 ancestor->loop_father = loop;
264 num_nodes++;
265 stack.safe_push (ancestor);
266 }
267 }
268 }
269 }
270 stack.release ();
271
272 return num_nodes;
273}
274
275/* Records the vector of superloops of the loop LOOP, whose immediate
276 superloop is FATHER. */
277
278static void
279establish_preds (class loop *loop, class loop *father)
280{
281 loop_p ploop;
282 unsigned depth = loop_depth (father) + 1;
14
Calling 'loop_depth'
19
Returning from 'loop_depth'
283 unsigned i;
284
285 loop->superloops = 0;
286 vec_alloc (loop->superloops, depth);
20
Calling 'vec_alloc<loop *, va_gc>'
28
Returning from 'vec_alloc<loop *, va_gc>'
287 FOR_EACH_VEC_SAFE_ELT (father->superloops, i, ploop)for (i = 0; vec_safe_iterate ((father->superloops), (i), &
(ploop)); ++(i))
29
Calling 'vec_safe_iterate<loop *, va_gc>'
32
Returning from 'vec_safe_iterate<loop *, va_gc>'
33
Loop condition is true. Entering loop body
288 loop->superloops->quick_push (ploop);
34
Called C++ object pointer is null
289 loop->superloops->quick_push (father);
290
291 for (ploop = loop->inner; ploop; ploop = ploop->next)
292 establish_preds (ploop, loop);
293}
294
295/* Add LOOP to the loop hierarchy tree where FATHER is father of the
296 added loop. If LOOP has some children, take care of that their
297 pred field will be initialized correctly. If AFTER is non-null
298 then it's expected it's a pointer into FATHERs inner sibling
299 list and LOOP is added behind AFTER, otherwise it's added in front
300 of FATHERs siblings. */
301
302void
303flow_loop_tree_node_add (class loop *father, class loop *loop,
304 class loop *after)
305{
306 if (after
11.1
'after' is null
11.1
'after' is null
11.1
'after' is null
)
12
Taking false branch
307 {
308 loop->next = after->next;
309 after->next = loop;
310 }
311 else
312 {
313 loop->next = father->inner;
314 father->inner = loop;
315 }
316
317 establish_preds (loop, father);
13
Calling 'establish_preds'
318}
319
320/* Remove LOOP from the loop hierarchy tree. */
321
322void
323flow_loop_tree_node_remove (class loop *loop)
324{
325 class loop *prev, *father;
326
327 father = loop_outer (loop);
328
329 /* Remove loop from the list of sons. */
330 if (father->inner == loop)
331 father->inner = loop->next;
332 else
333 {
334 for (prev = father->inner; prev->next != loop; prev = prev->next)
335 continue;
336 prev->next = loop->next;
337 }
338
339 loop->superloops = NULLnullptr;
340}
341
342/* Allocates and returns new loop structure. */
343
344class loop *
345alloc_loop (void)
346{
347 class loop *loop = ggc_cleared_alloc<class loop> ();
348
349 loop->exits = ggc_cleared_alloc<loop_exit> ();
350 loop->exits->next = loop->exits->prev = loop->exits;
351 loop->can_be_parallel = false;
352 loop->constraints = 0;
353 loop->nb_iterations_upper_bound = 0;
354 loop->nb_iterations_likely_upper_bound = 0;
355 loop->nb_iterations_estimate = 0;
356 return loop;
357}
358
359/* Initializes loops structure LOOPS, reserving place for NUM_LOOPS loops
360 (including the root of the loop tree). */
361
362void
363init_loops_structure (struct function *fn,
364 struct loops *loops, unsigned num_loops)
365{
366 class loop *root;
367
368 memset (loops, 0, sizeof *loops);
369 vec_alloc (loops->larray, num_loops);
370
371 /* Dummy loop containing whole function. */
372 root = alloc_loop ();
373 root->num_nodes = n_basic_blocks_for_fn (fn)((fn)->cfg->x_n_basic_blocks);
374 root->latch = EXIT_BLOCK_PTR_FOR_FN (fn)((fn)->cfg->x_exit_block_ptr);
375 root->header = ENTRY_BLOCK_PTR_FOR_FN (fn)((fn)->cfg->x_entry_block_ptr);
376 ENTRY_BLOCK_PTR_FOR_FN (fn)((fn)->cfg->x_entry_block_ptr)->loop_father = root;
377 EXIT_BLOCK_PTR_FOR_FN (fn)((fn)->cfg->x_exit_block_ptr)->loop_father = root;
378
379 loops->larray->quick_push (root);
380 loops->tree_root = root;
381}
382
383/* Returns whether HEADER is a loop header. */
384
385bool
386bb_loop_header_p (basic_block header)
387{
388 edge_iterator ei;
389 edge e;
390
391 /* If we have an abnormal predecessor, do not consider the
392 loop (not worth the problems). */
393 if (bb_has_abnormal_pred (header))
394 return false;
395
396 /* Look for back edges where a predecessor is dominated
397 by this block. A natural loop has a single entry
398 node (header) that dominates all the nodes in the
399 loop. It also has single back edge to the header
400 from a latch node. */
401 FOR_EACH_EDGE (e, ei, header->preds)for ((ei) = ei_start_1 (&((header->preds))); ei_cond (
(ei), &(e)); ei_next (&(ei)))
402 {
403 basic_block latch = e->src;
404 if (latch != ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)
405 && dominated_by_p (CDI_DOMINATORS, latch, header))
406 return true;
407 }
408
409 return false;
410}
411
412/* Find all the natural loops in the function and save in LOOPS structure and
413 recalculate loop_father information in basic block structures.
414 If LOOPS is non-NULL then the loop structures for already recorded loops
415 will be re-used and their number will not change. We assume that no
416 stale loops exist in LOOPS.
417 When LOOPS is NULL it is allocated and re-built from scratch.
418 Return the built LOOPS structure. */
419
420struct loops *
421flow_loops_find (struct loops *loops)
422{
423 bool from_scratch = (loops == NULLnullptr);
1
Assuming the condition is false
424 int *rc_order;
425 int b;
426 unsigned i;
427
428 /* Ensure that the dominators are computed. */
429 calculate_dominance_info (CDI_DOMINATORS);
430
431 if (!loops
1.1
'loops' is non-null
1.1
'loops' is non-null
1.1
'loops' is non-null
)
2
Taking false branch
432 {
433 loops = ggc_cleared_alloc<struct loops> ();
434 init_loops_structure (cfun(cfun + 0), loops, 1);
435 }
436
437 /* Ensure that loop exits were released. */
438 gcc_assert (loops->exits == NULL)((void)(!(loops->exits == nullptr) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 438, __FUNCTION__), 0 : 0))
;
3
Assuming the condition is true
4
'?' condition is false
439
440 /* Taking care of this degenerate case makes the rest of
441 this code simpler. */
442 if (n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks) == NUM_FIXED_BLOCKS(2))
5
Assuming field 'x_n_basic_blocks' is not equal to NUM_FIXED_BLOCKS
6
Taking false branch
443 return loops;
444
445 /* The root loop node contains all basic-blocks. */
446 loops->tree_root->num_nodes = n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks);
447
448 /* Compute depth first search order of the CFG so that outer
449 natural loops will be found before inner natural loops. */
450 rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun))((int *) xmalloc (sizeof (int) * ((((cfun + 0))->cfg->x_n_basic_blocks
))))
;
451 pre_and_rev_post_order_compute (NULLnullptr, rc_order, false);
452
453 /* Gather all loop headers in reverse completion order and allocate
454 loop structures for loops that are not already present. */
455 auto_vec<loop_p> larray (loops->larray->length ());
456 for (b = 0; b < n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks) - NUM_FIXED_BLOCKS(2); b++)
7
Assuming the condition is false
8
Loop condition is false. Execution continues on line 498
457 {
458 basic_block header = BASIC_BLOCK_FOR_FN (cfun, rc_order[b])((*(((cfun + 0))->cfg->x_basic_block_info))[(rc_order[b
])])
;
459 if (bb_loop_header_p (header))
460 {
461 class loop *loop;
462
463 /* The current active loop tree has valid loop-fathers for
464 header blocks. */
465 if (!from_scratch
466 && header->loop_father->header == header)
467 {
468 loop = header->loop_father;
469 /* If we found an existing loop remove it from the
470 loop tree. It is going to be inserted again
471 below. */
472 flow_loop_tree_node_remove (loop);
473 }
474 else
475 {
476 /* Otherwise allocate a new loop structure for the loop. */
477 loop = alloc_loop ();
478 /* ??? We could re-use unused loop slots here. */
479 loop->num = loops->larray->length ();
480 vec_safe_push (loops->larray, loop);
481 loop->header = header;
482
483 if (!from_scratch
484 && dump_file && (dump_flags & TDF_DETAILS))
485 fprintf (dump_file, "flow_loops_find: discovered new "
486 "loop %d with header %d\n",
487 loop->num, header->index);
488 }
489 /* Reset latch, we recompute it below. */
490 loop->latch = NULLnullptr;
491 larray.safe_push (loop);
492 }
493
494 /* Make blocks part of the loop root node at start. */
495 header->loop_father = loops->tree_root;
496 }
497
498 free (rc_order);
499
500 /* Now iterate over the loops found, insert them into the loop tree
501 and assign basic-block ownership. */
502 for (i = 0; i < larray.length (); ++i)
9
Assuming the condition is true
10
Loop condition is true. Entering loop body
503 {
504 class loop *loop = larray[i];
505 basic_block header = loop->header;
506 edge_iterator ei;
507 edge e;
508
509 flow_loop_tree_node_add (header->loop_father, loop);
11
Calling 'flow_loop_tree_node_add'
510 loop->num_nodes = flow_loop_nodes_find (loop->header, loop);
511
512 /* Look for the latch for this header block, if it has just a
513 single one. */
514 FOR_EACH_EDGE (e, ei, header->preds)for ((ei) = ei_start_1 (&((header->preds))); ei_cond (
(ei), &(e)); ei_next (&(ei)))
515 {
516 basic_block latch = e->src;
517
518 if (flow_bb_inside_loop_p (loop, latch))
519 {
520 if (loop->latch != NULLnullptr)
521 {
522 /* More than one latch edge. */
523 loop->latch = NULLnullptr;
524 break;
525 }
526 loop->latch = latch;
527 }
528 }
529 }
530
531 return loops;
532}
533
534/* qsort helper for sort_sibling_loops. */
535
536static int *sort_sibling_loops_cmp_rpo;
537static int
538sort_sibling_loops_cmp (const void *la_, const void *lb_)
539{
540 const class loop *la = *(const class loop * const *)la_;
541 const class loop *lb = *(const class loop * const *)lb_;
542 return (sort_sibling_loops_cmp_rpo[la->header->index]
543 - sort_sibling_loops_cmp_rpo[lb->header->index]);
544}
545
546/* Sort sibling loops in RPO order. */
547
548void
549sort_sibling_loops (function *fn)
550{
551 /* Match flow_loops_find in the order we sort sibling loops. */
552 sort_sibling_loops_cmp_rpo = XNEWVEC (int, last_basic_block_for_fn (cfun))((int *) xmalloc (sizeof (int) * ((((cfun + 0))->cfg->x_last_basic_block
))))
;
553 int *rc_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun))((int *) xmalloc (sizeof (int) * ((((cfun + 0))->cfg->x_n_basic_blocks
))))
;
554 pre_and_rev_post_order_compute_fn (fn, NULLnullptr, rc_order, false);
555 for (int i = 0; i < n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks) - NUM_FIXED_BLOCKS(2); ++i)
556 sort_sibling_loops_cmp_rpo[rc_order[i]] = i;
557 free (rc_order);
558
559 auto_vec<loop_p, 3> siblings;
560 for (auto loop : loops_list (fn, LI_INCLUDE_ROOT))
561 if (loop->inner && loop->inner->next)
562 {
563 loop_p sibling = loop->inner;
564 do
565 {
566 siblings.safe_push (sibling);
567 sibling = sibling->next;
568 }
569 while (sibling);
570 siblings.qsort (sort_sibling_loops_cmp)qsort (sort_sibling_loops_cmp);
571 loop_p *siblingp = &loop->inner;
572 for (unsigned i = 0; i < siblings.length (); ++i)
573 {
574 *siblingp = siblings[i];
575 siblingp = &(*siblingp)->next;
576 }
577 *siblingp = NULLnullptr;
578 siblings.truncate (0);
579 }
580
581 free (sort_sibling_loops_cmp_rpo);
582 sort_sibling_loops_cmp_rpo = NULLnullptr;
583}
584
585/* Ratio of frequencies of edges so that one of more latch edges is
586 considered to belong to inner loop with same header. */
587#define HEAVY_EDGE_RATIO8 8
588
589/* Minimum number of samples for that we apply
590 find_subloop_latch_edge_by_profile heuristics. */
591#define HEAVY_EDGE_MIN_SAMPLES10 10
592
593/* If the profile info is available, finds an edge in LATCHES that much more
594 frequent than the remaining edges. Returns such an edge, or NULL if we do
595 not find one.
596
597 We do not use guessed profile here, only the measured one. The guessed
598 profile is usually too flat and unreliable for this (and it is mostly based
599 on the loop structure of the program, so it does not make much sense to
600 derive the loop structure from it). */
601
602static edge
603find_subloop_latch_edge_by_profile (vec<edge> latches)
604{
605 unsigned i;
606 edge e, me = NULLnullptr;
607 profile_count mcount = profile_count::zero (), tcount = profile_count::zero ();
608
609 FOR_EACH_VEC_ELT (latches, i, e)for (i = 0; (latches).iterate ((i), &(e)); ++(i))
610 {
611 if (e->count ()> mcount)
612 {
613 me = e;
614 mcount = e->count();
615 }
616 tcount += e->count();
617 }
618
619 if (!tcount.initialized_p () || !(tcount.ipa () > HEAVY_EDGE_MIN_SAMPLES10)
620 || (tcount - mcount).apply_scale (HEAVY_EDGE_RATIO8, 1) > tcount)
621 return NULLnullptr;
622
623 if (dump_file)
624 fprintf (dump_file,
625 "Found latch edge %d -> %d using profile information.\n",
626 me->src->index, me->dest->index);
627 return me;
628}
629
630/* Among LATCHES, guesses a latch edge of LOOP corresponding to subloop, based
631 on the structure of induction variables. Returns this edge, or NULL if we
632 do not find any.
633
634 We are quite conservative, and look just for an obvious simple innermost
635 loop (which is the case where we would lose the most performance by not
636 disambiguating the loop). More precisely, we look for the following
637 situation: The source of the chosen latch edge dominates sources of all
638 the other latch edges. Additionally, the header does not contain a phi node
639 such that the argument from the chosen edge is equal to the argument from
640 another edge. */
641
642static edge
643find_subloop_latch_edge_by_ivs (class loop *loop ATTRIBUTE_UNUSED__attribute__ ((__unused__)), vec<edge> latches)
644{
645 edge e, latch = latches[0];
646 unsigned i;
647 gphi *phi;
648 gphi_iterator psi;
649 tree lop;
650 basic_block bb;
651
652 /* Find the candidate for the latch edge. */
653 for (i = 1; latches.iterate (i, &e); i++)
654 if (dominated_by_p (CDI_DOMINATORS, latch->src, e->src))
655 latch = e;
656
657 /* Verify that it dominates all the latch edges. */
658 FOR_EACH_VEC_ELT (latches, i, e)for (i = 0; (latches).iterate ((i), &(e)); ++(i))
659 if (!dominated_by_p (CDI_DOMINATORS, e->src, latch->src))
660 return NULLnullptr;
661
662 /* Check for a phi node that would deny that this is a latch edge of
663 a subloop. */
664 for (psi = gsi_start_phis (loop->header); !gsi_end_p (psi); gsi_next (&psi))
665 {
666 phi = psi.phi ();
667 lop = PHI_ARG_DEF_FROM_EDGE (phi, latch)gimple_phi_arg_def (((phi)), ((latch)->dest_idx));
668
669 /* Ignore the values that are not changed inside the subloop. */
670 if (TREE_CODE (lop)((enum tree_code) (lop)->base.code) != SSA_NAME
671 || SSA_NAME_DEF_STMT (lop)(tree_check ((lop), "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 671, __FUNCTION__, (SSA_NAME)))->ssa_name.def_stmt
== phi)
672 continue;
673 bb = gimple_bb (SSA_NAME_DEF_STMT (lop)(tree_check ((lop), "/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 673, __FUNCTION__, (SSA_NAME)))->ssa_name.def_stmt
);
674 if (!bb || !flow_bb_inside_loop_p (loop, bb))
675 continue;
676
677 FOR_EACH_VEC_ELT (latches, i, e)for (i = 0; (latches).iterate ((i), &(e)); ++(i))
678 if (e != latch
679 && PHI_ARG_DEF_FROM_EDGE (phi, e)gimple_phi_arg_def (((phi)), ((e)->dest_idx)) == lop)
680 return NULLnullptr;
681 }
682
683 if (dump_file)
684 fprintf (dump_file,
685 "Found latch edge %d -> %d using iv structure.\n",
686 latch->src->index, latch->dest->index);
687 return latch;
688}
689
690/* If we can determine that one of the several latch edges of LOOP behaves
691 as a latch edge of a separate subloop, returns this edge. Otherwise
692 returns NULL. */
693
694static edge
695find_subloop_latch_edge (class loop *loop)
696{
697 vec<edge> latches = get_loop_latch_edges (loop);
698 edge latch = NULLnullptr;
699
700 if (latches.length () > 1)
701 {
702 latch = find_subloop_latch_edge_by_profile (latches);
703
704 if (!latch
705 /* We consider ivs to guess the latch edge only in SSA. Perhaps we
706 should use cfghook for this, but it is hard to imagine it would
707 be useful elsewhere. */
708 && current_ir_type () == IR_GIMPLE)
709 latch = find_subloop_latch_edge_by_ivs (loop, latches);
710 }
711
712 latches.release ();
713 return latch;
714}
715
716/* Callback for make_forwarder_block. Returns true if the edge E is marked
717 in the set MFB_REIS_SET. */
718
719static hash_set<edge> *mfb_reis_set;
720static bool
721mfb_redirect_edges_in_set (edge e)
722{
723 return mfb_reis_set->contains (e);
724}
725
726/* Creates a subloop of LOOP with latch edge LATCH. */
727
728static void
729form_subloop (class loop *loop, edge latch)
730{
731 edge_iterator ei;
732 edge e, new_entry;
733 class loop *new_loop;
734
735 mfb_reis_set = new hash_set<edge>;
736 FOR_EACH_EDGE (e, ei, loop->header->preds)for ((ei) = ei_start_1 (&((loop->header->preds))); ei_cond
((ei), &(e)); ei_next (&(ei)))
737 {
738 if (e != latch)
739 mfb_reis_set->add (e);
740 }
741 new_entry = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
742 NULLnullptr);
743 delete mfb_reis_set;
744
745 loop->header = new_entry->src;
746
747 /* Find the blocks and subloops that belong to the new loop, and add it to
748 the appropriate place in the loop tree. */
749 new_loop = alloc_loop ();
750 new_loop->header = new_entry->dest;
751 new_loop->latch = latch->src;
752 add_loop (new_loop, loop);
753}
754
755/* Make all the latch edges of LOOP to go to a single forwarder block --
756 a new latch of LOOP. */
757
758static void
759merge_latch_edges (class loop *loop)
760{
761 vec<edge> latches = get_loop_latch_edges (loop);
762 edge latch, e;
763 unsigned i;
764
765 gcc_assert (latches.length () > 0)((void)(!(latches.length () > 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 765, __FUNCTION__), 0 : 0))
;
766
767 if (latches.length () == 1)
768 loop->latch = latches[0]->src;
769 else
770 {
771 if (dump_file)
772 fprintf (dump_file, "Merged latch edges of loop %d\n", loop->num);
773
774 mfb_reis_set = new hash_set<edge>;
775 FOR_EACH_VEC_ELT (latches, i, e)for (i = 0; (latches).iterate ((i), &(e)); ++(i))
776 mfb_reis_set->add (e);
777 latch = make_forwarder_block (loop->header, mfb_redirect_edges_in_set,
778 NULLnullptr);
779 delete mfb_reis_set;
780
781 loop->header = latch->dest;
782 loop->latch = latch->src;
783 }
784
785 latches.release ();
786}
787
788/* LOOP may have several latch edges. Transform it into (possibly several)
789 loops with single latch edge. */
790
791static void
792disambiguate_multiple_latches (class loop *loop)
793{
794 edge e;
795
796 /* We eliminate the multiple latches by splitting the header to the forwarder
797 block F and the rest R, and redirecting the edges. There are two cases:
798
799 1) If there is a latch edge E that corresponds to a subloop (we guess
800 that based on profile -- if it is taken much more often than the
801 remaining edges; and on trees, using the information about induction
802 variables of the loops), we redirect E to R, all the remaining edges to
803 F, then rescan the loops and try again for the outer loop.
804 2) If there is no such edge, we redirect all latch edges to F, and the
805 entry edges to R, thus making F the single latch of the loop. */
806
807 if (dump_file)
808 fprintf (dump_file, "Disambiguating loop %d with multiple latches\n",
809 loop->num);
810
811 /* During latch merging, we may need to redirect the entry edges to a new
812 block. This would cause problems if the entry edge was the one from the
813 entry block. To avoid having to handle this case specially, split
814 such entry edge. */
815 e = find_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr), loop->header);
816 if (e)
817 split_edge (e);
818
819 while (1)
820 {
821 e = find_subloop_latch_edge (loop);
822 if (!e)
823 break;
824
825 form_subloop (loop, e);
826 }
827
828 merge_latch_edges (loop);
829}
830
831/* Split loops with multiple latch edges. */
832
833void
834disambiguate_loops_with_multiple_latches (void)
835{
836 for (auto loop : loops_list (cfun(cfun + 0), 0))
837 {
838 if (!loop->latch)
839 disambiguate_multiple_latches (loop);
840 }
841}
842
843/* Return nonzero if basic block BB belongs to LOOP. */
844bool
845flow_bb_inside_loop_p (const class loop *loop, const_basic_block bb)
846{
847 class loop *source_loop;
848
849 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)
850 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr))
851 return 0;
852
853 source_loop = bb->loop_father;
854 return loop == source_loop || flow_loop_nested_p (loop, source_loop);
855}
856
857/* Enumeration predicate for get_loop_body_with_size. */
858static bool
859glb_enum_p (const_basic_block bb, const void *glb_loop)
860{
861 const class loop *const loop = (const class loop *) glb_loop;
862 return (bb != loop->header
863 && dominated_by_p (CDI_DOMINATORS, bb, loop->header));
864}
865
866/* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
867 order against direction of edges from latch. Specially, if
868 header != latch, latch is the 1-st block. LOOP cannot be the fake
869 loop tree root, and its size must be at most MAX_SIZE. The blocks
870 in the LOOP body are stored to BODY, and the size of the LOOP is
871 returned. */
872
873unsigned
874get_loop_body_with_size (const class loop *loop, basic_block *body,
875 unsigned max_size)
876{
877 return dfs_enumerate_from (loop->header, 1, glb_enum_p,
878 body, max_size, loop);
879}
880
881/* Gets basic blocks of a LOOP. Header is the 0-th block, rest is in dfs
882 order against direction of edges from latch. Specially, if
883 header != latch, latch is the 1-st block. */
884
885basic_block *
886get_loop_body (const class loop *loop)
887{
888 basic_block *body, bb;
889 unsigned tv = 0;
890
891 gcc_assert (loop->num_nodes)((void)(!(loop->num_nodes) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 891, __FUNCTION__), 0 : 0))
;
892
893 body = XNEWVEC (basic_block, loop->num_nodes)((basic_block *) xmalloc (sizeof (basic_block) * (loop->num_nodes
)))
;
894
895 if (loop->latch == EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr))
896 {
897 /* There may be blocks unreachable from EXIT_BLOCK, hence we need to
898 special-case the fake loop that contains the whole function. */
899 gcc_assert (loop->num_nodes == (unsigned) n_basic_blocks_for_fn (cfun))((void)(!(loop->num_nodes == (unsigned) (((cfun + 0))->
cfg->x_n_basic_blocks)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 899, __FUNCTION__), 0 : 0))
;
900 body[tv++] = loop->header;
901 body[tv++] = EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr);
902 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
903 body[tv++] = bb;
904 }
905 else
906 tv = get_loop_body_with_size (loop, body, loop->num_nodes);
907
908 gcc_assert (tv == loop->num_nodes)((void)(!(tv == loop->num_nodes) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 908, __FUNCTION__), 0 : 0))
;
909 return body;
910}
911
912/* Fills dominance descendants inside LOOP of the basic block BB into
913 array TOVISIT from index *TV. */
914
915static void
916fill_sons_in_loop (const class loop *loop, basic_block bb,
917 basic_block *tovisit, int *tv)
918{
919 basic_block son, postpone = NULLnullptr;
920
921 tovisit[(*tv)++] = bb;
922 for (son = first_dom_son (CDI_DOMINATORS, bb);
923 son;
924 son = next_dom_son (CDI_DOMINATORS, son))
925 {
926 if (!flow_bb_inside_loop_p (loop, son))
927 continue;
928
929 if (dominated_by_p (CDI_DOMINATORS, loop->latch, son))
930 {
931 postpone = son;
932 continue;
933 }
934 fill_sons_in_loop (loop, son, tovisit, tv);
935 }
936
937 if (postpone)
938 fill_sons_in_loop (loop, postpone, tovisit, tv);
939}
940
941/* Gets body of a LOOP (that must be different from the outermost loop)
942 sorted by dominance relation. Additionally, if a basic block s dominates
943 the latch, then only blocks dominated by s are be after it. */
944
945basic_block *
946get_loop_body_in_dom_order (const class loop *loop)
947{
948 basic_block *tovisit;
949 int tv;
950
951 gcc_assert (loop->num_nodes)((void)(!(loop->num_nodes) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 951, __FUNCTION__), 0 : 0))
;
952
953 tovisit = XNEWVEC (basic_block, loop->num_nodes)((basic_block *) xmalloc (sizeof (basic_block) * (loop->num_nodes
)))
;
954
955 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))((void)(!(loop->latch != (((cfun + 0))->cfg->x_exit_block_ptr
)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 955, __FUNCTION__), 0 : 0))
;
956
957 tv = 0;
958 fill_sons_in_loop (loop, loop->header, tovisit, &tv);
959
960 gcc_assert (tv == (int) loop->num_nodes)((void)(!(tv == (int) loop->num_nodes) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 960, __FUNCTION__), 0 : 0))
;
961
962 return tovisit;
963}
964
965/* Gets body of a LOOP sorted via provided BB_COMPARATOR. */
966
967basic_block *
968get_loop_body_in_custom_order (const class loop *loop,
969 int (*bb_comparator) (const void *, const void *))
970{
971 basic_block *bbs = get_loop_body (loop);
972
973 qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator)gcc_qsort (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator
)
;
974
975 return bbs;
976}
977
978/* Same as above, but use gcc_sort_r instead of qsort. */
979
980basic_block *
981get_loop_body_in_custom_order (const class loop *loop, void *data,
982 int (*bb_comparator) (const void *, const void *, void *))
983{
984 basic_block *bbs = get_loop_body (loop);
985
986 gcc_sort_r (bbs, loop->num_nodes, sizeof (basic_block), bb_comparator, data);
987
988 return bbs;
989}
990
991/* Get body of a LOOP in breadth first sort order. */
992
993basic_block *
994get_loop_body_in_bfs_order (const class loop *loop)
995{
996 basic_block *blocks;
997 basic_block bb;
998 unsigned int i = 1;
999 unsigned int vc = 0;
1000
1001 gcc_assert (loop->num_nodes)((void)(!(loop->num_nodes) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1001, __FUNCTION__), 0 : 0))
;
1002 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))((void)(!(loop->latch != (((cfun + 0))->cfg->x_exit_block_ptr
)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1002, __FUNCTION__), 0 : 0))
;
1003
1004 blocks = XNEWVEC (basic_block, loop->num_nodes)((basic_block *) xmalloc (sizeof (basic_block) * (loop->num_nodes
)))
;
1005 auto_bitmap visited;
1006 blocks[0] = loop->header;
1007 bitmap_set_bit (visited, loop->header->index);
1008 while (i < loop->num_nodes)
1009 {
1010 edge e;
1011 edge_iterator ei;
1012 gcc_assert (i > vc)((void)(!(i > vc) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1012, __FUNCTION__), 0 : 0))
;
1013 bb = blocks[vc++];
1014
1015 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1016 {
1017 if (flow_bb_inside_loop_p (loop, e->dest))
1018 {
1019 /* This bb is now visited. */
1020 if (bitmap_set_bit (visited, e->dest->index))
1021 blocks[i++] = e->dest;
1022 }
1023 }
1024 }
1025
1026 return blocks;
1027}
1028
1029/* Hash function for struct loop_exit. */
1030
1031hashval_t
1032loop_exit_hasher::hash (loop_exit *exit)
1033{
1034 return htab_hash_pointer (exit->e);
1035}
1036
1037/* Equality function for struct loop_exit. Compares with edge. */
1038
1039bool
1040loop_exit_hasher::equal (loop_exit *exit, edge e)
1041{
1042 return exit->e == e;
1043}
1044
1045/* Frees the list of loop exit descriptions EX. */
1046
1047void
1048loop_exit_hasher::remove (loop_exit *exit)
1049{
1050 loop_exit *next;
1051 for (; exit; exit = next)
1052 {
1053 next = exit->next_e;
1054
1055 exit->next->prev = exit->prev;
1056 exit->prev->next = exit->next;
1057
1058 ggc_free (exit);
1059 }
1060}
1061
1062/* Returns the list of records for E as an exit of a loop. */
1063
1064static struct loop_exit *
1065get_exit_descriptions (edge e)
1066{
1067 return current_loops((cfun + 0)->x_current_loops)->exits->find_with_hash (e, htab_hash_pointer (e));
1068}
1069
1070/* Updates the lists of loop exits in that E appears.
1071 If REMOVED is true, E is being removed, and we
1072 just remove it from the lists of exits.
1073 If NEW_EDGE is true and E is not a loop exit, we
1074 do not try to remove it from loop exit lists. */
1075
1076void
1077rescan_loop_exit (edge e, bool new_edge, bool removed)
1078{
1079 struct loop_exit *exits = NULLnullptr, *exit;
1080 class loop *aloop, *cloop;
1081
1082 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1083 return;
1084
1085 if (!removed
1086 && e->src->loop_father != NULLnullptr
1087 && e->dest->loop_father != NULLnullptr
1088 && !flow_bb_inside_loop_p (e->src->loop_father, e->dest))
1089 {
1090 cloop = find_common_loop (e->src->loop_father, e->dest->loop_father);
1091 for (aloop = e->src->loop_father;
1092 aloop != cloop;
1093 aloop = loop_outer (aloop))
1094 {
1095 exit = ggc_alloc<loop_exit> ();
1096 exit->e = e;
1097
1098 exit->next = aloop->exits->next;
1099 exit->prev = aloop->exits;
1100 exit->next->prev = exit;
1101 exit->prev->next = exit;
1102
1103 exit->next_e = exits;
1104 exits = exit;
1105 }
1106 }
1107
1108 if (!exits && new_edge)
1109 return;
1110
1111 loop_exit **slot
1112 = current_loops((cfun + 0)->x_current_loops)->exits->find_slot_with_hash (e, htab_hash_pointer (e),
1113 exits ? INSERT : NO_INSERT);
1114 if (!slot)
1115 return;
1116
1117 if (exits)
1118 {
1119 if (*slot)
1120 loop_exit_hasher::remove (*slot);
1121 *slot = exits;
1122 }
1123 else
1124 current_loops((cfun + 0)->x_current_loops)->exits->clear_slot (slot);
1125}
1126
1127/* For each loop, record list of exit edges, and start maintaining these
1128 lists. */
1129
1130void
1131record_loop_exits (void)
1132{
1133 basic_block bb;
1134 edge_iterator ei;
1135 edge e;
1136
1137 if (!current_loops((cfun + 0)->x_current_loops))
1138 return;
1139
1140 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1141 return;
1142 loops_state_set (LOOPS_HAVE_RECORDED_EXITS);
1143
1144 gcc_assert (current_loops->exits == NULL)((void)(!(((cfun + 0)->x_current_loops)->exits == nullptr
) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1144, __FUNCTION__), 0 : 0))
;
1145 current_loops((cfun + 0)->x_current_loops)->exits
1146 = hash_table<loop_exit_hasher>::create_ggc (2 * number_of_loops (cfun(cfun + 0)));
1147
1148 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
1149 {
1150 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1151 {
1152 rescan_loop_exit (e, true, false);
1153 }
1154 }
1155}
1156
1157/* Dumps information about the exit in *SLOT to FILE.
1158 Callback for htab_traverse. */
1159
1160int
1161dump_recorded_exit (loop_exit **slot, FILE *file)
1162{
1163 struct loop_exit *exit = *slot;
1164 unsigned n = 0;
1165 edge e = exit->e;
1166
1167 for (; exit != NULLnullptr; exit = exit->next_e)
1168 n++;
1169
1170 fprintf (file, "Edge %d->%d exits %u loops\n",
1171 e->src->index, e->dest->index, n);
1172
1173 return 1;
1174}
1175
1176/* Dumps the recorded exits of loops to FILE. */
1177
1178extern void dump_recorded_exits (FILE *);
1179void
1180dump_recorded_exits (FILE *file)
1181{
1182 if (!current_loops((cfun + 0)->x_current_loops)->exits)
1183 return;
1184 current_loops((cfun + 0)->x_current_loops)->exits->traverse<FILE *, dump_recorded_exit> (file);
1185}
1186
1187/* Releases lists of loop exits. */
1188
1189void
1190release_recorded_exits (function *fn)
1191{
1192 gcc_assert (loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS))((void)(!(loops_state_satisfies_p (fn, LOOPS_HAVE_RECORDED_EXITS
)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1192, __FUNCTION__), 0 : 0))
;
1193 loops_for_fn (fn)->exits->empty ();
1194 loops_for_fn (fn)->exits = NULLnullptr;
1195 loops_state_clear (fn, LOOPS_HAVE_RECORDED_EXITS);
1196}
1197
1198/* Returns the list of the exit edges of a LOOP. */
1199
1200auto_vec<edge>
1201get_loop_exit_edges (const class loop *loop, basic_block *body)
1202{
1203 auto_vec<edge> edges;
1204 edge e;
1205 unsigned i;
1206 edge_iterator ei;
1207 struct loop_exit *exit;
1208
1209 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))((void)(!(loop->latch != (((cfun + 0))->cfg->x_exit_block_ptr
)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1209, __FUNCTION__), 0 : 0))
;
1210
1211 /* If we maintain the lists of exits, use them. Otherwise we must
1212 scan the body of the loop. */
1213 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1214 {
1215 for (exit = loop->exits->next; exit->e; exit = exit->next)
1216 edges.safe_push (exit->e);
1217 }
1218 else
1219 {
1220 bool body_from_caller = true;
1221 if (!body)
1222 {
1223 body = get_loop_body (loop);
1224 body_from_caller = false;
1225 }
1226 for (i = 0; i < loop->num_nodes; i++)
1227 FOR_EACH_EDGE (e, ei, body[i]->succs)for ((ei) = ei_start_1 (&((body[i]->succs))); ei_cond (
(ei), &(e)); ei_next (&(ei)))
1228 {
1229 if (!flow_bb_inside_loop_p (loop, e->dest))
1230 edges.safe_push (e);
1231 }
1232 if (!body_from_caller)
1233 free (body);
1234 }
1235
1236 return edges;
1237}
1238
1239/* Counts the number of conditional branches inside LOOP. */
1240
1241unsigned
1242num_loop_branches (const class loop *loop)
1243{
1244 unsigned i, n;
1245 basic_block * body;
1246
1247 gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun))((void)(!(loop->latch != (((cfun + 0))->cfg->x_exit_block_ptr
)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1247, __FUNCTION__), 0 : 0))
;
1248
1249 body = get_loop_body (loop);
1250 n = 0;
1251 for (i = 0; i < loop->num_nodes; i++)
1252 if (EDGE_COUNT (body[i]->succs)vec_safe_length (body[i]->succs) >= 2)
1253 n++;
1254 free (body);
1255
1256 return n;
1257}
1258
1259/* Adds basic block BB to LOOP. */
1260void
1261add_bb_to_loop (basic_block bb, class loop *loop)
1262{
1263 unsigned i;
1264 loop_p ploop;
1265 edge_iterator ei;
1266 edge e;
1267
1268 gcc_assert (bb->loop_father == NULL)((void)(!(bb->loop_father == nullptr) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1268, __FUNCTION__), 0 : 0))
;
1269 bb->loop_father = loop;
1270 loop->num_nodes++;
1271 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)for (i = 0; vec_safe_iterate ((loop->superloops), (i), &
(ploop)); ++(i))
1272 ploop->num_nodes++;
1273
1274 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1275 {
1276 rescan_loop_exit (e, true, false);
1277 }
1278 FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1279 {
1280 rescan_loop_exit (e, true, false);
1281 }
1282}
1283
1284/* Remove basic block BB from loops. */
1285void
1286remove_bb_from_loops (basic_block bb)
1287{
1288 unsigned i;
1289 class loop *loop = bb->loop_father;
1290 loop_p ploop;
1291 edge_iterator ei;
1292 edge e;
1293
1294 gcc_assert (loop != NULL)((void)(!(loop != nullptr) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1294, __FUNCTION__), 0 : 0))
;
1295 loop->num_nodes--;
1296 FOR_EACH_VEC_SAFE_ELT (loop->superloops, i, ploop)for (i = 0; vec_safe_iterate ((loop->superloops), (i), &
(ploop)); ++(i))
1297 ploop->num_nodes--;
1298 bb->loop_father = NULLnullptr;
1299
1300 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1301 {
1302 rescan_loop_exit (e, false, true);
1303 }
1304 FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1305 {
1306 rescan_loop_exit (e, false, true);
1307 }
1308}
1309
1310/* Finds nearest common ancestor in loop tree for given loops. */
1311class loop *
1312find_common_loop (class loop *loop_s, class loop *loop_d)
1313{
1314 unsigned sdepth, ddepth;
1315
1316 if (!loop_s) return loop_d;
1317 if (!loop_d) return loop_s;
1318
1319 sdepth = loop_depth (loop_s);
1320 ddepth = loop_depth (loop_d);
1321
1322 if (sdepth < ddepth)
1323 loop_d = (*loop_d->superloops)[sdepth];
1324 else if (sdepth > ddepth)
1325 loop_s = (*loop_s->superloops)[ddepth];
1326
1327 while (loop_s != loop_d)
1328 {
1329 loop_s = loop_outer (loop_s);
1330 loop_d = loop_outer (loop_d);
1331 }
1332 return loop_s;
1333}
1334
1335/* Removes LOOP from structures and frees its data. */
1336
1337void
1338delete_loop (class loop *loop)
1339{
1340 /* Remove the loop from structure. */
1341 flow_loop_tree_node_remove (loop);
1342
1343 /* Remove loop from loops array. */
1344 (*current_loops((cfun + 0)->x_current_loops)->larray)[loop->num] = NULLnullptr;
1345
1346 /* Free loop data. */
1347 flow_loop_free (loop);
1348}
1349
1350/* Cancels the LOOP; it must be innermost one. */
1351
1352static void
1353cancel_loop (class loop *loop)
1354{
1355 basic_block *bbs;
1356 unsigned i;
1357 class loop *outer = loop_outer (loop);
1358
1359 gcc_assert (!loop->inner)((void)(!(!loop->inner) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1359, __FUNCTION__), 0 : 0))
;
1360
1361 /* Move blocks up one level (they should be removed as soon as possible). */
1362 bbs = get_loop_body (loop);
1363 for (i = 0; i < loop->num_nodes; i++)
1364 bbs[i]->loop_father = outer;
1365
1366 free (bbs);
1367 delete_loop (loop);
1368}
1369
1370/* Cancels LOOP and all its subloops. */
1371void
1372cancel_loop_tree (class loop *loop)
1373{
1374 while (loop->inner)
1375 cancel_loop_tree (loop->inner);
1376 cancel_loop (loop);
1377}
1378
1379/* Disable warnings about missing quoting in GCC diagnostics for
1380 the verification errors. Their format strings don't follow GCC
1381 diagnostic conventions and the calls are ultimately followed by
1382 a deliberate ICE triggered by a failed assertion. */
1383#if __GNUC__4 >= 10
1384# pragma GCC diagnostic push
1385# pragma GCC diagnostic ignored "-Wformat-diag"
1386#endif
1387
1388/* Checks that information about loops is correct
1389 -- sizes of loops are all right
1390 -- results of get_loop_body really belong to the loop
1391 -- loop header have just single entry edge and single latch edge
1392 -- loop latches have only single successor that is header of their loop
1393 -- irreducible loops are correctly marked
1394 -- the cached loop depth and loop father of each bb is correct
1395 */
1396DEBUG_FUNCTION__attribute__ ((__used__)) void
1397verify_loop_structure (void)
1398{
1399 unsigned *sizes, i, j;
1400 basic_block bb, *bbs;
1401 class loop *loop;
1402 int err = 0;
1403 edge e;
1404 unsigned num = number_of_loops (cfun(cfun + 0));
1405 struct loop_exit *exit, *mexit;
1406 bool dom_available = dom_info_available_p (CDI_DOMINATORS);
1407
1408 if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
1409 {
1410 error ("loop verification on loop tree that needs fixup");
1411 err = 1;
1412 }
1413
1414 /* We need up-to-date dominators, compute or verify them. */
1415 if (!dom_available)
1416 calculate_dominance_info (CDI_DOMINATORS);
1417 else
1418 verify_dominators (CDI_DOMINATORS);
1419
1420 /* Check the loop tree root. */
1421 if (current_loops((cfun + 0)->x_current_loops)->tree_root->header != ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)
1422 || current_loops((cfun + 0)->x_current_loops)->tree_root->latch != EXIT_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_exit_block_ptr)
1423 || (current_loops((cfun + 0)->x_current_loops)->tree_root->num_nodes
1424 != (unsigned) n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks)))
1425 {
1426 error ("corrupt loop tree root");
1427 err = 1;
1428 }
1429
1430 /* Check the headers. */
1431 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
1432 if (bb_loop_header_p (bb))
1433 {
1434 if (bb->loop_father->header == NULLnullptr)
1435 {
1436 error ("loop with header %d marked for removal", bb->index);
1437 err = 1;
1438 }
1439 else if (bb->loop_father->header != bb)
1440 {
1441 error ("loop with header %d not in loop tree", bb->index);
1442 err = 1;
1443 }
1444 }
1445 else if (bb->loop_father->header == bb)
1446 {
1447 error ("non-loop with header %d not marked for removal", bb->index);
1448 err = 1;
1449 }
1450
1451 /* Check the recorded loop father and sizes of loops. */
1452 auto_sbitmap visited (last_basic_block_for_fn (cfun)(((cfun + 0))->cfg->x_last_basic_block));
1453 bitmap_clear (visited);
1454 bbs = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun))((basic_block *) xmalloc (sizeof (basic_block) * ((((cfun + 0
))->cfg->x_n_basic_blocks))))
;
1455 for (auto loop : loops_list (cfun(cfun + 0), LI_FROM_INNERMOST))
1456 {
1457 unsigned n;
1458
1459 if (loop->header == NULLnullptr)
1460 {
1461 error ("removed loop %d in loop tree", loop->num);
1462 err = 1;
1463 continue;
1464 }
1465
1466 n = get_loop_body_with_size (loop, bbs, n_basic_blocks_for_fn (cfun)(((cfun + 0))->cfg->x_n_basic_blocks));
1467 if (loop->num_nodes != n)
1468 {
1469 error ("size of loop %d should be %d, not %d",
1470 loop->num, n, loop->num_nodes);
1471 err = 1;
1472 }
1473
1474 for (j = 0; j < n; j++)
1475 {
1476 bb = bbs[j];
1477
1478 if (!flow_bb_inside_loop_p (loop, bb))
1479 {
1480 error ("bb %d does not belong to loop %d",
1481 bb->index, loop->num);
1482 err = 1;
1483 }
1484
1485 /* Ignore this block if it is in an inner loop. */
1486 if (bitmap_bit_p (visited, bb->index))
1487 continue;
1488 bitmap_set_bit (visited, bb->index);
1489
1490 if (bb->loop_father != loop)
1491 {
1492 error ("bb %d has father loop %d, should be loop %d",
1493 bb->index, bb->loop_father->num, loop->num);
1494 err = 1;
1495 }
1496 }
1497 }
1498 free (bbs);
1499
1500 /* Check headers and latches. */
1501 for (auto loop : loops_list (cfun(cfun + 0), 0))
1502 {
1503 i = loop->num;
1504 if (loop->header == NULLnullptr)
1505 continue;
1506 if (!bb_loop_header_p (loop->header))
1507 {
1508 error ("loop %d%'s header is not a loop header", i);
1509 err = 1;
1510 }
1511 if (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)
1512 && EDGE_COUNT (loop->header->preds)vec_safe_length (loop->header->preds) != 2)
1513 {
1514 error ("loop %d%'s header does not have exactly 2 entries", i);
1515 err = 1;
1516 }
1517 if (loop->latch)
1518 {
1519 if (!find_edge (loop->latch, loop->header))
1520 {
1521 error ("loop %d%'s latch does not have an edge to its header", i);
1522 err = 1;
1523 }
1524 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, loop->header))
1525 {
1526 error ("loop %d%'s latch is not dominated by its header", i);
1527 err = 1;
1528 }
1529 }
1530 if (loops_state_satisfies_p (LOOPS_HAVE_SIMPLE_LATCHES))
1531 {
1532 if (!single_succ_p (loop->latch))
1533 {
1534 error ("loop %d%'s latch does not have exactly 1 successor", i);
1535 err = 1;
1536 }
1537 if (single_succ (loop->latch) != loop->header)
1538 {
1539 error ("loop %d%'s latch does not have header as successor", i);
1540 err = 1;
1541 }
1542 if (loop->latch->loop_father != loop)
1543 {
1544 error ("loop %d%'s latch does not belong directly to it", i);
1545 err = 1;
1546 }
1547 }
1548 if (loop->header->loop_father != loop)
1549 {
1550 error ("loop %d%'s header does not belong directly to it", i);
1551 err = 1;
1552 }
1553 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1554 {
1555 edge_iterator ei;
1556 FOR_EACH_EDGE (e, ei, loop->header->preds)for ((ei) = ei_start_1 (&((loop->header->preds))); ei_cond
((ei), &(e)); ei_next (&(ei)))
1557 if (dominated_by_p (CDI_DOMINATORS, e->src, loop->header)
1558 && e->flags & EDGE_IRREDUCIBLE_LOOP)
1559 {
1560 error ("loop %d%'s latch is marked as part of irreducible"
1561 " region", i);
1562 err = 1;
1563 }
1564 }
1565 }
1566
1567 /* Check irreducible loops. */
1568 if (loops_state_satisfies_p (LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS))
1569 {
1570 auto_edge_flag saved_edge_irr (cfun(cfun + 0));
1571 auto_bb_flag saved_bb_irr (cfun(cfun + 0));
1572 /* Save old info. */
1573 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
1574 {
1575 edge_iterator ei;
1576 if (bb->flags & BB_IRREDUCIBLE_LOOP)
1577 bb->flags |= saved_bb_irr;
1578 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1579 if (e->flags & EDGE_IRREDUCIBLE_LOOP)
1580 e->flags |= saved_edge_irr;
1581 }
1582
1583 /* Recount it. */
1584 mark_irreducible_loops ();
1585
1586 /* Compare. */
1587 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
1588 {
1589 edge_iterator ei;
1590
1591 if ((bb->flags & BB_IRREDUCIBLE_LOOP)
1592 && !(bb->flags & saved_bb_irr))
1593 {
1594 error ("basic block %d should be marked irreducible", bb->index);
1595 err = 1;
1596 }
1597 else if (!(bb->flags & BB_IRREDUCIBLE_LOOP)
1598 && (bb->flags & saved_bb_irr))
1599 {
1600 error ("basic block %d should not be marked irreducible", bb->index);
1601 err = 1;
1602 }
1603 bb->flags &= ~saved_bb_irr;
1604 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1605 {
1606 if ((e->flags & EDGE_IRREDUCIBLE_LOOP)
1607 && !(e->flags & saved_edge_irr))
1608 {
1609 error ("edge from %d to %d should be marked irreducible",
1610 e->src->index, e->dest->index);
1611 err = 1;
1612 }
1613 else if (!(e->flags & EDGE_IRREDUCIBLE_LOOP)
1614 && (e->flags & saved_edge_irr))
1615 {
1616 error ("edge from %d to %d should not be marked irreducible",
1617 e->src->index, e->dest->index);
1618 err = 1;
1619 }
1620 e->flags &= ~saved_edge_irr;
1621 }
1622 }
1623 }
1624
1625 /* Check the recorded loop exits. */
1626 for (auto loop : loops_list (cfun(cfun + 0), 0))
1627 {
1628 if (!loop->exits || loop->exits->e != NULLnullptr)
1629 {
1630 error ("corrupted head of the exits list of loop %d",
1631 loop->num);
1632 err = 1;
1633 }
1634 else
1635 {
1636 /* Check that the list forms a cycle, and all elements except
1637 for the head are nonnull. */
1638 for (mexit = loop->exits, exit = mexit->next, i = 0;
1639 exit->e && exit != mexit;
1640 exit = exit->next)
1641 {
1642 if (i++ & 1)
1643 mexit = mexit->next;
1644 }
1645
1646 if (exit != loop->exits)
1647 {
1648 error ("corrupted exits list of loop %d", loop->num);
1649 err = 1;
1650 }
1651 }
1652
1653 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1654 {
1655 if (loop->exits->next != loop->exits)
1656 {
1657 error ("nonempty exits list of loop %d, but exits are not recorded",
1658 loop->num);
1659 err = 1;
1660 }
1661 }
1662 }
1663
1664 if (loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1665 {
1666 unsigned n_exits = 0, eloops;
1667
1668 sizes = XCNEWVEC (unsigned, num)((unsigned *) xcalloc ((num), sizeof (unsigned)));
1669 memset (sizes, 0, sizeof (unsigned) * num);
1670 FOR_EACH_BB_FN (bb, cfun)for (bb = ((cfun + 0))->cfg->x_entry_block_ptr->next_bb
; bb != ((cfun + 0))->cfg->x_exit_block_ptr; bb = bb->
next_bb)
1671 {
1672 edge_iterator ei;
1673 if (bb->loop_father == current_loops((cfun + 0)->x_current_loops)->tree_root)
1674 continue;
1675 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1676 {
1677 if (flow_bb_inside_loop_p (bb->loop_father, e->dest))
1678 continue;
1679
1680 n_exits++;
1681 exit = get_exit_descriptions (e);
1682 if (!exit)
1683 {
1684 error ("exit %d->%d not recorded",
1685 e->src->index, e->dest->index);
1686 err = 1;
1687 }
1688 eloops = 0;
1689 for (; exit; exit = exit->next_e)
1690 eloops++;
1691
1692 for (loop = bb->loop_father;
1693 loop != e->dest->loop_father
1694 /* When a loop exit is also an entry edge which
1695 can happen when avoiding CFG manipulations
1696 then the last loop exited is the outer loop
1697 of the loop entered. */
1698 && loop != loop_outer (e->dest->loop_father);
1699 loop = loop_outer (loop))
1700 {
1701 eloops--;
1702 sizes[loop->num]++;
1703 }
1704
1705 if (eloops != 0)
1706 {
1707 error ("wrong list of exited loops for edge %d->%d",
1708 e->src->index, e->dest->index);
1709 err = 1;
1710 }
1711 }
1712 }
1713
1714 if (n_exits != current_loops((cfun + 0)->x_current_loops)->exits->elements ())
1715 {
1716 error ("too many loop exits recorded");
1717 err = 1;
1718 }
1719
1720 for (auto loop : loops_list (cfun(cfun + 0), 0))
1721 {
1722 eloops = 0;
1723 for (exit = loop->exits->next; exit->e; exit = exit->next)
1724 eloops++;
1725 if (eloops != sizes[loop->num])
1726 {
1727 error ("%d exits recorded for loop %d (having %d exits)",
1728 eloops, loop->num, sizes[loop->num]);
1729 err = 1;
1730 }
1731 }
1732
1733 free (sizes);
1734 }
1735
1736 gcc_assert (!err)((void)(!(!err) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1736, __FUNCTION__), 0 : 0))
;
1737
1738 if (!dom_available)
1739 free_dominance_info (CDI_DOMINATORS);
1740}
1741
1742#if __GNUC__4 >= 10
1743# pragma GCC diagnostic pop
1744#endif
1745
1746/* Returns latch edge of LOOP. */
1747edge
1748loop_latch_edge (const class loop *loop)
1749{
1750 return find_edge (loop->latch, loop->header);
1751}
1752
1753/* Returns preheader edge of LOOP. */
1754edge
1755loop_preheader_edge (const class loop *loop)
1756{
1757 edge e;
1758 edge_iterator ei;
1759
1760 gcc_assert (loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS)((void)(!(loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS) &&
! loops_state_satisfies_p (LOOPS_MAY_HAVE_MULTIPLE_LATCHES))
? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1761, __FUNCTION__), 0 : 0))
1761 && ! loops_state_satisfies_p (LOOPS_MAY_HAVE_MULTIPLE_LATCHES))((void)(!(loops_state_satisfies_p (LOOPS_HAVE_PREHEADERS) &&
! loops_state_satisfies_p (LOOPS_MAY_HAVE_MULTIPLE_LATCHES))
? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1761, __FUNCTION__), 0 : 0))
;
1762
1763 FOR_EACH_EDGE (e, ei, loop->header->preds)for ((ei) = ei_start_1 (&((loop->header->preds))); ei_cond
((ei), &(e)); ei_next (&(ei)))
1764 if (e->src != loop->latch)
1765 break;
1766
1767 if (! e)
1768 {
1769 gcc_assert (! loop_outer (loop))((void)(!(! loop_outer (loop)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 1769, __FUNCTION__), 0 : 0))
;
1770 return single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr));
1771 }
1772
1773 return e;
1774}
1775
1776/* Returns true if E is an exit of LOOP. */
1777
1778bool
1779loop_exit_edge_p (const class loop *loop, const_edge e)
1780{
1781 return (flow_bb_inside_loop_p (loop, e->src)
1782 && !flow_bb_inside_loop_p (loop, e->dest));
1783}
1784
1785/* Returns the single exit edge of LOOP, or NULL if LOOP has either no exit
1786 or more than one exit. If loops do not have the exits recorded, NULL
1787 is returned always. */
1788
1789edge
1790single_exit (const class loop *loop)
1791{
1792 struct loop_exit *exit = loop->exits->next;
1793
1794 if (!loops_state_satisfies_p (LOOPS_HAVE_RECORDED_EXITS))
1795 return NULLnullptr;
1796
1797 if (exit->e && exit->next == loop->exits)
1798 return exit->e;
1799 else
1800 return NULLnullptr;
1801}
1802
1803/* Returns true when BB has an incoming edge exiting LOOP. */
1804
1805bool
1806loop_exits_to_bb_p (class loop *loop, basic_block bb)
1807{
1808 edge e;
1809 edge_iterator ei;
1810
1811 FOR_EACH_EDGE (e, ei, bb->preds)for ((ei) = ei_start_1 (&((bb->preds))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1812 if (loop_exit_edge_p (loop, e))
1813 return true;
1814
1815 return false;
1816}
1817
1818/* Returns true when BB has an outgoing edge exiting LOOP. */
1819
1820bool
1821loop_exits_from_bb_p (class loop *loop, basic_block bb)
1822{
1823 edge e;
1824 edge_iterator ei;
1825
1826 FOR_EACH_EDGE (e, ei, bb->succs)for ((ei) = ei_start_1 (&((bb->succs))); ei_cond ((ei)
, &(e)); ei_next (&(ei)))
1827 if (loop_exit_edge_p (loop, e))
1828 return true;
1829
1830 return false;
1831}
1832
1833/* Return location corresponding to the loop control condition if possible. */
1834
1835dump_user_location_t
1836get_loop_location (class loop *loop)
1837{
1838 rtx_insn *insn = NULLnullptr;
1839 class niter_desc *desc = NULLnullptr;
1840 edge exit;
1841
1842 /* For a for or while loop, we would like to return the location
1843 of the for or while statement, if possible. To do this, look
1844 for the branch guarding the loop back-edge. */
1845
1846 /* If this is a simple loop with an in_edge, then the loop control
1847 branch is typically at the end of its source. */
1848 desc = get_simple_loop_desc (loop);
1849 if (desc->in_edge)
1850 {
1851 FOR_BB_INSNS_REVERSE (desc->in_edge->src, insn)for ((insn) = (desc->in_edge->src)->il.x.rtl->end_
; (insn) && (insn) != PREV_INSN ((desc->in_edge->
src)->il.x.head_); (insn) = PREV_INSN (insn))
1852 {
1853 if (INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
&& INSN_HAS_LOCATION (insn))
1854 return insn;
1855 }
1856 }
1857 /* If loop has a single exit, then the loop control branch
1858 must be at the end of its source. */
1859 if ((exit = single_exit (loop)))
1860 {
1861 FOR_BB_INSNS_REVERSE (exit->src, insn)for ((insn) = (exit->src)->il.x.rtl->end_; (insn) &&
(insn) != PREV_INSN ((exit->src)->il.x.head_); (insn) =
PREV_INSN (insn))
1862 {
1863 if (INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
&& INSN_HAS_LOCATION (insn))
1864 return insn;
1865 }
1866 }
1867 /* Next check the latch, to see if it is non-empty. */
1868 FOR_BB_INSNS_REVERSE (loop->latch, insn)for ((insn) = (loop->latch)->il.x.rtl->end_; (insn) &&
(insn) != PREV_INSN ((loop->latch)->il.x.head_); (insn
) = PREV_INSN (insn))
1869 {
1870 if (INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
&& INSN_HAS_LOCATION (insn))
1871 return insn;
1872 }
1873 /* Finally, if none of the above identifies the loop control branch,
1874 return the first location in the loop header. */
1875 FOR_BB_INSNS (loop->header, insn)for ((insn) = (loop->header)->il.x.head_; (insn) &&
(insn) != NEXT_INSN ((loop->header)->il.x.rtl->end_
); (insn) = NEXT_INSN (insn))
1876 {
1877 if (INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
&& INSN_HAS_LOCATION (insn))
1878 return insn;
1879 }
1880 /* If all else fails, simply return the current function location. */
1881 return dump_user_location_t::from_function_decl (current_function_decl);
1882}
1883
1884/* Records that every statement in LOOP is executed I_BOUND times.
1885 REALISTIC is true if I_BOUND is expected to be close to the real number
1886 of iterations. UPPER is true if we are sure the loop iterates at most
1887 I_BOUND times. */
1888
1889void
1890record_niter_bound (class loop *loop, const widest_int &i_bound,
1891 bool realistic, bool upper)
1892{
1893 /* Update the bounds only when there is no previous estimation, or when the
1894 current estimation is smaller. */
1895 if (upper
1896 && (!loop->any_upper_bound
1897 || wi::ltu_p (i_bound, loop->nb_iterations_upper_bound)))
1898 {
1899 loop->any_upper_bound = true;
1900 loop->nb_iterations_upper_bound = i_bound;
1901 if (!loop->any_likely_upper_bound)
1902 {
1903 loop->any_likely_upper_bound = true;
1904 loop->nb_iterations_likely_upper_bound = i_bound;
1905 }
1906 }
1907 if (realistic
1908 && (!loop->any_estimate
1909 || wi::ltu_p (i_bound, loop->nb_iterations_estimate)))
1910 {
1911 loop->any_estimate = true;
1912 loop->nb_iterations_estimate = i_bound;
1913 }
1914 if (!realistic
1915 && (!loop->any_likely_upper_bound
1916 || wi::ltu_p (i_bound, loop->nb_iterations_likely_upper_bound)))
1917 {
1918 loop->any_likely_upper_bound = true;
1919 loop->nb_iterations_likely_upper_bound = i_bound;
1920 }
1921
1922 /* If an upper bound is smaller than the realistic estimate of the
1923 number of iterations, use the upper bound instead. */
1924 if (loop->any_upper_bound
1925 && loop->any_estimate
1926 && wi::ltu_p (loop->nb_iterations_upper_bound,
1927 loop->nb_iterations_estimate))
1928 loop->nb_iterations_estimate = loop->nb_iterations_upper_bound;
1929 if (loop->any_upper_bound
1930 && loop->any_likely_upper_bound
1931 && wi::ltu_p (loop->nb_iterations_upper_bound,
1932 loop->nb_iterations_likely_upper_bound))
1933 loop->nb_iterations_likely_upper_bound = loop->nb_iterations_upper_bound;
1934}
1935
1936/* Similar to get_estimated_loop_iterations, but returns the estimate only
1937 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
1938 on the number of iterations of LOOP could not be derived, returns -1. */
1939
1940HOST_WIDE_INTlong
1941get_estimated_loop_iterations_int (class loop *loop)
1942{
1943 widest_int nit;
1944 HOST_WIDE_INTlong hwi_nit;
1945
1946 if (!get_estimated_loop_iterations (loop, &nit))
1947 return -1;
1948
1949 if (!wi::fits_shwi_p (nit))
1950 return -1;
1951 hwi_nit = nit.to_shwi ();
1952
1953 return hwi_nit < 0 ? -1 : hwi_nit;
1954}
1955
1956/* Returns an upper bound on the number of executions of statements
1957 in the LOOP. For statements before the loop exit, this exceeds
1958 the number of execution of the latch by one. */
1959
1960HOST_WIDE_INTlong
1961max_stmt_executions_int (class loop *loop)
1962{
1963 HOST_WIDE_INTlong nit = get_max_loop_iterations_int (loop);
1964 HOST_WIDE_INTlong snit;
1965
1966 if (nit == -1)
1967 return -1;
1968
1969 snit = (HOST_WIDE_INTlong) ((unsigned HOST_WIDE_INTlong) nit + 1);
1970
1971 /* If the computation overflows, return -1. */
1972 return snit < 0 ? -1 : snit;
1973}
1974
1975/* Returns an likely upper bound on the number of executions of statements
1976 in the LOOP. For statements before the loop exit, this exceeds
1977 the number of execution of the latch by one. */
1978
1979HOST_WIDE_INTlong
1980likely_max_stmt_executions_int (class loop *loop)
1981{
1982 HOST_WIDE_INTlong nit = get_likely_max_loop_iterations_int (loop);
1983 HOST_WIDE_INTlong snit;
1984
1985 if (nit == -1)
1986 return -1;
1987
1988 snit = (HOST_WIDE_INTlong) ((unsigned HOST_WIDE_INTlong) nit + 1);
1989
1990 /* If the computation overflows, return -1. */
1991 return snit < 0 ? -1 : snit;
1992}
1993
1994/* Sets NIT to the estimated number of executions of the latch of the
1995 LOOP. If we have no reliable estimate, the function returns false, otherwise
1996 returns true. */
1997
1998bool
1999get_estimated_loop_iterations (class loop *loop, widest_int *nit)
2000{
2001 /* Even if the bound is not recorded, possibly we can derrive one from
2002 profile. */
2003 if (!loop->any_estimate)
2004 {
2005 if (loop->header->count.reliable_p ())
2006 {
2007 *nit = gcov_type_to_wide_int
2008 (expected_loop_iterations_unbounded (loop) + 1);
2009 return true;
2010 }
2011 return false;
2012 }
2013
2014 *nit = loop->nb_iterations_estimate;
2015 return true;
2016}
2017
2018/* Sets NIT to an upper bound for the maximum number of executions of the
2019 latch of the LOOP. If we have no reliable estimate, the function returns
2020 false, otherwise returns true. */
2021
2022bool
2023get_max_loop_iterations (const class loop *loop, widest_int *nit)
2024{
2025 if (!loop->any_upper_bound)
2026 return false;
2027
2028 *nit = loop->nb_iterations_upper_bound;
2029 return true;
2030}
2031
2032/* Similar to get_max_loop_iterations, but returns the estimate only
2033 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
2034 on the number of iterations of LOOP could not be derived, returns -1. */
2035
2036HOST_WIDE_INTlong
2037get_max_loop_iterations_int (const class loop *loop)
2038{
2039 widest_int nit;
2040 HOST_WIDE_INTlong hwi_nit;
2041
2042 if (!get_max_loop_iterations (loop, &nit))
2043 return -1;
2044
2045 if (!wi::fits_shwi_p (nit))
2046 return -1;
2047 hwi_nit = nit.to_shwi ();
2048
2049 return hwi_nit < 0 ? -1 : hwi_nit;
2050}
2051
2052/* Sets NIT to an upper bound for the maximum number of executions of the
2053 latch of the LOOP. If we have no reliable estimate, the function returns
2054 false, otherwise returns true. */
2055
2056bool
2057get_likely_max_loop_iterations (class loop *loop, widest_int *nit)
2058{
2059 if (!loop->any_likely_upper_bound)
2060 return false;
2061
2062 *nit = loop->nb_iterations_likely_upper_bound;
2063 return true;
2064}
2065
2066/* Similar to get_max_loop_iterations, but returns the estimate only
2067 if it fits to HOST_WIDE_INT. If this is not the case, or the estimate
2068 on the number of iterations of LOOP could not be derived, returns -1. */
2069
2070HOST_WIDE_INTlong
2071get_likely_max_loop_iterations_int (class loop *loop)
2072{
2073 widest_int nit;
2074 HOST_WIDE_INTlong hwi_nit;
2075
2076 if (!get_likely_max_loop_iterations (loop, &nit))
2077 return -1;
2078
2079 if (!wi::fits_shwi_p (nit))
2080 return -1;
2081 hwi_nit = nit.to_shwi ();
2082
2083 return hwi_nit < 0 ? -1 : hwi_nit;
2084}
2085
2086/* Returns the loop depth of the loop BB belongs to. */
2087
2088int
2089bb_loop_depth (const_basic_block bb)
2090{
2091 return bb->loop_father ? loop_depth (bb->loop_father) : 0;
2092}
2093
2094/* Marks LOOP for removal and sets LOOPS_NEED_FIXUP. */
2095
2096void
2097mark_loop_for_removal (loop_p loop)
2098{
2099 if (loop->header == NULLnullptr)
2100 return;
2101 loop->former_header = loop->header;
2102 loop->header = NULLnullptr;
2103 loop->latch = NULLnullptr;
2104 loops_state_set (LOOPS_NEED_FIXUP);
2105}
2106
2107/* Starting from loop tree ROOT, walk loop tree as the visiting
2108 order specified by FLAGS. The supported visiting orders
2109 are:
2110 - LI_ONLY_INNERMOST
2111 - LI_FROM_INNERMOST
2112 - Preorder (if neither of above is specified) */
2113
2114void
2115loops_list::walk_loop_tree (class loop *root, unsigned flags)
2116{
2117 bool only_innermost_p = flags & LI_ONLY_INNERMOST;
2118 bool from_innermost_p = flags & LI_FROM_INNERMOST;
2119 bool preorder_p = !(only_innermost_p || from_innermost_p);
2120
2121 /* Early handle root without any inner loops, make later
2122 processing simpler, that is all loops processed in the
2123 following while loop are impossible to be root. */
2124 if (!root->inner)
2125 {
2126 if (flags & LI_INCLUDE_ROOT)
2127 this->to_visit.quick_push (root->num);
2128 return;
2129 }
2130 else if (preorder_p && flags & LI_INCLUDE_ROOT)
2131 this->to_visit.quick_push (root->num);
2132
2133 class loop *aloop;
2134 for (aloop = root->inner;
2135 aloop->inner != NULLnullptr;
2136 aloop = aloop->inner)
2137 {
2138 if (preorder_p)
2139 this->to_visit.quick_push (aloop->num);
2140 continue;
2141 }
2142
2143 while (1)
2144 {
2145 gcc_assert (aloop != root)((void)(!(aloop != root) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.c"
, 2145, __FUNCTION__), 0 : 0))
;
2146 if (from_innermost_p || aloop->inner == NULLnullptr)
2147 this->to_visit.quick_push (aloop->num);
2148
2149 if (aloop->next)
2150 {
2151 for (aloop = aloop->next;
2152 aloop->inner != NULLnullptr;
2153 aloop = aloop->inner)
2154 {
2155 if (preorder_p)
2156 this->to_visit.quick_push (aloop->num);
2157 continue;
2158 }
2159 }
2160 else if (loop_outer (aloop) == root)
2161 break;
2162 else
2163 aloop = loop_outer (aloop);
2164 }
2165
2166 /* When visiting from innermost, we need to consider root here
2167 since the previous while loop doesn't handle it. */
2168 if (from_innermost_p && flags & LI_INCLUDE_ROOT)
2169 this->to_visit.quick_push (root->num);
2170}
2171

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h

1/* Natural loop functions
2 Copyright (C) 1987-2021 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#ifndef GCC_CFGLOOP_H
21#define GCC_CFGLOOP_H
22
23#include "cfgloopmanip.h"
24
25/* Structure to hold decision about unrolling/peeling. */
26enum lpt_dec
27{
28 LPT_NONE,
29 LPT_UNROLL_CONSTANT,
30 LPT_UNROLL_RUNTIME,
31 LPT_UNROLL_STUPID
32};
33
34struct GTY (()) lpt_decision {
35 enum lpt_dec decision;
36 unsigned times;
37};
38
39/* The type of extend applied to an IV. */
40enum iv_extend_code
41{
42 IV_SIGN_EXTEND,
43 IV_ZERO_EXTEND,
44 IV_UNKNOWN_EXTEND
45};
46
47/* The structure describing a bound on number of iterations of a loop. */
48
49class GTY ((chain_next ("%h.next"))) nb_iter_bound {
50public:
51 /* The statement STMT is executed at most ... */
52 gimple *stmt;
53
54 /* ... BOUND + 1 times (BOUND must be an unsigned constant).
55 The + 1 is added for the following reasons:
56
57 a) 0 would otherwise be unused, while we would need to care more about
58 overflows (as MAX + 1 is sometimes produced as the estimate on number
59 of executions of STMT).
60 b) it is consistent with the result of number_of_iterations_exit. */
61 widest_int bound;
62
63 /* True if, after executing the statement BOUND + 1 times, we will
64 leave the loop; that is, all the statements after it are executed at most
65 BOUND times. */
66 bool is_exit;
67
68 /* The next bound in the list. */
69 class nb_iter_bound *next;
70};
71
72/* Description of the loop exit. */
73
74struct GTY ((for_user)) loop_exit {
75 /* The exit edge. */
76 edge e;
77
78 /* Previous and next exit in the list of the exits of the loop. */
79 struct loop_exit *prev;
80 struct loop_exit *next;
81
82 /* Next element in the list of loops from that E exits. */
83 struct loop_exit *next_e;
84};
85
86struct loop_exit_hasher : ggc_ptr_hash<loop_exit>
87{
88 typedef edge compare_type;
89
90 static hashval_t hash (loop_exit *);
91 static bool equal (loop_exit *, edge);
92 static void remove (loop_exit *);
93};
94
95typedef class loop *loop_p;
96
97/* An integer estimation of the number of iterations. Estimate_state
98 describes what is the state of the estimation. */
99enum loop_estimation
100{
101 /* Estimate was not computed yet. */
102 EST_NOT_COMPUTED,
103 /* Estimate is ready. */
104 EST_AVAILABLE,
105 EST_LAST
106};
107
108/* The structure describing non-overflow control induction variable for
109 loop's exit edge. */
110struct GTY ((chain_next ("%h.next"))) control_iv {
111 tree base;
112 tree step;
113 struct control_iv *next;
114};
115
116/* Structure to hold information for each natural loop. */
117class GTY ((chain_next ("%h.next"))) loop {
118public:
119 /* Index into loops array. Note indices will never be reused after loop
120 is destroyed. */
121 int num;
122
123 /* Number of loop insns. */
124 unsigned ninsns;
125
126 /* Basic block of loop header. */
127 basic_block header;
128
129 /* Basic block of loop latch. */
130 basic_block latch;
131
132 /* For loop unrolling/peeling decision. */
133 struct lpt_decision lpt_decision;
134
135 /* Average number of executed insns per iteration. */
136 unsigned av_ninsns;
137
138 /* Number of blocks contained within the loop. */
139 unsigned num_nodes;
140
141 /* Superloops of the loop, starting with the outermost loop. */
142 vec<loop_p, va_gc> *superloops;
143
144 /* The first inner (child) loop or NULL if innermost loop. */
145 class loop *inner;
146
147 /* Link to the next (sibling) loop. */
148 class loop *next;
149
150 /* Auxiliary info specific to a pass. */
151 PTRvoid * GTY ((skip (""))) aux;
152
153 /* The number of times the latch of the loop is executed. This can be an
154 INTEGER_CST, or a symbolic expression representing the number of
155 iterations like "N - 1", or a COND_EXPR containing the runtime
156 conditions under which the number of iterations is non zero.
157
158 Don't access this field directly: number_of_latch_executions
159 computes and caches the computed information in this field. */
160 tree nb_iterations;
161
162 /* An integer guaranteed to be greater or equal to nb_iterations. Only
163 valid if any_upper_bound is true. */
164 widest_int nb_iterations_upper_bound;
165
166 widest_int nb_iterations_likely_upper_bound;
167
168 /* An integer giving an estimate on nb_iterations. Unlike
169 nb_iterations_upper_bound, there is no guarantee that it is at least
170 nb_iterations. */
171 widest_int nb_iterations_estimate;
172
173 /* If > 0, an integer, where the user asserted that for any
174 I in [ 0, nb_iterations ) and for any J in
175 [ I, min ( I + safelen, nb_iterations ) ), the Ith and Jth iterations
176 of the loop can be safely evaluated concurrently. */
177 int safelen;
178
179 /* Preferred vectorization factor for the loop if non-zero. */
180 int simdlen;
181
182 /* Constraints are generally set by consumers and affect certain
183 semantics of niter analyzer APIs. Currently the APIs affected are
184 number_of_iterations_exit* functions and their callers. One typical
185 use case of constraints is to vectorize possibly infinite loop:
186
187 1) Compute niter->assumptions by calling niter analyzer API and
188 record it as possible condition for loop versioning.
189 2) Clear buffered result of niter/scev analyzer.
190 3) Set constraint LOOP_C_FINITE assuming the loop is finite.
191 4) Analyze data references. Since data reference analysis depends
192 on niter/scev analyzer, the point is that niter/scev analysis
193 is done under circumstance of LOOP_C_FINITE constraint.
194 5) Version the loop with niter->assumptions computed in step 1).
195 6) Vectorize the versioned loop in which niter->assumptions is
196 checked to be true.
197 7) Update constraints in versioned loops so that niter analyzer
198 in following passes can use it.
199
200 Note consumers are usually the loop optimizers and it is consumers'
201 responsibility to set/clear constraints correctly. Failing to do
202 that might result in hard to track down bugs in niter/scev consumers. */
203 unsigned constraints;
204
205 /* An integer estimation of the number of iterations. Estimate_state
206 describes what is the state of the estimation. */
207 ENUM_BITFIELD(loop_estimation)enum loop_estimation estimate_state : 8;
208
209 unsigned any_upper_bound : 1;
210 unsigned any_estimate : 1;
211 unsigned any_likely_upper_bound : 1;
212
213 /* True if the loop can be parallel. */
214 unsigned can_be_parallel : 1;
215
216 /* True if -Waggressive-loop-optimizations warned about this loop
217 already. */
218 unsigned warned_aggressive_loop_optimizations : 1;
219
220 /* True if this loop should never be vectorized. */
221 unsigned dont_vectorize : 1;
222
223 /* True if we should try harder to vectorize this loop. */
224 unsigned force_vectorize : 1;
225
226 /* True if the loop is part of an oacc kernels region. */
227 unsigned in_oacc_kernels_region : 1;
228
229 /* True if the loop is known to be finite. This is a localized
230 flag_finite_loops or similar pragmas state. */
231 unsigned finite_p : 1;
232
233 /* The number of times to unroll the loop. 0 means no information given,
234 just do what we always do. A value of 1 means do not unroll the loop.
235 A value of USHRT_MAX means unroll with no specific unrolling factor.
236 Other values means unroll with the given unrolling factor. */
237 unsigned short unroll;
238
239 /* If this loop was inlined the main clique of the callee which does
240 not need remapping when copying the loop body. */
241 unsigned short owned_clique;
242
243 /* For SIMD loops, this is a unique identifier of the loop, referenced
244 by IFN_GOMP_SIMD_VF, IFN_GOMP_SIMD_LANE and IFN_GOMP_SIMD_LAST_LANE
245 builtins. */
246 tree simduid;
247
248 /* In loop optimization, it's common to generate loops from the original
249 loop. This field records the index of the original loop which can be
250 used to track the original loop from newly generated loops. This can
251 be done by calling function get_loop (cfun, orig_loop_num). Note the
252 original loop could be destroyed for various reasons thus no longer
253 exists, as a result, function call to get_loop returns NULL pointer.
254 In this case, this field should not be used and needs to be cleared
255 whenever possible. */
256 int orig_loop_num;
257
258 /* Upper bound on number of iterations of a loop. */
259 class nb_iter_bound *bounds;
260
261 /* Non-overflow control ivs of a loop. */
262 struct control_iv *control_ivs;
263
264 /* Head of the cyclic list of the exits of the loop. */
265 struct loop_exit *exits;
266
267 /* Number of iteration analysis data for RTL. */
268 class niter_desc *simple_loop_desc;
269
270 /* For sanity checking during loop fixup we record here the former
271 loop header for loops marked for removal. Note that this prevents
272 the basic-block from being collected but its index can still be
273 reused. */
274 basic_block former_header;
275};
276
277/* Set if the loop is known to be infinite. */
278#define LOOP_C_INFINITE(1 << 0) (1 << 0)
279/* Set if the loop is known to be finite without any assumptions. */
280#define LOOP_C_FINITE(1 << 1) (1 << 1)
281
282/* Set C to the LOOP constraint. */
283static inline void
284loop_constraint_set (class loop *loop, unsigned c)
285{
286 loop->constraints |= c;
287}
288
289/* Clear C from the LOOP constraint. */
290static inline void
291loop_constraint_clear (class loop *loop, unsigned c)
292{
293 loop->constraints &= ~c;
294}
295
296/* Check if C is set in the LOOP constraint. */
297static inline bool
298loop_constraint_set_p (class loop *loop, unsigned c)
299{
300 return (loop->constraints & c) == c;
301}
302
/* Flags for state of loop structure.  */
enum
{
  LOOPS_HAVE_PREHEADERS = 1,
  LOOPS_HAVE_SIMPLE_LATCHES = 2,
  LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS = 4,
  LOOPS_HAVE_RECORDED_EXITS = 8,
  LOOPS_MAY_HAVE_MULTIPLE_LATCHES = 16,
  LOOP_CLOSED_SSA = 32,
  LOOPS_NEED_FIXUP = 64,
  LOOPS_HAVE_FALLTHRU_PREHEADERS = 128
};

#define LOOPS_NORMAL (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES \
		      | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS)
#define AVOID_CFG_MODIFICATIONS (LOOPS_MAY_HAVE_MULTIPLE_LATCHES)
319
320/* Structure to hold CFG information about natural loops within a function. */
321struct GTY (()) loops {
322 /* State of loops. */
323 int state;
324
325 /* Array of the loops. */
326 vec<loop_p, va_gc> *larray;
327
328 /* Maps edges to the list of their descriptions as loop exits. Edges
329 whose sources or destinations have loop_father == NULL (which may
330 happen during the cfg manipulations) should not appear in EXITS. */
331 hash_table<loop_exit_hasher> *GTY(()) exits;
332
333 /* Pointer to root of loop hierarchy tree. */
334 class loop *tree_root;
335};
336
337/* Loop recognition. */
338bool bb_loop_header_p (basic_block);
339void init_loops_structure (struct function *, struct loops *, unsigned);
340extern struct loops *flow_loops_find (struct loops *);
341extern void disambiguate_loops_with_multiple_latches (void);
342extern void flow_loops_free (struct loops *);
343extern void flow_loops_dump (FILE *,
344 void (*)(const class loop *, FILE *, int), int);
345extern void flow_loop_dump (const class loop *, FILE *,
346 void (*)(const class loop *, FILE *, int), int);
347class loop *alloc_loop (void);
348extern void flow_loop_free (class loop *);
349int flow_loop_nodes_find (basic_block, class loop *);
350unsigned fix_loop_structure (bitmap changed_bbs);
351bool mark_irreducible_loops (void);
352void release_recorded_exits (function *);
353void record_loop_exits (void);
354void rescan_loop_exit (edge, bool, bool);
355void sort_sibling_loops (function *);
356
357/* Loop data structure manipulation/querying. */
358extern void flow_loop_tree_node_add (class loop *, class loop *,
359 class loop * = NULLnullptr);
360extern void flow_loop_tree_node_remove (class loop *);
361extern bool flow_loop_nested_p (const class loop *, const class loop *);
362extern bool flow_bb_inside_loop_p (const class loop *, const_basic_block);
363extern class loop * find_common_loop (class loop *, class loop *);
364class loop *superloop_at_depth (class loop *, unsigned);
365struct eni_weights;
366extern int num_loop_insns (const class loop *);
367extern int average_num_loop_insns (const class loop *);
368extern unsigned get_loop_level (const class loop *);
369extern bool loop_exit_edge_p (const class loop *, const_edge);
370extern bool loop_exits_to_bb_p (class loop *, basic_block);
371extern bool loop_exits_from_bb_p (class loop *, basic_block);
372extern void mark_loop_exit_edges (void);
373extern dump_user_location_t get_loop_location (class loop *loop);
374
375/* Loops & cfg manipulation. */
376extern basic_block *get_loop_body (const class loop *);
377extern unsigned get_loop_body_with_size (const class loop *, basic_block *,
378 unsigned);
379extern basic_block *get_loop_body_in_dom_order (const class loop *);
380extern basic_block *get_loop_body_in_bfs_order (const class loop *);
381extern basic_block *get_loop_body_in_custom_order (const class loop *,
382 int (*) (const void *, const void *));
383extern basic_block *get_loop_body_in_custom_order (const class loop *, void *,
384 int (*) (const void *, const void *, void *));
385
386extern auto_vec<edge> get_loop_exit_edges (const class loop *, basic_block * = NULLnullptr);
387extern edge single_exit (const class loop *);
388extern edge single_likely_exit (class loop *loop, const vec<edge> &);
389extern unsigned num_loop_branches (const class loop *);
390
391extern edge loop_preheader_edge (const class loop *);
392extern edge loop_latch_edge (const class loop *);
393
394extern void add_bb_to_loop (basic_block, class loop *);
395extern void remove_bb_from_loops (basic_block);
396
397extern void cancel_loop_tree (class loop *);
398extern void delete_loop (class loop *);
399
400
401extern void verify_loop_structure (void);
402
403/* Loop analysis. */
404extern bool just_once_each_iteration_p (const class loop *, const_basic_block);
405gcov_type expected_loop_iterations_unbounded (const class loop *,
406 bool *read_profile_p = NULLnullptr, bool by_profile_only = false);
407extern unsigned expected_loop_iterations (class loop *);
408extern rtx doloop_condition_get (rtx_insn *);
409
410void mark_loop_for_removal (loop_p);
411
412/* Induction variable analysis. */
413
414/* The description of induction variable. The things are a bit complicated
415 due to need to handle subregs and extends. The value of the object described
416 by it can be obtained as follows (all computations are done in extend_mode):
417
418 Value in i-th iteration is
419 delta + mult * extend_{extend_mode} (subreg_{mode} (base + i * step)).
420
421 If first_special is true, the value in the first iteration is
422 delta + mult * base
423
424 If extend = UNKNOWN, first_special must be false, delta 0, mult 1 and value is
425 subreg_{mode} (base + i * step)
426
427 The get_iv_value function can be used to obtain these expressions.
428
429 ??? Add a third mode field that would specify the mode in that inner
430 computation is done, which would enable it to be different from the
431 outer one? */
432
433class rtx_iv
434{
435public:
436 /* Its base and step (mode of base and step is supposed to be extend_mode,
437 see the description above). */
438 rtx base, step;
439
440 /* The type of extend applied to it (IV_SIGN_EXTEND, IV_ZERO_EXTEND,
441 or IV_UNKNOWN_EXTEND). */
442 enum iv_extend_code extend;
443
444 /* Operations applied in the extended mode. */
445 rtx delta, mult;
446
447 /* The mode it is extended to. */
448 scalar_int_mode extend_mode;
449
450 /* The mode the variable iterates in. */
451 scalar_int_mode mode;
452
453 /* Whether the first iteration needs to be handled specially. */
454 unsigned first_special : 1;
455};
456
457/* The description of an exit from the loop and of the number of iterations
458 till we take the exit. */
459
460class GTY(()) niter_desc
461{
462public:
463 /* The edge out of the loop. */
464 edge out_edge;
465
466 /* The other edge leading from the condition. */
467 edge in_edge;
468
469 /* True if we are able to say anything about number of iterations of the
470 loop. */
471 bool simple_p;
472
473 /* True if the loop iterates the constant number of times. */
474 bool const_iter;
475
476 /* Number of iterations if constant. */
477 uint64_t niter;
478
479 /* Assumptions under that the rest of the information is valid. */
480 rtx assumptions;
481
482 /* Assumptions under that the loop ends before reaching the latch,
483 even if value of niter_expr says otherwise. */
484 rtx noloop_assumptions;
485
486 /* Condition under that the loop is infinite. */
487 rtx infinite;
488
489 /* Whether the comparison is signed. */
490 bool signed_p;
491
492 /* The mode in that niter_expr should be computed. */
493 scalar_int_mode mode;
494
495 /* The number of iterations of the loop. */
496 rtx niter_expr;
497};
498
499extern void iv_analysis_loop_init (class loop *);
500extern bool iv_analyze (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
501extern bool iv_analyze_result (rtx_insn *, rtx, class rtx_iv *);
502extern bool iv_analyze_expr (rtx_insn *, scalar_int_mode, rtx,
503 class rtx_iv *);
504extern rtx get_iv_value (class rtx_iv *, rtx);
505extern bool biv_p (rtx_insn *, scalar_int_mode, rtx);
506extern void iv_analysis_done (void);
507
508extern class niter_desc *get_simple_loop_desc (class loop *loop);
509extern void free_simple_loop_desc (class loop *loop);
510
511static inline class niter_desc *
512simple_loop_desc (class loop *loop)
513{
514 return loop->simple_loop_desc;
515}
516
517/* Accessors for the loop structures. */
518
519/* Returns the loop with index NUM from FNs loop tree. */
520
521static inline class loop *
522get_loop (struct function *fn, unsigned num)
523{
524 return (*loops_for_fn (fn)->larray)[num];
525}
526
527/* Returns the number of superloops of LOOP. */
528
529static inline unsigned
530loop_depth (const class loop *loop)
531{
532 return vec_safe_length (loop->superloops);
15
Calling 'vec_safe_length<loop *, va_gc>'
18
Returning from 'vec_safe_length<loop *, va_gc>'
533}
534
535/* Returns the immediate superloop of LOOP, or NULL if LOOP is the outermost
536 loop. */
537
538static inline class loop *
539loop_outer (const class loop *loop)
540{
541 unsigned n = vec_safe_length (loop->superloops);
542
543 if (n == 0)
544 return NULLnullptr;
545
546 return (*loop->superloops)[n - 1];
547}
548
549/* Returns true if LOOP has at least one exit edge. */
550
551static inline bool
552loop_has_exit_edges (const class loop *loop)
553{
554 return loop->exits->next->e != NULLnullptr;
555}
556
557/* Returns the list of loops in FN. */
558
559inline vec<loop_p, va_gc> *
560get_loops (struct function *fn)
561{
562 struct loops *loops = loops_for_fn (fn);
563 if (!loops)
564 return NULLnullptr;
565
566 return loops->larray;
567}
568
569/* Returns the number of loops in FN (including the removed
570 ones and the fake loop that forms the root of the loop tree). */
571
572static inline unsigned
573number_of_loops (struct function *fn)
574{
575 struct loops *loops = loops_for_fn (fn);
576 if (!loops)
577 return 0;
578
579 return vec_safe_length (loops->larray);
580}
581
582/* Returns true if state of the loops satisfies all properties
583 described by FLAGS. */
584
585static inline bool
586loops_state_satisfies_p (function *fn, unsigned flags)
587{
588 return (loops_for_fn (fn)->state & flags) == flags;
589}
590
591static inline bool
592loops_state_satisfies_p (unsigned flags)
593{
594 return loops_state_satisfies_p (cfun(cfun + 0), flags);
595}
596
597/* Sets FLAGS to the loops state. */
598
599static inline void
600loops_state_set (function *fn, unsigned flags)
601{
602 loops_for_fn (fn)->state |= flags;
603}
604
605static inline void
606loops_state_set (unsigned flags)
607{
608 loops_state_set (cfun(cfun + 0), flags);
609}
610
611/* Clears FLAGS from the loops state. */
612
613static inline void
614loops_state_clear (function *fn, unsigned flags)
615{
616 loops_for_fn (fn)->state &= ~flags;
617}
618
619static inline void
620loops_state_clear (unsigned flags)
621{
622 if (!current_loops((cfun + 0)->x_current_loops))
623 return;
624 loops_state_clear (cfun(cfun + 0), flags);
625}
626
627/* Check loop structure invariants, if internal consistency checks are
628 enabled. */
629
630static inline void
631checking_verify_loop_structure (void)
632{
633 /* VERIFY_LOOP_STRUCTURE essentially asserts that no loops need fixups.
634
635 The loop optimizers should never make changes to the CFG which
636 require loop fixups. But the low level CFG manipulation code may
637 set the flag conservatively.
638
639 Go ahead and clear the flag here. That avoids the assert inside
640 VERIFY_LOOP_STRUCTURE, and if there is an inconsistency in the loop
641 structures VERIFY_LOOP_STRUCTURE will detect it.
642
643 This also avoid the compile time cost of excessive fixups. */
644 loops_state_clear (LOOPS_NEED_FIXUP);
645 if (flag_checkingglobal_options.x_flag_checking)
646 verify_loop_structure ();
647}
648
649/* Loop iterators. */
650
651/* Flags for loop iteration. */
652
653enum li_flags
654{
655 LI_INCLUDE_ROOT = 1, /* Include the fake root of the loop tree. */
656 LI_FROM_INNERMOST = 2, /* Iterate over the loops in the reverse order,
657 starting from innermost ones. */
658 LI_ONLY_INNERMOST = 4 /* Iterate only over innermost loops. */
659};
660
661/* Provide the functionality of std::as_const to support range-based for
662 to use const iterator. (We can't use std::as_const itself because it's
663 a C++17 feature.) */
664template <typename T>
665constexpr const T &
666as_const (T &t)
667{
668 return t;
669}
670
671/* A list for visiting loops, which contains the loop numbers instead of
672 the loop pointers. If the loop ROOT is offered (non-null), the visiting
673 will start from it, otherwise it would start from the tree_root of
674 loops_for_fn (FN) instead. The scope is restricted in function FN and
675 the visiting order is specified by FLAGS. */
676
677class loops_list
678{
679public:
680 loops_list (function *fn, unsigned flags, class loop *root = nullptr);
681
682 template <typename T> class Iter
683 {
684 public:
685 Iter (const loops_list &l, unsigned idx) : list (l), curr_idx (idx)
686 {
687 fill_curr_loop ();
688 }
689
690 T operator* () const { return curr_loop; }
691
692 Iter &
693 operator++ ()
694 {
695 if (curr_idx < list.to_visit.length ())
696 {
697 /* Bump the index and fill a new one. */
698 curr_idx++;
699 fill_curr_loop ();
700 }
701 else
702 gcc_assert (!curr_loop)((void)(!(!curr_loop) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h"
, 702, __FUNCTION__), 0 : 0))
;
703
704 return *this;
705 }
706
707 bool
708 operator!= (const Iter &rhs) const
709 {
710 return this->curr_idx != rhs.curr_idx;
711 }
712
713 private:
714 /* Fill the current loop starting from the current index. */
715 void fill_curr_loop ();
716
717 /* Reference to the loop list to visit. */
718 const loops_list &list;
719
720 /* The current index in the list to visit. */
721 unsigned curr_idx;
722
723 /* The loop implied by the current index. */
724 class loop *curr_loop;
725 };
726
727 using iterator = Iter<class loop *>;
728 using const_iterator = Iter<const class loop *>;
729
730 iterator
731 begin ()
732 {
733 return iterator (*this, 0);
734 }
735
736 iterator
737 end ()
738 {
739 return iterator (*this, to_visit.length ());
740 }
741
742 const_iterator
743 begin () const
744 {
745 return const_iterator (*this, 0);
746 }
747
748 const_iterator
749 end () const
750 {
751 return const_iterator (*this, to_visit.length ());
752 }
753
754private:
755 /* Walk loop tree starting from ROOT as the visiting order specified
756 by FLAGS. */
757 void walk_loop_tree (class loop *root, unsigned flags);
758
759 /* The function we are visiting. */
760 function *fn;
761
762 /* The list of loops to visit. */
763 auto_vec<int, 16> to_visit;
764};
765
766/* Starting from current index CURR_IDX (inclusive), find one index
767 which stands for one valid loop and fill the found loop as CURR_LOOP,
768 if we can't find one, set CURR_LOOP as null. */
769
770template <typename T>
771inline void
772loops_list::Iter<T>::fill_curr_loop ()
773{
774 int anum;
775
776 while (this->list.to_visit.iterate (this->curr_idx, &anum))
777 {
778 class loop *loop = get_loop (this->list.fn, anum);
779 if (loop)
780 {
781 curr_loop = loop;
782 return;
783 }
784 this->curr_idx++;
785 }
786
787 curr_loop = nullptr;
788}
789
790/* Set up the loops list to visit according to the specified
791 function scope FN and iterating order FLAGS. If ROOT is
792 not null, the visiting would start from it, otherwise it
793 will start from tree_root of loops_for_fn (FN). */
794
795inline loops_list::loops_list (function *fn, unsigned flags, class loop *root)
796{
797 struct loops *loops = loops_for_fn (fn);
798 gcc_assert (!root || loops)((void)(!(!root || loops) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h"
, 798, __FUNCTION__), 0 : 0))
;
799
800 /* Check mutually exclusive flags should not co-exist. */
801 unsigned checked_flags = LI_ONLY_INNERMOST | LI_FROM_INNERMOST;
802 gcc_assert ((flags & checked_flags) != checked_flags)((void)(!((flags & checked_flags) != checked_flags) ? fancy_abort
("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h"
, 802, __FUNCTION__), 0 : 0))
;
803
804 this->fn = fn;
805 if (!loops)
806 return;
807
808 class loop *tree_root = root ? root : loops->tree_root;
809
810 this->to_visit.reserve_exact (number_of_loops (fn));
811
812 /* When root is tree_root of loops_for_fn (fn) and the visiting
813 order is LI_ONLY_INNERMOST, we would like to use linear
814 search here since it has a more stable bound than the
815 walk_loop_tree. */
816 if (flags & LI_ONLY_INNERMOST && tree_root == loops->tree_root)
817 {
818 gcc_assert (tree_root->num == 0)((void)(!(tree_root->num == 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/cfgloop.h"
, 818, __FUNCTION__), 0 : 0))
;
819 if (tree_root->inner == NULLnullptr)
820 {
821 if (flags & LI_INCLUDE_ROOT)
822 this->to_visit.quick_push (0);
823
824 return;
825 }
826
827 class loop *aloop;
828 unsigned int i;
829 for (i = 1; vec_safe_iterate (loops->larray, i, &aloop); i++)
830 if (aloop != NULLnullptr && aloop->inner == NULLnullptr)
831 this->to_visit.quick_push (aloop->num);
832 }
833 else
834 walk_loop_tree (tree_root, flags);
835}
836
837/* The properties of the target. */
838struct target_cfgloop {
839 /* Number of available registers. */
840 unsigned x_target_avail_regs;
841
842 /* Number of available registers that are call-clobbered. */
843 unsigned x_target_clobbered_regs;
844
845 /* Number of registers reserved for temporary expressions. */
846 unsigned x_target_res_regs;
847
848 /* The cost for register when there still is some reserve, but we are
849 approaching the number of available registers. */
850 unsigned x_target_reg_cost[2];
851
852 /* The cost for register when we need to spill. */
853 unsigned x_target_spill_cost[2];
854};
855
856extern struct target_cfgloop default_target_cfgloop;
857#if SWITCHABLE_TARGET1
858extern struct target_cfgloop *this_target_cfgloop;
859#else
860#define this_target_cfgloop (&default_target_cfgloop)
861#endif
862
863#define target_avail_regs(this_target_cfgloop->x_target_avail_regs) \
864 (this_target_cfgloop->x_target_avail_regs)
865#define target_clobbered_regs(this_target_cfgloop->x_target_clobbered_regs) \
866 (this_target_cfgloop->x_target_clobbered_regs)
867#define target_res_regs(this_target_cfgloop->x_target_res_regs) \
868 (this_target_cfgloop->x_target_res_regs)
869#define target_reg_cost(this_target_cfgloop->x_target_reg_cost) \
870 (this_target_cfgloop->x_target_reg_cost)
871#define target_spill_cost(this_target_cfgloop->x_target_spill_cost) \
872 (this_target_cfgloop->x_target_spill_cost)
873
874/* Register pressure estimation for induction variable optimizations & loop
875 invariant motion. */
876extern unsigned estimate_reg_pressure_cost (unsigned, unsigned, bool, bool);
877extern void init_set_costs (void);
878
879/* Loop optimizer initialization. */
880extern void loop_optimizer_init (unsigned);
881extern void loop_optimizer_finalize (function *, bool = false);
882inline void
883loop_optimizer_finalize ()
884{
885 loop_optimizer_finalize (cfun(cfun + 0));
886}
887
888/* Optimization passes. */
889enum
890{
891 UAP_UNROLL = 1, /* Enables unrolling of loops if it seems profitable. */
892 UAP_UNROLL_ALL = 2 /* Enables unrolling of all loops. */
893};
894
895extern void doloop_optimize_loops (void);
896extern void move_loop_invariants (void);
897extern auto_vec<basic_block> get_loop_hot_path (const class loop *loop);
898
899/* Returns the outermost loop of the loop nest that contains LOOP.*/
900static inline class loop *
901loop_outermost (class loop *loop)
902{
903 unsigned n = vec_safe_length (loop->superloops);
904
905 if (n <= 1)
906 return loop;
907
908 return (*loop->superloops)[1];
909}
910
911extern void record_niter_bound (class loop *, const widest_int &, bool, bool);
912extern HOST_WIDE_INTlong get_estimated_loop_iterations_int (class loop *);
913extern HOST_WIDE_INTlong get_max_loop_iterations_int (const class loop *);
914extern HOST_WIDE_INTlong get_likely_max_loop_iterations_int (class loop *);
915extern bool get_estimated_loop_iterations (class loop *loop, widest_int *nit);
916extern bool get_max_loop_iterations (const class loop *loop, widest_int *nit);
917extern bool get_likely_max_loop_iterations (class loop *loop, widest_int *nit);
918extern int bb_loop_depth (const_basic_block);
919
920/* Converts VAL to widest_int. */
921
922static inline widest_int
923gcov_type_to_wide_int (gcov_type val)
924{
925 HOST_WIDE_INTlong a[2];
926
927 a[0] = (unsigned HOST_WIDE_INTlong) val;
928 /* If HOST_BITS_PER_WIDE_INT == HOST_BITS_PER_WIDEST_INT, avoid shifting by
929 the size of type. */
930 val >>= HOST_BITS_PER_WIDE_INT64 - 1;
931 val >>= 1;
932 a[1] = (unsigned HOST_WIDE_INTlong) val;
933
934 return widest_int::from_array (a, 2);
935}
936#endif /* GCC_CFGLOOP_H */

/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h

1/* Vector API for GNU compiler.
2 Copyright (C) 2004-2021 Free Software Foundation, Inc.
3 Contributed by Nathan Sidwell <nathan@codesourcery.com>
4 Re-implemented in C++ by Diego Novillo <dnovillo@google.com>
5
6This file is part of GCC.
7
8GCC is free software; you can redistribute it and/or modify it under
9the terms of the GNU General Public License as published by the Free
10Software Foundation; either version 3, or (at your option) any later
11version.
12
13GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14WARRANTY; without even the implied warranty of MERCHANTABILITY or
15FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16for more details.
17
18You should have received a copy of the GNU General Public License
19along with GCC; see the file COPYING3. If not see
20<http://www.gnu.org/licenses/>. */
21
22#ifndef GCC_VEC_H
23#define GCC_VEC_H
24
25/* Some gen* file have no ggc support as the header file gtype-desc.h is
26 missing. Provide these definitions in case ggc.h has not been included.
27 This is not a problem because any code that runs before gengtype is built
28 will never need to use GC vectors.*/
29
30extern void ggc_free (void *);
31extern size_t ggc_round_alloc_size (size_t requested_size);
32extern void *ggc_realloc (void *, size_t MEM_STAT_DECL);
33
34/* Templated vector type and associated interfaces.
35
36 The interface functions are typesafe and use inline functions,
37 sometimes backed by out-of-line generic functions. The vectors are
38 designed to interoperate with the GTY machinery.
39
40 There are both 'index' and 'iterate' accessors. The index accessor
41 is implemented by operator[]. The iterator returns a boolean
42 iteration condition and updates the iteration variable passed by
43 reference. Because the iterator will be inlined, the address-of
44 can be optimized away.
45
46 Each operation that increases the number of active elements is
47 available in 'quick' and 'safe' variants. The former presumes that
48 there is sufficient allocated space for the operation to succeed
49 (it dies if there is not). The latter will reallocate the
50 vector, if needed. Reallocation causes an exponential increase in
51 vector size. If you know you will be adding N elements, it would
52 be more efficient to use the reserve operation before adding the
53 elements with the 'quick' operation. This will ensure there are at
54 least as many elements as you ask for, it will exponentially
55 increase if there are too few spare slots. If you want reserve a
56 specific number of slots, but do not want the exponential increase
57 (for instance, you know this is the last allocation), use the
58 reserve_exact operation. You can also create a vector of a
59 specific size from the get go.
60
61 You should prefer the push and pop operations, as they append and
62 remove from the end of the vector. If you need to remove several
63 items in one go, use the truncate operation. The insert and remove
64 operations allow you to change elements in the middle of the
65 vector. There are two remove operations, one which preserves the
66 element ordering 'ordered_remove', and one which does not
67 'unordered_remove'. The latter function copies the end element
68 into the removed slot, rather than invoke a memmove operation. The
69 'lower_bound' function will determine where to place an item in the
70 array using insert that will maintain sorted order.
71
72 Vectors are template types with three arguments: the type of the
73 elements in the vector, the allocation strategy, and the physical
74 layout to use
75
76 Four allocation strategies are supported:
77
78 - Heap: allocation is done using malloc/free. This is the
79 default allocation strategy.
80
81 - GC: allocation is done using ggc_alloc/ggc_free.
82
83 - GC atomic: same as GC with the exception that the elements
84 themselves are assumed to be of an atomic type that does
85 not need to be garbage collected. This means that marking
86 routines do not need to traverse the array marking the
87 individual elements. This increases the performance of
88 GC activities.
89
90 Two physical layouts are supported:
91
92 - Embedded: The vector is structured using the trailing array
93 idiom. The last member of the structure is an array of size
94 1. When the vector is initially allocated, a single memory
95 block is created to hold the vector's control data and the
96 array of elements. These vectors cannot grow without
97 reallocation (see discussion on embeddable vectors below).
98
99 - Space efficient: The vector is structured as a pointer to an
100 embedded vector. This is the default layout. It means that
101 vectors occupy a single word of storage before initial
102 allocation. Vectors are allowed to grow (the internal
103 pointer is reallocated but the main vector instance does not
104 need to relocate).
105
106 The type, allocation and layout are specified when the vector is
107 declared.
108
109 If you need to directly manipulate a vector, then the 'address'
110 accessor will return the address of the start of the vector. Also
111 the 'space' predicate will tell you whether there is spare capacity
112 in the vector. You will not normally need to use these two functions.
113
114 Notes on the different layout strategies
115
116 * Embeddable vectors (vec<T, A, vl_embed>)
117
118 These vectors are suitable to be embedded in other data
119 structures so that they can be pre-allocated in a contiguous
120 memory block.
121
122 Embeddable vectors are implemented using the trailing array
123 idiom, thus they are not resizeable without changing the address
124 of the vector object itself. This means you cannot have
125 variables or fields of embeddable vector type -- always use a
126 pointer to a vector. The one exception is the final field of a
127 structure, which could be a vector type.
128
129 You will have to use the embedded_size & embedded_init calls to
130 create such objects, and they will not be resizeable (so the
131 'safe' allocation variants are not available).
132
133 Properties of embeddable vectors:
134
135 - The whole vector and control data are allocated in a single
136 contiguous block. It uses the trailing-vector idiom, so
137 allocation must reserve enough space for all the elements
138 in the vector plus its control data.
139 - The vector cannot be re-allocated.
140 - The vector cannot grow nor shrink.
141 - No indirections needed for access/manipulation.
142 - It requires 2 words of storage (prior to vector allocation).
143
144
145 * Space efficient vector (vec<T, A, vl_ptr>)
146
147 These vectors can grow dynamically and are allocated together
148 with their control data. They are suited to be included in data
149 structures. Prior to initial allocation, they only take a single
150 word of storage.
151
152 These vectors are implemented as a pointer to embeddable vectors.
153 The semantics allow for this pointer to be NULL to represent
154 empty vectors. This way, empty vectors occupy minimal space in
155 the structure containing them.
156
157 Properties:
158
159 - The whole vector and control data are allocated in a single
160 contiguous block.
161 - The whole vector may be re-allocated.
162 - Vector data may grow and shrink.
163 - Access and manipulation requires a pointer test and
164 indirection.
165 - It requires 1 word of storage (prior to vector allocation).
166
167 An example of their use would be,
168
169 struct my_struct {
170 // A space-efficient vector of tree pointers in GC memory.
171 vec<tree, va_gc, vl_ptr> v;
172 };
173
174 struct my_struct *s;
175
176 if (s->v.length ()) { we have some contents }
177 s->v.safe_push (decl); // append some decl onto the end
178 for (ix = 0; s->v.iterate (ix, &elt); ix++)
179 { do something with elt }
180*/
181
182/* Support function for statistics. */
183extern void dump_vec_loc_statistics (void);
184
185/* Hashtable mapping vec addresses to descriptors. */
186extern htab_t vec_mem_usage_hash;
187
188/* Control data for vectors. This contains the number of allocated
189 and used slots inside a vector. */
190
191struct vec_prefix
192{
193 /* FIXME - These fields should be private, but we need to cater to
194 compilers that have stricter notions of PODness for types. */
195
196 /* Memory allocation support routines in vec.c. */
197 void register_overhead (void *, size_t, size_t CXX_MEM_STAT_INFO);
198 void release_overhead (void *, size_t, size_t, bool CXX_MEM_STAT_INFO);
199 static unsigned calculate_allocation (vec_prefix *, unsigned, bool);
200 static unsigned calculate_allocation_1 (unsigned, unsigned);
201
202 /* Note that vec_prefix should be a base class for vec, but we use
203 offsetof() on vector fields of tree structures (e.g.,
204 tree_binfo::base_binfos), and offsetof only supports base types.
205
206 To compensate, we make vec_prefix a field inside vec and make
207 vec a friend class of vec_prefix so it can access its fields. */
208 template <typename, typename, typename> friend struct vec;
209
210 /* The allocator types also need access to our internals. */
211 friend struct va_gc;
212 friend struct va_gc_atomic;
213 friend struct va_heap;
214
215 unsigned m_alloc : 31;
216 unsigned m_using_auto_storage : 1;
217 unsigned m_num;
218};
219
220/* Calculate the number of slots to reserve a vector, making sure that
221 RESERVE slots are free. If EXACT grow exactly, otherwise grow
222 exponentially. PFX is the control data for the vector. */
223
224inline unsigned
225vec_prefix::calculate_allocation (vec_prefix *pfx, unsigned reserve,
226 bool exact)
227{
228 if (exact)
229 return (pfx ? pfx->m_num : 0) + reserve;
230 else if (!pfx)
231 return MAX (4, reserve)((4) > (reserve) ? (4) : (reserve));
232 return calculate_allocation_1 (pfx->m_alloc, pfx->m_num + reserve);
233}
234
235template<typename, typename, typename> struct vec;
236
237/* Valid vector layouts
238
239 vl_embed - Embeddable vector that uses the trailing array idiom.
240 vl_ptr - Space efficient vector that uses a pointer to an
241 embeddable vector. */
242struct vl_embed { };
243struct vl_ptr { };
244
245
246/* Types of supported allocations
247
248 va_heap - Allocation uses malloc/free.
249 va_gc - Allocation uses ggc_alloc.
250 va_gc_atomic - Same as GC, but individual elements of the array
251 do not need to be marked during collection. */
252
253/* Allocator type for heap vectors. */
254struct va_heap
255{
256 /* Heap vectors are frequently regular instances, so use the vl_ptr
257 layout for them. */
258 typedef vl_ptr default_layout;
259
260 template<typename T>
261 static void reserve (vec<T, va_heap, vl_embed> *&, unsigned, bool
262 CXX_MEM_STAT_INFO);
263
264 template<typename T>
265 static void release (vec<T, va_heap, vl_embed> *&);
266};
267
268
269/* Allocator for heap memory. Ensure there are at least RESERVE free
270 slots in V. If EXACT is true, grow exactly, else grow
271 exponentially. As a special case, if the vector had not been
272 allocated and RESERVE is 0, no vector will be created. */
273
274template<typename T>
275inline void
276va_heap::reserve (vec<T, va_heap, vl_embed> *&v, unsigned reserve, bool exact
277 MEM_STAT_DECL)
278{
279 size_t elt_size = sizeof (T);
280 unsigned alloc
281 = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
282 gcc_checking_assert (alloc)((void)(!(alloc) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 282, __FUNCTION__), 0 : 0))
;
283
284 if (GATHER_STATISTICS0 && v)
285 v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
286 v->allocated (), false);
287
288 size_t size = vec<T, va_heap, vl_embed>::embedded_size (alloc);
289 unsigned nelem = v ? v->length () : 0;
290 v = static_cast <vec<T, va_heap, vl_embed> *> (xrealloc (v, size));
291 v->embedded_init (alloc, nelem);
292
293 if (GATHER_STATISTICS0)
294 v->m_vecpfx.register_overhead (v, alloc, elt_size PASS_MEM_STAT);
295}
296
297
298#if GCC_VERSION(4 * 1000 + 2) >= 4007
299#pragma GCC diagnostic push
300#pragma GCC diagnostic ignored "-Wfree-nonheap-object"
301#endif
302
303/* Free the heap space allocated for vector V. */
304
305template<typename T>
306void
307va_heap::release (vec<T, va_heap, vl_embed> *&v)
308{
309 size_t elt_size = sizeof (T);
310 if (v == NULLnullptr)
311 return;
312
313 if (GATHER_STATISTICS0)
314 v->m_vecpfx.release_overhead (v, elt_size * v->allocated (),
315 v->allocated (), true);
316 ::free (v);
317 v = NULLnullptr;
318}
319
320#if GCC_VERSION(4 * 1000 + 2) >= 4007
321#pragma GCC diagnostic pop
322#endif
323
324/* Allocator type for GC vectors. Notice that we need the structure
325 declaration even if GC is not enabled. */
326
327struct va_gc
328{
329 /* Use vl_embed as the default layout for GC vectors. Due to GTY
330 limitations, GC vectors must always be pointers, so it is more
331 efficient to use a pointer to the vl_embed layout, rather than
332 using a pointer to a pointer as would be the case with vl_ptr. */
333 typedef vl_embed default_layout;
334
335 template<typename T, typename A>
336 static void reserve (vec<T, A, vl_embed> *&, unsigned, bool
337 CXX_MEM_STAT_INFO);
338
339 template<typename T, typename A>
340 static void release (vec<T, A, vl_embed> *&v);
341};
342
343
344/* Free GC memory used by V and reset V to NULL. */
345
346template<typename T, typename A>
347inline void
348va_gc::release (vec<T, A, vl_embed> *&v)
349{
350 if (v)
351 ::ggc_free (v);
352 v = NULLnullptr;
353}
354
355
356/* Allocator for GC memory. Ensure there are at least RESERVE free
357 slots in V. If EXACT is true, grow exactly, else grow
358 exponentially. As a special case, if the vector had not been
359 allocated and RESERVE is 0, no vector will be created. */
360
361template<typename T, typename A>
362void
363va_gc::reserve (vec<T, A, vl_embed> *&v, unsigned reserve, bool exact
364 MEM_STAT_DECL)
365{
366 unsigned alloc
367 = vec_prefix::calculate_allocation (v ? &v->m_vecpfx : 0, reserve, exact);
368 if (!alloc)
369 {
370 ::ggc_free (v);
371 v = NULLnullptr;
372 return;
373 }
374
375 /* Calculate the amount of space we want. */
376 size_t size = vec<T, A, vl_embed>::embedded_size (alloc);
377
378 /* Ask the allocator how much space it will really give us. */
379 size = ::ggc_round_alloc_size (size);
380
381 /* Adjust the number of slots accordingly. */
382 size_t vec_offset = sizeof (vec_prefix);
383 size_t elt_size = sizeof (T);
384 alloc = (size - vec_offset) / elt_size;
385
386 /* And finally, recalculate the amount of space we ask for. */
387 size = vec_offset + alloc * elt_size;
388
389 unsigned nelem = v ? v->length () : 0;
390 v = static_cast <vec<T, A, vl_embed> *> (::ggc_realloc (v, size
391 PASS_MEM_STAT));
392 v->embedded_init (alloc, nelem);
393}
394
395
396/* Allocator type for GC vectors. This is for vectors of types
397 atomics w.r.t. collection, so allocation and deallocation is
398 completely inherited from va_gc. */
399struct va_gc_atomic : va_gc
400{
401};
402
403
404/* Generic vector template. Default values for A and L indicate the
405 most commonly used strategies.
406
407 FIXME - Ideally, they would all be vl_ptr to encourage using regular
408 instances for vectors, but the existing GTY machinery is limited
409 in that it can only deal with GC objects that are pointers
410 themselves.
411
412 This means that vector operations that need to deal with
413 potentially NULL pointers, must be provided as free
414 functions (see the vec_safe_* functions above). */
415template<typename T,
416 typename A = va_heap,
417 typename L = typename A::default_layout>
418struct GTY((user)) vec
419{
420};
421
422/* Allow C++11 range-based 'for' to work directly on vec<T>*. */
423template<typename T, typename A, typename L>
424T* begin (vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
425template<typename T, typename A, typename L>
426T* end (vec<T,A,L> *v) { return v ? v->end () : nullptr; }
427template<typename T, typename A, typename L>
428const T* begin (const vec<T,A,L> *v) { return v ? v->begin () : nullptr; }
429template<typename T, typename A, typename L>
430const T* end (const vec<T,A,L> *v) { return v ? v->end () : nullptr; }
431
432/* Generic vec<> debug helpers.
433
434 These need to be instantiated for each vec<TYPE> used throughout
435 the compiler like this:
436
437 DEFINE_DEBUG_VEC (TYPE)
438
439 The reason we have a debug_helper() is because GDB can't
440 disambiguate a plain call to debug(some_vec), and it must be called
441 like debug<TYPE>(some_vec). */
442
443template<typename T>
444void
445debug_helper (vec<T> &ref)
446{
447 unsigned i;
448 for (i = 0; i < ref.length (); ++i)
449 {
450 fprintf (stderrstderr, "[%d] = ", i);
451 debug_slim (ref[i]);
452 fputc ('\n', stderrstderr);
453 }
454}
455
456/* We need a separate va_gc variant here because default template
457 argument for functions cannot be used in c++-98. Once this
458 restriction is removed, those variant should be folded with the
459 above debug_helper. */
460
461template<typename T>
462void
463debug_helper (vec<T, va_gc> &ref)
464{
465 unsigned i;
466 for (i = 0; i < ref.length (); ++i)
467 {
468 fprintf (stderrstderr, "[%d] = ", i);
469 debug_slim (ref[i]);
470 fputc ('\n', stderrstderr);
471 }
472}
473
474/* Macro to define debug(vec<T>) and debug(vec<T, va_gc>) helper
475 functions for a type T. */
476
477#define DEFINE_DEBUG_VEC(T)template void debug_helper (vec<T> &); template void
debug_helper (vec<T, va_gc> &); __attribute__ ((__used__
)) void debug (vec<T> &ref) { debug_helper <T>
(ref); } __attribute__ ((__used__)) void debug (vec<T>
*ptr) { if (ptr) debug (*ptr); else fprintf (stderr, "<nil>\n"
); } __attribute__ ((__used__)) void debug (vec<T, va_gc>
&ref) { debug_helper <T> (ref); } __attribute__ ((
__used__)) void debug (vec<T, va_gc> *ptr) { if (ptr) debug
(*ptr); else fprintf (stderr, "<nil>\n"); }
\
478 template void debug_helper (vec<T> &); \
479 template void debug_helper (vec<T, va_gc> &); \
480 /* Define the vec<T> debug functions. */ \
481 DEBUG_FUNCTION__attribute__ ((__used__)) void \
482 debug (vec<T> &ref) \
483 { \
484 debug_helper <T> (ref); \
485 } \
486 DEBUG_FUNCTION__attribute__ ((__used__)) void \
487 debug (vec<T> *ptr) \
488 { \
489 if (ptr) \
490 debug (*ptr); \
491 else \
492 fprintf (stderrstderr, "<nil>\n"); \
493 } \
494 /* Define the vec<T, va_gc> debug functions. */ \
495 DEBUG_FUNCTION__attribute__ ((__used__)) void \
496 debug (vec<T, va_gc> &ref) \
497 { \
498 debug_helper <T> (ref); \
499 } \
500 DEBUG_FUNCTION__attribute__ ((__used__)) void \
501 debug (vec<T, va_gc> *ptr) \
502 { \
503 if (ptr) \
504 debug (*ptr); \
505 else \
506 fprintf (stderrstderr, "<nil>\n"); \
507 }
508
509/* Default-construct N elements in DST. */
510
511template <typename T>
512inline void
513vec_default_construct (T *dst, unsigned n)
514{
515#ifdef BROKEN_VALUE_INITIALIZATION
516 /* Versions of GCC before 4.4 sometimes leave certain objects
517 uninitialized when value initialized, though if the type has
518 user defined default ctor, that ctor is invoked. As a workaround
519 perform clearing first and then the value initialization, which
520 fixes the case when value initialization doesn't initialize due to
521 the bugs and should initialize to all zeros, but still allows
522 vectors for types with user defined default ctor that initializes
523 some or all elements to non-zero. If T has no user defined
524 default ctor and some non-static data members have user defined
525 default ctors that initialize to non-zero the workaround will
526 still not work properly; in that case we just need to provide
527 user defined default ctor. */
528 memset (dst, '\0', sizeof (T) * n);
529#endif
530 for ( ; n; ++dst, --n)
531 ::new (static_cast<void*>(dst)) T ();
532}
533
534/* Copy-construct N elements in DST from *SRC. */
535
536template <typename T>
537inline void
538vec_copy_construct (T *dst, const T *src, unsigned n)
539{
540 for ( ; n; ++dst, ++src, --n)
541 ::new (static_cast<void*>(dst)) T (*src);
542}
543
544/* Type to provide zero-initialized values for vec<T, A, L>. This is
545 used to provide nil initializers for vec instances. Since vec must
546 be a trivially copyable type that can be copied by memcpy and zeroed
547 out by memset, it must have defaulted default and copy ctor and copy
548 assignment. To initialize a vec either use value initialization
549 (e.g., vec() or vec v{ };) or assign it the value vNULL. This isn't
550 needed for file-scope and function-local static vectors, which are
551 zero-initialized by default. */
552struct vnull { };
553constexpr vnull vNULL{ };
554
555
556/* Embeddable vector. These vectors are suitable to be embedded
557 in other data structures so that they can be pre-allocated in a
558 contiguous memory block.
559
560 Embeddable vectors are implemented using the trailing array idiom,
561 thus they are not resizeable without changing the address of the
562 vector object itself. This means you cannot have variables or
563 fields of embeddable vector type -- always use a pointer to a
564 vector. The one exception is the final field of a structure, which
565 could be a vector type.
566
567 You will have to use the embedded_size & embedded_init calls to
568 create such objects, and they will not be resizeable (so the 'safe'
569 allocation variants are not available).
570
571 Properties:
572
573 - The whole vector and control data are allocated in a single
574 contiguous block. It uses the trailing-vector idiom, so
575 allocation must reserve enough space for all the elements
576 in the vector plus its control data.
577 - The vector cannot be re-allocated.
578 - The vector cannot grow nor shrink.
579 - No indirections needed for access/manipulation.
580 - It requires 2 words of storage (prior to vector allocation). */
581
582template<typename T, typename A>
583struct GTY((user)) vec<T, A, vl_embed>
584{
585public:
586 unsigned allocated (void) const { return m_vecpfx.m_alloc; }
587 unsigned length (void) const { return m_vecpfx.m_num; }
588 bool is_empty (void) const { return m_vecpfx.m_num == 0; }
589 T *address (void) { return m_vecdata; }
590 const T *address (void) const { return m_vecdata; }
591 T *begin () { return address (); }
592 const T *begin () const { return address (); }
593 T *end () { return address () + length (); }
594 const T *end () const { return address () + length (); }
595 const T &operator[] (unsigned) const;
596 T &operator[] (unsigned);
597 T &last (void);
598 bool space (unsigned) const;
599 bool iterate (unsigned, T *) const;
600 bool iterate (unsigned, T **) const;
601 vec *copy (ALONE_CXX_MEM_STAT_INFO) const;
602 void splice (const vec &);
603 void splice (const vec *src);
604 T *quick_push (const T &);
605 T &pop (void);
606 void truncate (unsigned);
607 void quick_insert (unsigned, const T &);
608 void ordered_remove (unsigned);
609 void unordered_remove (unsigned);
610 void block_remove (unsigned, unsigned);
611 void qsort (int (*) (const void *, const void *))qsort (int (*) (const void *, const void *));
612 void sort (int (*) (const void *, const void *, void *), void *);
613 void stablesort (int (*) (const void *, const void *, void *), void *);
614 T *bsearch (const void *key, int (*compar)(const void *, const void *));
615 T *bsearch (const void *key,
616 int (*compar)(const void *, const void *, void *), void *);
617 unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
618 bool contains (const T &search) const;
619 static size_t embedded_size (unsigned);
620 void embedded_init (unsigned, unsigned = 0, unsigned = 0);
621 void quick_grow (unsigned len);
622 void quick_grow_cleared (unsigned len);
623
624 /* vec class can access our internal data and functions. */
625 template <typename, typename, typename> friend struct vec;
626
627 /* The allocator types also need access to our internals. */
628 friend struct va_gc;
629 friend struct va_gc_atomic;
630 friend struct va_heap;
631
632 /* FIXME - These fields should be private, but we need to cater to
633 compilers that have stricter notions of PODness for types. */
634 vec_prefix m_vecpfx;
635 T m_vecdata[1];
636};
637
638
639/* Convenience wrapper functions to use when dealing with pointers to
640 embedded vectors. Some functionality for these vectors must be
641 provided via free functions for these reasons:
642
643 1- The pointer may be NULL (e.g., before initial allocation).
644
645 2- When the vector needs to grow, it must be reallocated, so
646 the pointer will change its value.
647
648 Because of limitations with the current GC machinery, all vectors
649 in GC memory *must* be pointers. */
650
651
652/* If V contains no room for NELEMS elements, return false. Otherwise,
653 return true. */
654template<typename T, typename A>
655inline bool
656vec_safe_space (const vec<T, A, vl_embed> *v, unsigned nelems)
657{
658 return v ? v->space (nelems) : nelems == 0;
659}
660
661
662/* If V is NULL, return 0. Otherwise, return V->length(). */
663template<typename T, typename A>
664inline unsigned
665vec_safe_length (const vec<T, A, vl_embed> *v)
666{
667 return v ? v->length () : 0;
16
Assuming 'v' is non-null, which participates in a condition later
17
'?' condition is true
668}
669
670
671/* If V is NULL, return NULL. Otherwise, return V->address(). */
672template<typename T, typename A>
673inline T *
674vec_safe_address (vec<T, A, vl_embed> *v)
675{
676 return v ? v->address () : NULLnullptr;
677}
678
679
680/* If V is NULL, return true. Otherwise, return V->is_empty(). */
681template<typename T, typename A>
682inline bool
683vec_safe_is_empty (vec<T, A, vl_embed> *v)
684{
685 return v ? v->is_empty () : true;
686}
687
688/* If V does not have space for NELEMS elements, call
689 V->reserve(NELEMS, EXACT). */
690template<typename T, typename A>
691inline bool
692vec_safe_reserve (vec<T, A, vl_embed> *&v, unsigned nelems, bool exact = false
693 CXX_MEM_STAT_INFO)
694{
695 bool extend = nelems ? !vec_safe_space (v, nelems) : false;
23
Assuming 'nelems' is 0
24
'?' condition is false
696 if (extend
24.1
'extend' is false
24.1
'extend' is false
24.1
'extend' is false
)
25
Taking false branch
697 A::reserve (v, nelems, exact PASS_MEM_STAT);
698 return extend;
26
Returning without writing to 'v'
699}
700
701template<typename T, typename A>
702inline bool
703vec_safe_reserve_exact (vec<T, A, vl_embed> *&v, unsigned nelems
704 CXX_MEM_STAT_INFO)
705{
706 return vec_safe_reserve (v, nelems, true PASS_MEM_STAT);
707}
708
709
710/* Allocate GC memory for V with space for NELEMS slots. If NELEMS
711 is 0, V is initialized to NULL. */
712
713template<typename T, typename A>
714inline void
715vec_alloc (vec<T, A, vl_embed> *&v, unsigned nelems CXX_MEM_STAT_INFO)
716{
717 v = NULLnullptr;
21
Null pointer value stored to field 'superloops'
718 vec_safe_reserve (v, nelems, false PASS_MEM_STAT);
22
Calling 'vec_safe_reserve<loop *, va_gc>'
27
Returning from 'vec_safe_reserve<loop *, va_gc>'
719}
720
721
722/* Free the GC memory allocated by vector V and set it to NULL. */
723
724template<typename T, typename A>
725inline void
726vec_free (vec<T, A, vl_embed> *&v)
727{
728 A::release (v);
729}
730
731
732/* Grow V to length LEN. Allocate it, if necessary. */
733template<typename T, typename A>
734inline void
735vec_safe_grow (vec<T, A, vl_embed> *&v, unsigned len,
736 bool exact = false CXX_MEM_STAT_INFO)
737{
738 unsigned oldlen = vec_safe_length (v);
739 gcc_checking_assert (len >= oldlen)((void)(!(len >= oldlen) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 739, __FUNCTION__), 0 : 0))
;
740 vec_safe_reserve (v, len - oldlen, exact PASS_MEM_STAT);
741 v->quick_grow (len);
742}
743
744
745/* If V is NULL, allocate it. Call V->safe_grow_cleared(LEN). */
746template<typename T, typename A>
747inline void
748vec_safe_grow_cleared (vec<T, A, vl_embed> *&v, unsigned len,
749 bool exact = false CXX_MEM_STAT_INFO)
750{
751 unsigned oldlen = vec_safe_length (v);
752 vec_safe_grow (v, len, exact PASS_MEM_STAT);
753 vec_default_construct (v->address () + oldlen, len - oldlen);
754}
755
756
757/* Assume V is not NULL. */
758
759template<typename T>
760inline void
761vec_safe_grow_cleared (vec<T, va_heap, vl_ptr> *&v,
762 unsigned len, bool exact = false CXX_MEM_STAT_INFO)
763{
764 v->safe_grow_cleared (len, exact PASS_MEM_STAT);
765}
766
767/* If V does not have space for NELEMS elements, call
768 V->reserve(NELEMS, EXACT). */
769
770template<typename T>
771inline bool
772vec_safe_reserve (vec<T, va_heap, vl_ptr> *&v, unsigned nelems, bool exact = false
773 CXX_MEM_STAT_INFO)
774{
775 return v->reserve (nelems, exact);
776}
777
778
779/* If V is NULL return false, otherwise return V->iterate(IX, PTR). */
780template<typename T, typename A>
781inline bool
782vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T **ptr)
783{
784 if (v)
785 return v->iterate (ix, ptr);
786 else
787 {
788 *ptr = 0;
789 return false;
790 }
791}
792
793template<typename T, typename A>
794inline bool
795vec_safe_iterate (const vec<T, A, vl_embed> *v, unsigned ix, T *ptr)
796{
797 if (v
29.1
'v' is non-null
29.1
'v' is non-null
29.1
'v' is non-null
)
30
Taking true branch
798 return v->iterate (ix, ptr);
31
Returning value, which participates in a condition later
799 else
800 {
801 *ptr = 0;
802 return false;
803 }
804}
805
806
807/* If V has no room for one more element, reallocate it. Then call
808 V->quick_push(OBJ). */
809template<typename T, typename A>
810inline T *
811vec_safe_push (vec<T, A, vl_embed> *&v, const T &obj CXX_MEM_STAT_INFO)
812{
813 vec_safe_reserve (v, 1, false PASS_MEM_STAT);
814 return v->quick_push (obj);
815}
816
817
818/* if V has no room for one more element, reallocate it. Then call
819 V->quick_insert(IX, OBJ). */
820template<typename T, typename A>
821inline void
822vec_safe_insert (vec<T, A, vl_embed> *&v, unsigned ix, const T &obj
823 CXX_MEM_STAT_INFO)
824{
825 vec_safe_reserve (v, 1, false PASS_MEM_STAT);
826 v->quick_insert (ix, obj);
827}
828
829
830/* If V is NULL, do nothing. Otherwise, call V->truncate(SIZE). */
831template<typename T, typename A>
832inline void
833vec_safe_truncate (vec<T, A, vl_embed> *v, unsigned size)
834{
835 if (v)
836 v->truncate (size);
837}
838
839
840/* If SRC is not NULL, return a pointer to a copy of it. */
841template<typename T, typename A>
842inline vec<T, A, vl_embed> *
843vec_safe_copy (vec<T, A, vl_embed> *src CXX_MEM_STAT_INFO)
844{
845 return src ? src->copy (ALONE_PASS_MEM_STAT) : NULLnullptr;
846}
847
848/* Copy the elements from SRC to the end of DST as if by memcpy.
849 Reallocate DST, if necessary. */
850template<typename T, typename A>
851inline void
852vec_safe_splice (vec<T, A, vl_embed> *&dst, const vec<T, A, vl_embed> *src
853 CXX_MEM_STAT_INFO)
854{
855 unsigned src_len = vec_safe_length (src);
856 if (src_len)
857 {
858 vec_safe_reserve_exact (dst, vec_safe_length (dst) + src_len
859 PASS_MEM_STAT);
860 dst->splice (*src);
861 }
862}
863
864/* Return true if SEARCH is an element of V. Note that this is O(N) in the
865 size of the vector and so should be used with care. */
866
867template<typename T, typename A>
868inline bool
869vec_safe_contains (vec<T, A, vl_embed> *v, const T &search)
870{
871 return v ? v->contains (search) : false;
872}
873
874/* Index into vector. Return the IX'th element. IX must be in the
875 domain of the vector. */
876
877template<typename T, typename A>
878inline const T &
879vec<T, A, vl_embed>::operator[] (unsigned ix) const
880{
881 gcc_checking_assert (ix < m_vecpfx.m_num)((void)(!(ix < m_vecpfx.m_num) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 881, __FUNCTION__), 0 : 0))
;
882 return m_vecdata[ix];
883}
884
885template<typename T, typename A>
886inline T &
887vec<T, A, vl_embed>::operator[] (unsigned ix)
888{
889 gcc_checking_assert (ix < m_vecpfx.m_num)((void)(!(ix < m_vecpfx.m_num) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 889, __FUNCTION__), 0 : 0))
;
890 return m_vecdata[ix];
891}
892
893
894/* Get the final element of the vector, which must not be empty. */
895
896template<typename T, typename A>
897inline T &
898vec<T, A, vl_embed>::last (void)
899{
900 gcc_checking_assert (m_vecpfx.m_num > 0)((void)(!(m_vecpfx.m_num > 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 900, __FUNCTION__), 0 : 0))
;
901 return (*this)[m_vecpfx.m_num - 1];
902}
903
904
905/* If this vector has space for NELEMS additional entries, return
906 true. You usually only need to use this if you are doing your
907 own vector reallocation, for instance on an embedded vector. This
908 returns true in exactly the same circumstances that vec::reserve
909 will. */
910
911template<typename T, typename A>
912inline bool
913vec<T, A, vl_embed>::space (unsigned nelems) const
914{
915 return m_vecpfx.m_alloc - m_vecpfx.m_num >= nelems;
916}
917
918
919/* Return iteration condition and update PTR to point to the IX'th
920 element of this vector. Use this to iterate over the elements of a
921 vector as follows,
922
923 for (ix = 0; vec<T, A>::iterate (v, ix, &ptr); ix++)
924 continue; */
925
926template<typename T, typename A>
927inline bool
928vec<T, A, vl_embed>::iterate (unsigned ix, T *ptr) const
929{
930 if (ix < m_vecpfx.m_num)
931 {
932 *ptr = m_vecdata[ix];
933 return true;
934 }
935 else
936 {
937 *ptr = 0;
938 return false;
939 }
940}
941
942
943/* Return iteration condition and update *PTR to point to the
944 IX'th element of this vector. Use this to iterate over the
945 elements of a vector as follows,
946
947 for (ix = 0; v->iterate (ix, &ptr); ix++)
948 continue;
949
950 This variant is for vectors of objects. */
951
952template<typename T, typename A>
953inline bool
954vec<T, A, vl_embed>::iterate (unsigned ix, T **ptr) const
955{
956 if (ix < m_vecpfx.m_num)
957 {
958 *ptr = CONST_CAST (T *, &m_vecdata[ix])(const_cast<T *> ((&m_vecdata[ix])));
959 return true;
960 }
961 else
962 {
963 *ptr = 0;
964 return false;
965 }
966}
967
968
969/* Return a pointer to a copy of this vector. */
970
971template<typename T, typename A>
972inline vec<T, A, vl_embed> *
973vec<T, A, vl_embed>::copy (ALONE_MEM_STAT_DECLvoid) const
974{
975 vec<T, A, vl_embed> *new_vec = NULLnullptr;
976 unsigned len = length ();
977 if (len)
978 {
979 vec_alloc (new_vec, len PASS_MEM_STAT);
980 new_vec->embedded_init (len, len);
981 vec_copy_construct (new_vec->address (), m_vecdata, len);
982 }
983 return new_vec;
984}
985
986
987/* Copy the elements from SRC to the end of this vector as if by memcpy.
988 The vector must have sufficient headroom available. */
989
990template<typename T, typename A>
991inline void
992vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> &src)
993{
994 unsigned len = src.length ();
995 if (len)
996 {
997 gcc_checking_assert (space (len))((void)(!(space (len)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 997, __FUNCTION__), 0 : 0))
;
998 vec_copy_construct (end (), src.address (), len);
999 m_vecpfx.m_num += len;
1000 }
1001}
1002
1003template<typename T, typename A>
1004inline void
1005vec<T, A, vl_embed>::splice (const vec<T, A, vl_embed> *src)
1006{
1007 if (src)
1008 splice (*src);
1009}
1010
1011
1012/* Push OBJ (a new element) onto the end of the vector. There must be
1013 sufficient space in the vector. Return a pointer to the slot
1014 where OBJ was inserted. */
1015
1016template<typename T, typename A>
1017inline T *
1018vec<T, A, vl_embed>::quick_push (const T &obj)
1019{
1020 gcc_checking_assert (space (1))((void)(!(space (1)) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1020, __FUNCTION__), 0 : 0))
;
1021 T *slot = &m_vecdata[m_vecpfx.m_num++];
1022 *slot = obj;
1023 return slot;
1024}
1025
1026
1027/* Pop and return the last element off the end of the vector. */
1028
1029template<typename T, typename A>
1030inline T &
1031vec<T, A, vl_embed>::pop (void)
1032{
1033 gcc_checking_assert (length () > 0)((void)(!(length () > 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1033, __FUNCTION__), 0 : 0))
;
1034 return m_vecdata[--m_vecpfx.m_num];
1035}
1036
1037
1038/* Set the length of the vector to SIZE. The new length must be less
1039 than or equal to the current length. This is an O(1) operation. */
1040
1041template<typename T, typename A>
1042inline void
1043vec<T, A, vl_embed>::truncate (unsigned size)
1044{
1045 gcc_checking_assert (length () >= size)((void)(!(length () >= size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1045, __FUNCTION__), 0 : 0))
;
1046 m_vecpfx.m_num = size;
1047}
1048
1049
1050/* Insert an element, OBJ, at the IXth position of this vector. There
1051 must be sufficient space. */
1052
1053template<typename T, typename A>
1054inline void
1055vec<T, A, vl_embed>::quick_insert (unsigned ix, const T &obj)
1056{
1057 gcc_checking_assert (length () < allocated ())((void)(!(length () < allocated ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1057, __FUNCTION__), 0 : 0))
;
1058 gcc_checking_assert (ix <= length ())((void)(!(ix <= length ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1058, __FUNCTION__), 0 : 0))
;
1059 T *slot = &m_vecdata[ix];
1060 memmove (slot + 1, slot, (m_vecpfx.m_num++ - ix) * sizeof (T));
1061 *slot = obj;
1062}
1063
1064
1065/* Remove an element from the IXth position of this vector. Ordering of
1066 remaining elements is preserved. This is an O(N) operation due to
1067 memmove. */
1068
1069template<typename T, typename A>
1070inline void
1071vec<T, A, vl_embed>::ordered_remove (unsigned ix)
1072{
1073 gcc_checking_assert (ix < length ())((void)(!(ix < length ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1073, __FUNCTION__), 0 : 0))
;
1074 T *slot = &m_vecdata[ix];
1075 memmove (slot, slot + 1, (--m_vecpfx.m_num - ix) * sizeof (T));
1076}
1077
1078
/* Remove elements in [START, END) from VEC for which COND holds.  Ordering of
   remaining elements is preserved.  This is an O(N) operation.  */

#define VEC_ORDERED_REMOVE_IF_FROM_TO(vec, read_index, write_index,	\
				      elem_ptr, start, end, cond)	\
  {									\
    gcc_assert ((end) <= (vec).length ());				\
    for (read_index = write_index = (start); read_index < (end);	\
	 ++read_index)							\
      {									\
	elem_ptr = &(vec)[read_index];					\
	bool remove_p = (cond);						\
	if (remove_p)							\
	  continue;							\
									\
	if (read_index != write_index)					\
	  (vec)[write_index] = (vec)[read_index];			\
									\
	write_index++;							\
      }									\
									\
    if (read_index - write_index > 0)					\
      (vec).block_remove (write_index, read_index - write_index);	\
  }
1103
1104
/* Remove elements from VEC for which COND holds.  Ordering of remaining
   elements is preserved.  This is an O(N) operation.  */

#define VEC_ORDERED_REMOVE_IF(vec, read_index, write_index, elem_ptr,	\
			      cond)					\
  VEC_ORDERED_REMOVE_IF_FROM_TO ((vec), read_index, write_index,	\
				 elem_ptr, 0, (vec).length (), (cond))
1112
1113/* Remove an element from the IXth position of this vector. Ordering of
1114 remaining elements is destroyed. This is an O(1) operation. */
1115
1116template<typename T, typename A>
1117inline void
1118vec<T, A, vl_embed>::unordered_remove (unsigned ix)
1119{
1120 gcc_checking_assert (ix < length ())((void)(!(ix < length ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1120, __FUNCTION__), 0 : 0))
;
1121 m_vecdata[ix] = m_vecdata[--m_vecpfx.m_num];
1122}
1123
1124
1125/* Remove LEN elements starting at the IXth. Ordering is retained.
1126 This is an O(N) operation due to memmove. */
1127
1128template<typename T, typename A>
1129inline void
1130vec<T, A, vl_embed>::block_remove (unsigned ix, unsigned len)
1131{
1132 gcc_checking_assert (ix + len <= length ())((void)(!(ix + len <= length ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1132, __FUNCTION__), 0 : 0))
;
1133 T *slot = &m_vecdata[ix];
1134 m_vecpfx.m_num -= len;
1135 memmove (slot, slot + len, (m_vecpfx.m_num - ix) * sizeof (T));
1136}
1137
1138
1139/* Sort the contents of this vector with qsort. CMP is the comparison
1140 function to pass to qsort. */
1141
1142template<typename T, typename A>
1143inline void
1144vec<T, A, vl_embed>::qsort (int (*cmp) (const void *, const void *))qsort (int (*cmp) (const void *, const void *))
1145{
1146 if (length () > 1)
1147 gcc_qsort (address (), length (), sizeof (T), cmp);
1148}
1149
1150/* Sort the contents of this vector with qsort. CMP is the comparison
1151 function to pass to qsort. */
1152
1153template<typename T, typename A>
1154inline void
1155vec<T, A, vl_embed>::sort (int (*cmp) (const void *, const void *, void *),
1156 void *data)
1157{
1158 if (length () > 1)
1159 gcc_sort_r (address (), length (), sizeof (T), cmp, data);
1160}
1161
1162/* Sort the contents of this vector with gcc_stablesort_r. CMP is the
1163 comparison function to pass to qsort. */
1164
1165template<typename T, typename A>
1166inline void
1167vec<T, A, vl_embed>::stablesort (int (*cmp) (const void *, const void *,
1168 void *), void *data)
1169{
1170 if (length () > 1)
1171 gcc_stablesort_r (address (), length (), sizeof (T), cmp, data);
1172}
1173
1174/* Search the contents of the sorted vector with a binary search.
1175 CMP is the comparison function to pass to bsearch. */
1176
1177template<typename T, typename A>
1178inline T *
1179vec<T, A, vl_embed>::bsearch (const void *key,
1180 int (*compar) (const void *, const void *))
1181{
1182 const void *base = this->address ();
1183 size_t nmemb = this->length ();
1184 size_t size = sizeof (T);
1185 /* The following is a copy of glibc stdlib-bsearch.h. */
1186 size_t l, u, idx;
1187 const void *p;
1188 int comparison;
1189
1190 l = 0;
1191 u = nmemb;
1192 while (l < u)
1193 {
1194 idx = (l + u) / 2;
1195 p = (const void *) (((const char *) base) + (idx * size));
1196 comparison = (*compar) (key, p);
1197 if (comparison < 0)
1198 u = idx;
1199 else if (comparison > 0)
1200 l = idx + 1;
1201 else
1202 return (T *)const_cast<void *>(p);
1203 }
1204
1205 return NULLnullptr;
1206}
1207
1208/* Search the contents of the sorted vector with a binary search.
1209 CMP is the comparison function to pass to bsearch. */
1210
1211template<typename T, typename A>
1212inline T *
1213vec<T, A, vl_embed>::bsearch (const void *key,
1214 int (*compar) (const void *, const void *,
1215 void *), void *data)
1216{
1217 const void *base = this->address ();
1218 size_t nmemb = this->length ();
1219 size_t size = sizeof (T);
1220 /* The following is a copy of glibc stdlib-bsearch.h. */
1221 size_t l, u, idx;
1222 const void *p;
1223 int comparison;
1224
1225 l = 0;
1226 u = nmemb;
1227 while (l < u)
1228 {
1229 idx = (l + u) / 2;
1230 p = (const void *) (((const char *) base) + (idx * size));
1231 comparison = (*compar) (key, p, data);
1232 if (comparison < 0)
1233 u = idx;
1234 else if (comparison > 0)
1235 l = idx + 1;
1236 else
1237 return (T *)const_cast<void *>(p);
1238 }
1239
1240 return NULLnullptr;
1241}
1242
1243/* Return true if SEARCH is an element of V. Note that this is O(N) in the
1244 size of the vector and so should be used with care. */
1245
1246template<typename T, typename A>
1247inline bool
1248vec<T, A, vl_embed>::contains (const T &search) const
1249{
1250 unsigned int len = length ();
1251 for (unsigned int i = 0; i < len; i++)
1252 if ((*this)[i] == search)
1253 return true;
1254
1255 return false;
1256}
1257
1258/* Find and return the first position in which OBJ could be inserted
1259 without changing the ordering of this vector. LESSTHAN is a
1260 function that returns true if the first argument is strictly less
1261 than the second. */
1262
1263template<typename T, typename A>
1264unsigned
1265vec<T, A, vl_embed>::lower_bound (T obj, bool (*lessthan)(const T &, const T &))
1266 const
1267{
1268 unsigned int len = length ();
1269 unsigned int half, middle;
1270 unsigned int first = 0;
1271 while (len > 0)
1272 {
1273 half = len / 2;
1274 middle = first;
1275 middle += half;
1276 T middle_elem = (*this)[middle];
1277 if (lessthan (middle_elem, obj))
1278 {
1279 first = middle;
1280 ++first;
1281 len = len - half - 1;
1282 }
1283 else
1284 len = half;
1285 }
1286 return first;
1287}
1288
1289
1290/* Return the number of bytes needed to embed an instance of an
1291 embeddable vec inside another data structure.
1292
1293 Use these methods to determine the required size and initialization
1294 of a vector V of type T embedded within another structure (as the
1295 final member):
1296
1297 size_t vec<T, A, vl_embed>::embedded_size (unsigned alloc);
1298 void v->embedded_init (unsigned alloc, unsigned num);
1299
1300 These allow the caller to perform the memory allocation. */
1301
1302template<typename T, typename A>
1303inline size_t
1304vec<T, A, vl_embed>::embedded_size (unsigned alloc)
1305{
1306 struct alignas (T) U { char data[sizeof (T)]; };
1307 typedef vec<U, A, vl_embed> vec_embedded;
1308 typedef typename std::conditional<std::is_standard_layout<T>::value,
1309 vec, vec_embedded>::type vec_stdlayout;
1310 static_assert (sizeof (vec_stdlayout) == sizeof (vec), "");
1311 static_assert (alignof (vec_stdlayout) == alignof (vec), "");
1312 return offsetof (vec_stdlayout, m_vecdata)__builtin_offsetof(vec_stdlayout, m_vecdata) + alloc * sizeof (T);
1313}
1314
1315
1316/* Initialize the vector to contain room for ALLOC elements and
1317 NUM active elements. */
1318
1319template<typename T, typename A>
1320inline void
1321vec<T, A, vl_embed>::embedded_init (unsigned alloc, unsigned num, unsigned aut)
1322{
1323 m_vecpfx.m_alloc = alloc;
1324 m_vecpfx.m_using_auto_storage = aut;
1325 m_vecpfx.m_num = num;
1326}
1327
1328
1329/* Grow the vector to a specific length. LEN must be as long or longer than
1330 the current length. The new elements are uninitialized. */
1331
1332template<typename T, typename A>
1333inline void
1334vec<T, A, vl_embed>::quick_grow (unsigned len)
1335{
1336 gcc_checking_assert (length () <= len && len <= m_vecpfx.m_alloc)((void)(!(length () <= len && len <= m_vecpfx.m_alloc
) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1336, __FUNCTION__), 0 : 0))
;
1337 m_vecpfx.m_num = len;
1338}
1339
1340
1341/* Grow the vector to a specific length. LEN must be as long or longer than
1342 the current length. The new elements are initialized to zero. */
1343
1344template<typename T, typename A>
1345inline void
1346vec<T, A, vl_embed>::quick_grow_cleared (unsigned len)
1347{
1348 unsigned oldlen = length ();
1349 size_t growby = len - oldlen;
1350 quick_grow (len);
1351 if (growby != 0)
1352 vec_default_construct (address () + oldlen, growby);
1353}
1354
1355/* Garbage collection support for vec<T, A, vl_embed>. */
1356
1357template<typename T>
1358void
1359gt_ggc_mx (vec<T, va_gc> *v)
1360{
1361 extern void gt_ggc_mx (T &);
1362 for (unsigned i = 0; i < v->length (); i++)
1363 gt_ggc_mx ((*v)[i]);
1364}
1365
1366template<typename T>
1367void
1368gt_ggc_mx (vec<T, va_gc_atomic, vl_embed> *v ATTRIBUTE_UNUSED__attribute__ ((__unused__)))
1369{
1370 /* Nothing to do. Vectors of atomic types wrt GC do not need to
1371 be traversed. */
1372}
1373
1374
1375/* PCH support for vec<T, A, vl_embed>. */
1376
1377template<typename T, typename A>
1378void
1379gt_pch_nx (vec<T, A, vl_embed> *v)
1380{
1381 extern void gt_pch_nx (T &);
1382 for (unsigned i = 0; i < v->length (); i++)
1383 gt_pch_nx ((*v)[i]);
1384}
1385
1386template<typename T, typename A>
1387void
1388gt_pch_nx (vec<T *, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
1389{
1390 for (unsigned i = 0; i < v->length (); i++)
1391 op (&((*v)[i]), cookie);
1392}
1393
1394template<typename T, typename A>
1395void
1396gt_pch_nx (vec<T, A, vl_embed> *v, gt_pointer_operator op, void *cookie)
1397{
1398 extern void gt_pch_nx (T *, gt_pointer_operator, void *);
1399 for (unsigned i = 0; i < v->length (); i++)
1400 gt_pch_nx (&((*v)[i]), op, cookie);
1401}
1402
1403
1404/* Space efficient vector. These vectors can grow dynamically and are
1405 allocated together with their control data. They are suited to be
1406 included in data structures. Prior to initial allocation, they
1407 only take a single word of storage.
1408
1409 These vectors are implemented as a pointer to an embeddable vector.
1410 The semantics allow for this pointer to be NULL to represent empty
1411 vectors. This way, empty vectors occupy minimal space in the
1412 structure containing them.
1413
1414 Properties:
1415
1416 - The whole vector and control data are allocated in a single
1417 contiguous block.
1418 - The whole vector may be re-allocated.
1419 - Vector data may grow and shrink.
1420 - Access and manipulation requires a pointer test and
1421 indirection.
1422 - It requires 1 word of storage (prior to vector allocation).
1423
1424
1425 Limitations:
1426
1427 These vectors must be PODs because they are stored in unions.
1428 (http://en.wikipedia.org/wiki/Plain_old_data_structures).
1429 As long as we use C++03, we cannot have constructors nor
1430 destructors in classes that are stored in unions. */
1431
1432template<typename T, size_t N = 0>
1433class auto_vec;
1434
1435template<typename T>
1436struct vec<T, va_heap, vl_ptr>
1437{
1438public:
1439 /* Default ctors to ensure triviality. Use value-initialization
1440 (e.g., vec() or vec v{ };) or vNULL to create a zero-initialized
1441 instance. */
1442 vec () = default;
1443 vec (const vec &) = default;
1444 /* Initialization from the generic vNULL. */
1445 vec (vnull): m_vec () { }
1446 /* Same as default ctor: vec storage must be released manually. */
1447 ~vec () = default;
1448
1449 /* Defaulted same as copy ctor. */
1450 vec& operator= (const vec &) = default;
1451
1452 /* Prevent implicit conversion from auto_vec. Use auto_vec::to_vec()
1453 instead. */
1454 template <size_t N>
1455 vec (auto_vec<T, N> &) = delete;
1456
1457 template <size_t N>
1458 void operator= (auto_vec<T, N> &) = delete;
1459
1460 /* Memory allocation and deallocation for the embedded vector.
1461 Needed because we cannot have proper ctors/dtors defined. */
1462 void create (unsigned nelems CXX_MEM_STAT_INFO);
1463 void release (void);
1464
1465 /* Vector operations. */
1466 bool exists (void) const
1467 { return m_vec != NULLnullptr; }
1468
1469 bool is_empty (void) const
1470 { return m_vec ? m_vec->is_empty () : true; }
1471
1472 unsigned length (void) const
1473 { return m_vec ? m_vec->length () : 0; }
1474
1475 T *address (void)
1476 { return m_vec ? m_vec->m_vecdata : NULLnullptr; }
1477
1478 const T *address (void) const
1479 { return m_vec ? m_vec->m_vecdata : NULLnullptr; }
1480
1481 T *begin () { return address (); }
1482 const T *begin () const { return address (); }
1483 T *end () { return begin () + length (); }
1484 const T *end () const { return begin () + length (); }
1485 const T &operator[] (unsigned ix) const
1486 { return (*m_vec)[ix]; }
1487
1488 bool operator!=(const vec &other) const
1489 { return !(*this == other); }
1490
1491 bool operator==(const vec &other) const
1492 { return address () == other.address (); }
1493
1494 T &operator[] (unsigned ix)
1495 { return (*m_vec)[ix]; }
1496
1497 T &last (void)
1498 { return m_vec->last (); }
1499
1500 bool space (int nelems) const
1501 { return m_vec ? m_vec->space (nelems) : nelems == 0; }
1502
1503 bool iterate (unsigned ix, T *p) const;
1504 bool iterate (unsigned ix, T **p) const;
1505 vec copy (ALONE_CXX_MEM_STAT_INFO) const;
1506 bool reserve (unsigned, bool = false CXX_MEM_STAT_INFO);
1507 bool reserve_exact (unsigned CXX_MEM_STAT_INFO);
1508 void splice (const vec &);
1509 void safe_splice (const vec & CXX_MEM_STAT_INFO);
1510 T *quick_push (const T &);
1511 T *safe_push (const T &CXX_MEM_STAT_INFO);
1512 T &pop (void);
1513 void truncate (unsigned);
1514 void safe_grow (unsigned, bool = false CXX_MEM_STAT_INFO);
1515 void safe_grow_cleared (unsigned, bool = false CXX_MEM_STAT_INFO);
1516 void quick_grow (unsigned);
1517 void quick_grow_cleared (unsigned);
1518 void quick_insert (unsigned, const T &);
1519 void safe_insert (unsigned, const T & CXX_MEM_STAT_INFO);
1520 void ordered_remove (unsigned);
1521 void unordered_remove (unsigned);
1522 void block_remove (unsigned, unsigned);
1523 void qsort (int (*) (const void *, const void *))qsort (int (*) (const void *, const void *));
1524 void sort (int (*) (const void *, const void *, void *), void *);
1525 void stablesort (int (*) (const void *, const void *, void *), void *);
1526 T *bsearch (const void *key, int (*compar)(const void *, const void *));
1527 T *bsearch (const void *key,
1528 int (*compar)(const void *, const void *, void *), void *);
1529 unsigned lower_bound (T, bool (*)(const T &, const T &)) const;
1530 bool contains (const T &search) const;
1531 void reverse (void);
1532
1533 bool using_auto_storage () const;
1534
1535 /* FIXME - This field should be private, but we need to cater to
1536 compilers that have stricter notions of PODness for types. */
1537 vec<T, va_heap, vl_embed> *m_vec;
1538};
1539
1540
1541/* auto_vec is a subclass of vec that automatically manages creating and
1542 releasing the internal vector. If N is non zero then it has N elements of
1543 internal storage. The default is no internal storage, and you probably only
1544 want to ask for internal storage for vectors on the stack because if the
1545 size of the vector is larger than the internal storage that space is wasted.
1546 */
1547template<typename T, size_t N /* = 0 */>
1548class auto_vec : public vec<T, va_heap>
1549{
1550public:
1551 auto_vec ()
1552 {
1553 m_auto.embedded_init (MAX (N, 2)((N) > (2) ? (N) : (2)), 0, 1);
1554 this->m_vec = &m_auto;
1555 }
1556
1557 auto_vec (size_t s CXX_MEM_STAT_INFO)
1558 {
1559 if (s > N)
1560 {
1561 this->create (s PASS_MEM_STAT);
1562 return;
1563 }
1564
1565 m_auto.embedded_init (MAX (N, 2)((N) > (2) ? (N) : (2)), 0, 1);
1566 this->m_vec = &m_auto;
1567 }
1568
1569 ~auto_vec ()
1570 {
1571 this->release ();
1572 }
1573
1574 /* Explicitly convert to the base class. There is no conversion
1575 from a const auto_vec because a copy of the returned vec can
1576 be used to modify *THIS.
1577 This is a legacy function not to be used in new code. */
1578 vec<T, va_heap> to_vec_legacy () {
1579 return *static_cast<vec<T, va_heap> *>(this);
1580 }
1581
1582private:
1583 vec<T, va_heap, vl_embed> m_auto;
1584 T m_data[MAX (N - 1, 1)((N - 1) > (1) ? (N - 1) : (1))];
1585};
1586
1587/* auto_vec is a sub class of vec whose storage is released when it is
1588 destroyed. */
1589template<typename T>
1590class auto_vec<T, 0> : public vec<T, va_heap>
1591{
1592public:
1593 auto_vec () { this->m_vec = NULLnullptr; }
1594 auto_vec (size_t n CXX_MEM_STAT_INFO) { this->create (n PASS_MEM_STAT); }
1595 ~auto_vec () { this->release (); }
1596
1597 auto_vec (vec<T, va_heap>&& r)
1598 {
1599 gcc_assert (!r.using_auto_storage ())((void)(!(!r.using_auto_storage ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1599, __FUNCTION__), 0 : 0))
;
1600 this->m_vec = r.m_vec;
1601 r.m_vec = NULLnullptr;
1602 }
1603
1604 auto_vec (auto_vec<T> &&r)
1605 {
1606 gcc_assert (!r.using_auto_storage ())((void)(!(!r.using_auto_storage ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1606, __FUNCTION__), 0 : 0))
;
1607 this->m_vec = r.m_vec;
1608 r.m_vec = NULLnullptr;
1609 }
1610
1611 auto_vec& operator= (vec<T, va_heap>&& r)
1612 {
1613 if (this == &r)
1614 return *this;
1615
1616 gcc_assert (!r.using_auto_storage ())((void)(!(!r.using_auto_storage ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1616, __FUNCTION__), 0 : 0))
;
1617 this->release ();
1618 this->m_vec = r.m_vec;
1619 r.m_vec = NULLnullptr;
1620 return *this;
1621 }
1622
1623 auto_vec& operator= (auto_vec<T> &&r)
1624 {
1625 if (this == &r)
1626 return *this;
1627
1628 gcc_assert (!r.using_auto_storage ())((void)(!(!r.using_auto_storage ()) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1628, __FUNCTION__), 0 : 0))
;
1629 this->release ();
1630 this->m_vec = r.m_vec;
1631 r.m_vec = NULLnullptr;
1632 return *this;
1633 }
1634
1635 /* Explicitly convert to the base class. There is no conversion
1636 from a const auto_vec because a copy of the returned vec can
1637 be used to modify *THIS.
1638 This is a legacy function not to be used in new code. */
1639 vec<T, va_heap> to_vec_legacy () {
1640 return *static_cast<vec<T, va_heap> *>(this);
1641 }
1642
1643 // You probably don't want to copy a vector, so these are deleted to prevent
1644 // unintentional use. If you really need a copy of the vectors contents you
1645 // can use copy ().
1646 auto_vec(const auto_vec &) = delete;
1647 auto_vec &operator= (const auto_vec &) = delete;
1648};
1649
1650
1651/* Allocate heap memory for pointer V and create the internal vector
1652 with space for NELEMS elements. If NELEMS is 0, the internal
1653 vector is initialized to empty. */
1654
1655template<typename T>
1656inline void
1657vec_alloc (vec<T> *&v, unsigned nelems CXX_MEM_STAT_INFO)
1658{
1659 v = new vec<T>;
1660 v->create (nelems PASS_MEM_STAT);
1661}
1662
1663
1664/* A subclass of auto_vec <char *> that frees all of its elements on
1665 deletion. */
1666
1667class auto_string_vec : public auto_vec <char *>
1668{
1669 public:
1670 ~auto_string_vec ();
1671};
1672
1673/* A subclass of auto_vec <T *> that deletes all of its elements on
1674 destruction.
1675
1676 This is a crude way for a vec to "own" the objects it points to
1677 and clean up automatically.
1678
1679 For example, no attempt is made to delete elements when an item
1680 within the vec is overwritten.
1681
1682 We can't rely on gnu::unique_ptr within a container,
1683 since we can't rely on move semantics in C++98. */
1684
1685template <typename T>
1686class auto_delete_vec : public auto_vec <T *>
1687{
1688 public:
1689 auto_delete_vec () {}
1690 auto_delete_vec (size_t s) : auto_vec <T *> (s) {}
1691
1692 ~auto_delete_vec ();
1693
1694private:
1695 DISABLE_COPY_AND_ASSIGN(auto_delete_vec)auto_delete_vec (const auto_delete_vec&) = delete; void operator
= (const auto_delete_vec &) = delete
;
1696};
1697
1698/* Conditionally allocate heap memory for VEC and its internal vector. */
1699
1700template<typename T>
1701inline void
1702vec_check_alloc (vec<T, va_heap> *&vec, unsigned nelems CXX_MEM_STAT_INFO)
1703{
1704 if (!vec)
1705 vec_alloc (vec, nelems PASS_MEM_STAT);
1706}
1707
1708
1709/* Free the heap memory allocated by vector V and set it to NULL. */
1710
1711template<typename T>
1712inline void
1713vec_free (vec<T> *&v)
1714{
1715 if (v == NULLnullptr)
1716 return;
1717
1718 v->release ();
1719 delete v;
1720 v = NULLnullptr;
1721}
1722
1723
1724/* Return iteration condition and update PTR to point to the IX'th
1725 element of this vector. Use this to iterate over the elements of a
1726 vector as follows,
1727
1728 for (ix = 0; v.iterate (ix, &ptr); ix++)
1729 continue; */
1730
1731template<typename T>
1732inline bool
1733vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T *ptr) const
1734{
1735 if (m_vec)
1736 return m_vec->iterate (ix, ptr);
1737 else
1738 {
1739 *ptr = 0;
1740 return false;
1741 }
1742}
1743
1744
1745/* Return iteration condition and update *PTR to point to the
1746 IX'th element of this vector. Use this to iterate over the
1747 elements of a vector as follows,
1748
1749 for (ix = 0; v->iterate (ix, &ptr); ix++)
1750 continue;
1751
1752 This variant is for vectors of objects. */
1753
1754template<typename T>
1755inline bool
1756vec<T, va_heap, vl_ptr>::iterate (unsigned ix, T **ptr) const
1757{
1758 if (m_vec)
1759 return m_vec->iterate (ix, ptr);
1760 else
1761 {
1762 *ptr = 0;
1763 return false;
1764 }
1765}
1766
1767
1768/* Convenience macro for forward iteration. */
/* Forward iteration: load each element of V into P in turn.  P is a
   copy of the element; modifying it does not modify the vector.  */
#define FOR_EACH_VEC_ELT(V, I, P) \
  for (I = 0; (V).iterate ((I), &(P)); ++(I))

/* As above, but V is a pointer that may be NULL (empty iteration).  */
#define FOR_EACH_VEC_SAFE_ELT(V, I, P) \
  for (I = 0; vec_safe_iterate ((V), (I), &(P)); ++(I))

/* Likewise, but start from index FROM rather than 0.  */
#define FOR_EACH_VEC_ELT_FROM(V, I, P, FROM) \
  for (I = (FROM); (V).iterate ((I), &(P)); ++(I))

/* Reverse iteration.  Relies on I wrapping past zero (unsigned) or
   going negative so that iterate () terminates the loop.  */
#define FOR_EACH_VEC_ELT_REVERSE(V, I, P) \
  for (I = (V).length () - 1; \
       (V).iterate ((I), &(P)); \
       (I)--)

/* Reverse iteration over a possibly-NULL vector pointer.  */
#define FOR_EACH_VEC_SAFE_ELT_REVERSE(V, I, P) \
  for (I = vec_safe_length (V) - 1; \
       vec_safe_iterate ((V), (I), &(P)); \
       (I)--)
1789
1790/* auto_string_vec's dtor, freeing all contained strings, automatically
1791 chaining up to ~auto_vec <char *>, which frees the internal buffer. */
1792
1793inline
1794auto_string_vec::~auto_string_vec ()
1795{
1796 int i;
1797 char *str;
1798 FOR_EACH_VEC_ELT (*this, i, str)for (i = 0; (*this).iterate ((i), &(str)); ++(i))
1799 free (str);
1800}
1801
1802/* auto_delete_vec's dtor, deleting all contained items, automatically
1803 chaining up to ~auto_vec <T*>, which frees the internal buffer. */
1804
1805template <typename T>
1806inline
1807auto_delete_vec<T>::~auto_delete_vec ()
1808{
1809 int i;
1810 T *item;
1811 FOR_EACH_VEC_ELT (*this, i, item)for (i = 0; (*this).iterate ((i), &(item)); ++(i))
1812 delete item;
1813}
1814
1815
1816/* Return a copy of this vector. */
1817
1818template<typename T>
1819inline vec<T, va_heap, vl_ptr>
1820vec<T, va_heap, vl_ptr>::copy (ALONE_MEM_STAT_DECLvoid) const
1821{
1822 vec<T, va_heap, vl_ptr> new_vec{ };
1823 if (length ())
1824 new_vec.m_vec = m_vec->copy (ALONE_PASS_MEM_STAT);
1825 return new_vec;
1826}
1827
1828
1829/* Ensure that the vector has at least RESERVE slots available (if
1830 EXACT is false), or exactly RESERVE slots available (if EXACT is
1831 true).
1832
1833 This may create additional headroom if EXACT is false.
1834
1835 Note that this can cause the embedded vector to be reallocated.
1836 Returns true iff reallocation actually occurred. */
1837
1838template<typename T>
1839inline bool
1840vec<T, va_heap, vl_ptr>::reserve (unsigned nelems, bool exact MEM_STAT_DECL)
1841{
1842 if (space (nelems))
1843 return false;
1844
1845 /* For now play a game with va_heap::reserve to hide our auto storage if any,
1846 this is necessary because it doesn't have enough information to know the
1847 embedded vector is in auto storage, and so should not be freed. */
1848 vec<T, va_heap, vl_embed> *oldvec = m_vec;
1849 unsigned int oldsize = 0;
1850 bool handle_auto_vec = m_vec && using_auto_storage ();
1851 if (handle_auto_vec)
1852 {
1853 m_vec = NULLnullptr;
1854 oldsize = oldvec->length ();
1855 nelems += oldsize;
1856 }
1857
1858 va_heap::reserve (m_vec, nelems, exact PASS_MEM_STAT);
1859 if (handle_auto_vec)
1860 {
1861 vec_copy_construct (m_vec->address (), oldvec->address (), oldsize);
1862 m_vec->m_vecpfx.m_num = oldsize;
1863 }
1864
1865 return true;
1866}
1867
1868
1869/* Ensure that this vector has exactly NELEMS slots available. This
1870 will not create additional headroom. Note this can cause the
1871 embedded vector to be reallocated. Returns true iff reallocation
1872 actually occurred. */
1873
1874template<typename T>
1875inline bool
1876vec<T, va_heap, vl_ptr>::reserve_exact (unsigned nelems MEM_STAT_DECL)
1877{
1878 return reserve (nelems, true PASS_MEM_STAT);
1879}
1880
1881
1882/* Create the internal vector and reserve NELEMS for it. This is
1883 exactly like vec::reserve, but the internal vector is
1884 unconditionally allocated from scratch. The old one, if it
1885 existed, is lost. */
1886
1887template<typename T>
1888inline void
1889vec<T, va_heap, vl_ptr>::create (unsigned nelems MEM_STAT_DECL)
1890{
1891 m_vec = NULLnullptr;
1892 if (nelems > 0)
1893 reserve_exact (nelems PASS_MEM_STAT);
1894}
1895
1896
1897/* Free the memory occupied by the embedded vector. */
1898
1899template<typename T>
1900inline void
1901vec<T, va_heap, vl_ptr>::release (void)
1902{
1903 if (!m_vec)
1904 return;
1905
1906 if (using_auto_storage ())
1907 {
1908 m_vec->m_vecpfx.m_num = 0;
1909 return;
1910 }
1911
1912 va_heap::release (m_vec);
1913}
1914
1915/* Copy the elements from SRC to the end of this vector as if by memcpy.
1916 SRC and this vector must be allocated with the same memory
1917 allocation mechanism. This vector is assumed to have sufficient
1918 headroom available. */
1919
1920template<typename T>
1921inline void
1922vec<T, va_heap, vl_ptr>::splice (const vec<T, va_heap, vl_ptr> &src)
1923{
1924 if (src.length ())
1925 m_vec->splice (*(src.m_vec));
1926}
1927
1928
1929/* Copy the elements in SRC to the end of this vector as if by memcpy.
1930 SRC and this vector must be allocated with the same mechanism.
1931 If there is not enough headroom in this vector, it will be reallocated
1932 as needed. */
1933
1934template<typename T>
1935inline void
1936vec<T, va_heap, vl_ptr>::safe_splice (const vec<T, va_heap, vl_ptr> &src
1937 MEM_STAT_DECL)
1938{
1939 if (src.length ())
1940 {
1941 reserve_exact (src.length ());
1942 splice (src);
1943 }
1944}
1945
1946
1947/* Push OBJ (a new element) onto the end of the vector. There must be
1948 sufficient space in the vector. Return a pointer to the slot
1949 where OBJ was inserted. */
1950
1951template<typename T>
1952inline T *
1953vec<T, va_heap, vl_ptr>::quick_push (const T &obj)
1954{
1955 return m_vec->quick_push (obj);
1956}
1957
1958
1959/* Push a new element OBJ onto the end of this vector. Reallocates
1960 the embedded vector, if needed. Return a pointer to the slot where
1961 OBJ was inserted. */
1962
1963template<typename T>
1964inline T *
1965vec<T, va_heap, vl_ptr>::safe_push (const T &obj MEM_STAT_DECL)
1966{
1967 reserve (1, false PASS_MEM_STAT);
1968 return quick_push (obj);
1969}
1970
1971
1972/* Pop and return the last element off the end of the vector. */
1973
1974template<typename T>
1975inline T &
1976vec<T, va_heap, vl_ptr>::pop (void)
1977{
1978 return m_vec->pop ();
1979}
1980
1981
1982/* Set the length of the vector to LEN. The new length must be less
1983 than or equal to the current length. This is an O(1) operation. */
1984
1985template<typename T>
1986inline void
1987vec<T, va_heap, vl_ptr>::truncate (unsigned size)
1988{
1989 if (m_vec)
1990 m_vec->truncate (size);
1991 else
1992 gcc_checking_assert (size == 0)((void)(!(size == 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 1992, __FUNCTION__), 0 : 0))
;
1993}
1994
1995
1996/* Grow the vector to a specific length. LEN must be as long or
1997 longer than the current length. The new elements are
1998 uninitialized. Reallocate the internal vector, if needed. */
1999
2000template<typename T>
2001inline void
2002vec<T, va_heap, vl_ptr>::safe_grow (unsigned len, bool exact MEM_STAT_DECL)
2003{
2004 unsigned oldlen = length ();
2005 gcc_checking_assert (oldlen <= len)((void)(!(oldlen <= len) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2005, __FUNCTION__), 0 : 0))
;
2006 reserve (len - oldlen, exact PASS_MEM_STAT);
2007 if (m_vec)
2008 m_vec->quick_grow (len);
2009 else
2010 gcc_checking_assert (len == 0)((void)(!(len == 0) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2010, __FUNCTION__), 0 : 0))
;
2011}
2012
2013
2014/* Grow the embedded vector to a specific length. LEN must be as
2015 long or longer than the current length. The new elements are
2016 initialized to zero. Reallocate the internal vector, if needed. */
2017
2018template<typename T>
2019inline void
2020vec<T, va_heap, vl_ptr>::safe_grow_cleared (unsigned len, bool exact
2021 MEM_STAT_DECL)
2022{
2023 unsigned oldlen = length ();
2024 size_t growby = len - oldlen;
2025 safe_grow (len, exact PASS_MEM_STAT);
2026 if (growby != 0)
2027 vec_default_construct (address () + oldlen, growby);
2028}
2029
2030
2031/* Same as vec::safe_grow but without reallocation of the internal vector.
2032 If the vector cannot be extended, a runtime assertion will be triggered. */
2033
2034template<typename T>
2035inline void
2036vec<T, va_heap, vl_ptr>::quick_grow (unsigned len)
2037{
2038 gcc_checking_assert (m_vec)((void)(!(m_vec) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2038, __FUNCTION__), 0 : 0))
;
2039 m_vec->quick_grow (len);
2040}
2041
2042
2043/* Same as vec::quick_grow_cleared but without reallocation of the
2044 internal vector. If the vector cannot be extended, a runtime
2045 assertion will be triggered. */
2046
2047template<typename T>
2048inline void
2049vec<T, va_heap, vl_ptr>::quick_grow_cleared (unsigned len)
2050{
2051 gcc_checking_assert (m_vec)((void)(!(m_vec) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2051, __FUNCTION__), 0 : 0))
;
2052 m_vec->quick_grow_cleared (len);
2053}
2054
2055
2056/* Insert an element, OBJ, at the IXth position of this vector. There
2057 must be sufficient space. */
2058
2059template<typename T>
2060inline void
2061vec<T, va_heap, vl_ptr>::quick_insert (unsigned ix, const T &obj)
2062{
2063 m_vec->quick_insert (ix, obj);
2064}
2065
2066
2067/* Insert an element, OBJ, at the IXth position of the vector.
2068 Reallocate the embedded vector, if necessary. */
2069
2070template<typename T>
2071inline void
2072vec<T, va_heap, vl_ptr>::safe_insert (unsigned ix, const T &obj MEM_STAT_DECL)
2073{
2074 reserve (1, false PASS_MEM_STAT);
2075 quick_insert (ix, obj);
2076}
2077
2078
2079/* Remove an element from the IXth position of this vector. Ordering of
2080 remaining elements is preserved. This is an O(N) operation due to
2081 a memmove. */
2082
2083template<typename T>
2084inline void
2085vec<T, va_heap, vl_ptr>::ordered_remove (unsigned ix)
2086{
2087 m_vec->ordered_remove (ix);
2088}
2089
2090
2091/* Remove an element from the IXth position of this vector. Ordering
2092 of remaining elements is destroyed. This is an O(1) operation. */
2093
2094template<typename T>
2095inline void
2096vec<T, va_heap, vl_ptr>::unordered_remove (unsigned ix)
2097{
2098 m_vec->unordered_remove (ix);
2099}
2100
2101
2102/* Remove LEN elements starting at the IXth. Ordering is retained.
2103 This is an O(N) operation due to memmove. */
2104
2105template<typename T>
2106inline void
2107vec<T, va_heap, vl_ptr>::block_remove (unsigned ix, unsigned len)
2108{
2109 m_vec->block_remove (ix, len);
2110}
2111
2112
2113/* Sort the contents of this vector with qsort. CMP is the comparison
2114 function to pass to qsort. */
2115
2116template<typename T>
2117inline void
2118vec<T, va_heap, vl_ptr>::qsort (int (*cmp) (const void *, const void *))qsort (int (*cmp) (const void *, const void *))
2119{
2120 if (m_vec)
2121 m_vec->qsort (cmp)qsort (cmp);
2122}
2123
2124/* Sort the contents of this vector with qsort. CMP is the comparison
2125 function to pass to qsort. */
2126
2127template<typename T>
2128inline void
2129vec<T, va_heap, vl_ptr>::sort (int (*cmp) (const void *, const void *,
2130 void *), void *data)
2131{
2132 if (m_vec)
2133 m_vec->sort (cmp, data);
2134}
2135
2136/* Sort the contents of this vector with gcc_stablesort_r. CMP is the
2137 comparison function to pass to qsort. */
2138
2139template<typename T>
2140inline void
2141vec<T, va_heap, vl_ptr>::stablesort (int (*cmp) (const void *, const void *,
2142 void *), void *data)
2143{
2144 if (m_vec)
2145 m_vec->stablesort (cmp, data);
2146}
2147
2148/* Search the contents of the sorted vector with a binary search.
2149 CMP is the comparison function to pass to bsearch. */
2150
2151template<typename T>
2152inline T *
2153vec<T, va_heap, vl_ptr>::bsearch (const void *key,
2154 int (*cmp) (const void *, const void *))
2155{
2156 if (m_vec)
2157 return m_vec->bsearch (key, cmp);
2158 return NULLnullptr;
2159}
2160
2161/* Search the contents of the sorted vector with a binary search.
2162 CMP is the comparison function to pass to bsearch. */
2163
2164template<typename T>
2165inline T *
2166vec<T, va_heap, vl_ptr>::bsearch (const void *key,
2167 int (*cmp) (const void *, const void *,
2168 void *), void *data)
2169{
2170 if (m_vec)
2171 return m_vec->bsearch (key, cmp, data);
2172 return NULLnullptr;
2173}
2174
2175
2176/* Find and return the first position in which OBJ could be inserted
2177 without changing the ordering of this vector. LESSTHAN is a
2178 function that returns true if the first argument is strictly less
2179 than the second. */
2180
2181template<typename T>
2182inline unsigned
2183vec<T, va_heap, vl_ptr>::lower_bound (T obj,
2184 bool (*lessthan)(const T &, const T &))
2185 const
2186{
2187 return m_vec ? m_vec->lower_bound (obj, lessthan) : 0;
2188}
2189
2190/* Return true if SEARCH is an element of V. Note that this is O(N) in the
2191 size of the vector and so should be used with care. */
2192
2193template<typename T>
2194inline bool
2195vec<T, va_heap, vl_ptr>::contains (const T &search) const
2196{
2197 return m_vec ? m_vec->contains (search) : false;
2198}
2199
2200/* Reverse content of the vector. */
2201
2202template<typename T>
2203inline void
2204vec<T, va_heap, vl_ptr>::reverse (void)
2205{
2206 unsigned l = length ();
2207 T *ptr = address ();
2208
2209 for (unsigned i = 0; i < l / 2; i++)
2210 std::swap (ptr[i], ptr[l - i - 1]);
2211}
2212
2213template<typename T>
2214inline bool
2215vec<T, va_heap, vl_ptr>::using_auto_storage () const
2216{
2217 return m_vec ? m_vec->m_vecpfx.m_using_auto_storage : false;
2218}
2219
2220/* Release VEC and call release of all element vectors. */
2221
2222template<typename T>
2223inline void
2224release_vec_vec (vec<vec<T> > &vec)
2225{
2226 for (unsigned i = 0; i < vec.length (); i++)
2227 vec[i].release ();
2228
2229 vec.release ();
2230}
2231
2232// Provide a subset of the std::span functionality. (We can't use std::span
2233// itself because it's a C++20 feature.)
2234//
2235// In addition, provide an invalid value that is distinct from all valid
2236// sequences (including the empty sequence). This can be used to return
2237// failure without having to use std::optional.
2238//
2239// There is no operator bool because it would be ambiguous whether it is
2240// testing for a valid value or an empty sequence.
2241template<typename T>
2242class array_slice
2243{
2244 template<typename OtherT> friend class array_slice;
2245
2246public:
2247 using value_type = T;
2248 using iterator = T *;
2249 using const_iterator = const T *;
2250
2251 array_slice () : m_base (nullptr), m_size (0) {}
2252
2253 template<typename OtherT>
2254 array_slice (array_slice<OtherT> other)
2255 : m_base (other.m_base), m_size (other.m_size) {}
2256
2257 array_slice (iterator base, unsigned int size)
2258 : m_base (base), m_size (size) {}
2259
2260 template<size_t N>
2261 array_slice (T (&array)[N]) : m_base (array), m_size (N) {}
2262
2263 template<typename OtherT>
2264 array_slice (const vec<OtherT> &v)
2265 : m_base (v.address ()), m_size (v.length ()) {}
2266
2267 iterator begin () { return m_base; }
2268 iterator end () { return m_base + m_size; }
2269
2270 const_iterator begin () const { return m_base; }
2271 const_iterator end () const { return m_base + m_size; }
2272
2273 value_type &front ();
2274 value_type &back ();
2275 value_type &operator[] (unsigned int i);
2276
2277 const value_type &front () const;
2278 const value_type &back () const;
2279 const value_type &operator[] (unsigned int i) const;
2280
2281 size_t size () const { return m_size; }
2282 size_t size_bytes () const { return m_size * sizeof (T); }
2283 bool empty () const { return m_size == 0; }
2284
2285 // An invalid array_slice that represents a failed operation. This is
2286 // distinct from an empty slice, which is a valid result in some contexts.
2287 static array_slice invalid () { return { nullptr, ~0U }; }
2288
2289 // True if the array is valid, false if it is an array like INVALID.
2290 bool is_valid () const { return m_base || m_size == 0; }
2291
2292private:
2293 iterator m_base;
2294 unsigned int m_size;
2295};
2296
2297template<typename T>
2298inline typename array_slice<T>::value_type &
2299array_slice<T>::front ()
2300{
2301 gcc_checking_assert (m_size)((void)(!(m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2301, __FUNCTION__), 0 : 0))
;
2302 return m_base[0];
2303}
2304
2305template<typename T>
2306inline const typename array_slice<T>::value_type &
2307array_slice<T>::front () const
2308{
2309 gcc_checking_assert (m_size)((void)(!(m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2309, __FUNCTION__), 0 : 0))
;
2310 return m_base[0];
2311}
2312
2313template<typename T>
2314inline typename array_slice<T>::value_type &
2315array_slice<T>::back ()
2316{
2317 gcc_checking_assert (m_size)((void)(!(m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2317, __FUNCTION__), 0 : 0))
;
2318 return m_base[m_size - 1];
2319}
2320
2321template<typename T>
2322inline const typename array_slice<T>::value_type &
2323array_slice<T>::back () const
2324{
2325 gcc_checking_assert (m_size)((void)(!(m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2325, __FUNCTION__), 0 : 0))
;
2326 return m_base[m_size - 1];
2327}
2328
2329template<typename T>
2330inline typename array_slice<T>::value_type &
2331array_slice<T>::operator[] (unsigned int i)
2332{
2333 gcc_checking_assert (i < m_size)((void)(!(i < m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2333, __FUNCTION__), 0 : 0))
;
2334 return m_base[i];
2335}
2336
2337template<typename T>
2338inline const typename array_slice<T>::value_type &
2339array_slice<T>::operator[] (unsigned int i) const
2340{
2341 gcc_checking_assert (i < m_size)((void)(!(i < m_size) ? fancy_abort ("/home/marxin/BIG/buildbot/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/vec.h"
, 2341, __FUNCTION__), 0 : 0))
;
2342 return m_base[i];
2343}
2344
2345template<typename T>
2346array_slice<T>
2347make_array_slice (T *base, unsigned int size)
2348{
2349 return array_slice<T> (base, size);
2350}
2351
2352#if (GCC_VERSION(4 * 1000 + 2) >= 3000)
2353# pragma GCC poison m_vec m_vecpfx m_vecdata
2354#endif
2355
2356#endif // GCC_VEC_H