Bug Summary

File: build/gcc/wide-int.h
Warning: line 1135, column 3
Undefined or garbage value returned to caller

Annotated Source Code

clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name tree-ssa-loop-niter.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-hYGKid.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-loop-niter.cc

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-loop-niter.cc

1/* Functions to determine/estimate number of iterations of a loop.
2 Copyright (C) 2004-2023 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it
7under the terms of the GNU General Public License as published by the
8Free Software Foundation; either version 3, or (at your option) any
9later version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT
12ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#include "config.h"
21#include "system.h"
22#include "coretypes.h"
23#include "backend.h"
24#include "rtl.h"
25#include "tree.h"
26#include "gimple.h"
27#include "tree-pass.h"
28#include "ssa.h"
29#include "gimple-pretty-print.h"
30#include "diagnostic-core.h"
31#include "stor-layout.h"
32#include "fold-const.h"
33#include "calls.h"
34#include "intl.h"
35#include "gimplify.h"
36#include "gimple-iterator.h"
37#include "tree-cfg.h"
38#include "tree-ssa-loop-ivopts.h"
39#include "tree-ssa-loop-niter.h"
40#include "tree-ssa-loop.h"
41#include "cfgloop.h"
42#include "tree-chrec.h"
43#include "tree-scalar-evolution.h"
44#include "tree-dfa.h"
45#include "internal-fn.h"
46#include "gimple-range.h"
47
48
49/* The maximum number of dominator BBs we search for conditions
50 of loop header copies we use for simplifying a conditional
51 expression. */
52#define MAX_DOMINATORS_TO_WALK 8
53
54/*
55
56 Analysis of number of iterations of an affine exit test.
57
58*/
59
60/* Bounds on some value, BELOW <= X <= UP. */
61
62struct bounds
63{
64 mpz_t below, up;
65};
66
67/* Splits expression EXPR to a variable part VAR and constant OFFSET. */
68
69static void
70split_to_var_and_offset (tree expr, tree *var, mpz_t offset)
71{
72  tree type = TREE_TYPE (expr);
73  tree op0, op1;
74  bool negate = false;
75
76  *var = expr;
77  mpz_set_ui (offset, 0);
78
79  switch (TREE_CODE (expr))
80    {
81    case MINUS_EXPR:
82      negate = true;
83      /* Fallthru.  */
84
85    case PLUS_EXPR:
86    case POINTER_PLUS_EXPR:
87      op0 = TREE_OPERAND (expr, 0);
88      op1 = TREE_OPERAND (expr, 1);
89
90      if (TREE_CODE (op1) != INTEGER_CST)
91        break;
92
93      *var = op0;
94      /* Always sign extend the offset.  */
95      wi::to_mpz (wi::to_wide (op1), offset, SIGNED);
96      if (negate)
97        mpz_neg (offset, offset);
98      break;
99
100      case INTEGER_CST:
101      *var = build_int_cst_type (type, 0);
102      wi::to_mpz (wi::to_wide (expr), offset, TYPE_SIGN (type));
103      break;
104
105      default:
106      break;
107      }
108}
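
For example, the expression i - 7 splits into variable part i and sign-extended offset -7, while a bare constant 42 splits into variable part 0 and offset 42. A minimal standalone sketch of the same idea, using an invented Expr record in place of GCC trees (Expr, split, and its fields are illustrative, not GCC API):

#include <gmp.h>

struct Expr { const char *var; long k; bool is_const; bool is_minus; };

/* Mirror of the switch above: peel a constant operand off a sum or
   difference, or turn a bare constant into (0, constant).  */
static void
split (const Expr &e, const char **var, mpz_t offset)
{
  *var = e.var;
  mpz_set_ui (offset, 0);
  if (e.is_const)
    {
      *var = "0";
      mpz_set_si (offset, e.k);
      return;
    }
  mpz_set_si (offset, e.k);
  if (e.is_minus)   /* MINUS_EXPR: negate the peeled offset.  */
    mpz_neg (offset, offset);
}

int
main ()
{
  mpz_t off;
  mpz_init (off);
  const char *v;
  Expr e = { "i", 7, false, true };   /* models "i - 7" */
  split (e, &v, off);
  gmp_printf ("%s + %Zd\n", v, off);  /* prints "i + -7" */
  mpz_clear (off);
}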
109
110/* From condition C0 CMP C1 derives information regarding the value range
111 of VAR, which is of TYPE. Results are stored to BELOW and UP. */
112
113static void
114refine_value_range_using_guard (tree type, tree var,
115 tree c0, enum tree_code cmp, tree c1,
116 mpz_t below, mpz_t up)
117{
118 tree varc0, varc1, ctype;
119 mpz_t offc0, offc1;
120 mpz_t mint, maxt, minc1, maxc1;
121 bool no_wrap = nowrap_type_p (type);
122 bool c0_ok, c1_ok;
123  signop sgn = TYPE_SIGN (type);
124
125  switch (cmp)
126    {
127    case LT_EXPR:
128    case LE_EXPR:
129    case GT_EXPR:
130    case GE_EXPR:
131      STRIP_SIGN_NOPS (c0);
132      STRIP_SIGN_NOPS (c1);
133      ctype = TREE_TYPE (c0);
134      if (!useless_type_conversion_p (ctype, type))
135        return;
136
137      break;
138
139    case EQ_EXPR:
140      /* We could derive quite precise information from EQ_EXPR, however,
141         such a guard is unlikely to appear, so we do not bother with
142         handling it.  */
143      return;
144
145    case NE_EXPR:
146      /* NE_EXPR comparisons do not contain much of useful information,
147         except for cases of comparing with bounds.  */
148      if (TREE_CODE (c1) != INTEGER_CST
149          || !INTEGRAL_TYPE_P (type))
150        return;
151
152      /* Ensure that the condition speaks about an expression in the same
153         type as X and Y.  */
154      ctype = TREE_TYPE (c0);
155      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
156        return;
157      c0 = fold_convert (type, c0);
158      c1 = fold_convert (type, c1);
159
160      if (operand_equal_p (var, c0, 0))
161        {
162          mpz_t valc1;
163
164          /* Case of comparing VAR with its below/up bounds.  */
165          mpz_init (valc1);
166          wi::to_mpz (wi::to_wide (c1), valc1, TYPE_SIGN (type));
167          if (mpz_cmp (valc1, below) == 0)
168            cmp = GT_EXPR;
169          if (mpz_cmp (valc1, up) == 0)
170            cmp = LT_EXPR;
171
172          mpz_clear (valc1);
173        }
174      else
175        {
176          /* Case of comparing with the bounds of the type.  */
177          wide_int min = wi::min_value (type);
178          wide_int max = wi::max_value (type);
179
180          if (wi::to_wide (c1) == min)
181            cmp = GT_EXPR;
182          if (wi::to_wide (c1) == max)
183            cmp = LT_EXPR;
184        }
185
186      /* Quick return if no useful information.  */
187      if (cmp == NE_EXPR)
188        return;
189
190      break;
191
192    default:
193      return;
194    }
195
196  mpz_init (offc0);
197  mpz_init (offc1);
198  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
199  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
200
201  /* We are only interested in comparisons of expressions based on VAR.  */
202  if (operand_equal_p (var, varc1, 0))
203    {
204      std::swap (varc0, varc1);
205      mpz_swap (offc0, offc1);
206      cmp = swap_tree_comparison (cmp);
207    }
208  else if (!operand_equal_p (var, varc0, 0))
209    {
210      mpz_clear (offc0);
211      mpz_clear (offc1);
212      return;
213    }
214
215  mpz_init (mint);
216  mpz_init (maxt);
217  get_type_static_bounds (type, mint, maxt);
218  mpz_init (minc1);
219  mpz_init (maxc1);
220  Value_Range r (TREE_TYPE (varc1));
221  /* Setup range information for varc1.  */
222  if (integer_zerop (varc1))
223    {
224      wi::to_mpz (0, minc1, TYPE_SIGN (type));
225      wi::to_mpz (0, maxc1, TYPE_SIGN (type));
226    }
227  else if (TREE_CODE (varc1) == SSA_NAME
228           && INTEGRAL_TYPE_P (type)
229           && get_range_query (cfun)->range_of_expr (r, varc1)
230           && r.kind () == VR_RANGE)
231    {
232      gcc_assert (wi::le_p (r.lower_bound (), r.upper_bound (), sgn));
233      wi::to_mpz (r.lower_bound (), minc1, sgn);
234      wi::to_mpz (r.upper_bound (), maxc1, sgn);
235    }
236  else
237    {
238      mpz_set (minc1, mint);
239      mpz_set (maxc1, maxt);
240    }
241
242  /* Compute valid range information for varc1 + offc1.  Note nothing
243     useful can be derived if it overflows or underflows.  Overflow or
244     underflow could happen when:
245
246       offc1 > 0 && varc1 + offc1 > MAX_VAL (type)
247       offc1 < 0 && varc1 + offc1 < MIN_VAL (type).  */
248  mpz_add (minc1, minc1, offc1);
249  mpz_add (maxc1, maxc1, offc1);
250  c1_ok = (no_wrap
251           || mpz_sgn (offc1) == 0
252           || (mpz_sgn (offc1) < 0 && mpz_cmp (minc1, mint) >= 0)
253           || (mpz_sgn (offc1) > 0 && mpz_cmp (maxc1, maxt) <= 0));
254  if (!c1_ok)
255    goto end;
256
257  if (mpz_cmp (minc1, mint) < 0)
258    mpz_set (minc1, mint);
259  if (mpz_cmp (maxc1, maxt) > 0)
260    mpz_set (maxc1, maxt);
261
262  if (cmp == LT_EXPR)
263    {
264      cmp = LE_EXPR;
265      mpz_sub_ui (maxc1, maxc1, 1);
266    }
267  if (cmp == GT_EXPR)
268    {
269      cmp = GE_EXPR;
270      mpz_add_ui (minc1, minc1, 1);
271    }
272
273  /* Compute range information for varc0.  If there is no overflow,
274     the condition implied that
275
276       (varc0) cmp (varc1 + offc1 - offc0)
277
278     We can possibly improve the upper bound of varc0 if cmp is LE_EXPR,
279     or the below bound if cmp is GE_EXPR.
280
281     To prove there is no overflow/underflow, we need to check below
282     four cases:
283       1) cmp == LE_EXPR && offc0 > 0
284
285          (varc0 + offc0) doesn't overflow
286          && (varc1 + offc1 - offc0) doesn't underflow
287
288       2) cmp == LE_EXPR && offc0 < 0
289
290          (varc0 + offc0) doesn't underflow
291          && (varc1 + offc1 - offc0) doesn't overflow
292
293          In this case, (varc0 + offc0) will never underflow if we can
294          prove (varc1 + offc1 - offc0) doesn't overflow.
295
296       3) cmp == GE_EXPR && offc0 < 0
297
298          (varc0 + offc0) doesn't underflow
299          && (varc1 + offc1 - offc0) doesn't overflow
300
301       4) cmp == GE_EXPR && offc0 > 0
302
303          (varc0 + offc0) doesn't overflow
304          && (varc1 + offc1 - offc0) doesn't underflow
305
306          In this case, (varc0 + offc0) will never overflow if we can
307          prove (varc1 + offc1 - offc0) doesn't underflow.
308
309     Note we only handle case 2 and 4 in below code.  */
310
311  mpz_sub (minc1, minc1, offc0);
312  mpz_sub (maxc1, maxc1, offc0);
313  c0_ok = (no_wrap
314           || mpz_sgn (offc0) == 0
315           || (cmp == LE_EXPR
316               && mpz_sgn (offc0) < 0 && mpz_cmp (maxc1, maxt) <= 0)
317           || (cmp == GE_EXPR
318               && mpz_sgn (offc0) > 0 && mpz_cmp (minc1, mint) >= 0));
319  if (!c0_ok)
320    goto end;
321
322  if (cmp == LE_EXPR)
323    {
324      if (mpz_cmp (up, maxc1) > 0)
325        mpz_set (up, maxc1);
326    }
327  else
328    {
329      if (mpz_cmp (below, minc1) < 0)
330        mpz_set (below, minc1);
331    }
332
333end:
334  mpz_clear (mint);
335  mpz_clear (maxt);
336  mpz_clear (minc1);
337  mpz_clear (maxc1);
338  mpz_clear (offc0);
339  mpz_clear (offc1);
340}
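
As a concrete instance of the LT_EXPR handling above: a guard i < n on an 8-bit unsigned type, with n known to be at most 100, first becomes i <= n - 1 (the mpz_sub_ui on maxc1) and then tightens the caller's up bound from 255 to 99. A small standalone GMP sketch of just that clamp (not GCC code; the variable names are local to the example):

#include <gmp.h>
#include <cstdio>

int
main ()
{
  mpz_t up, maxc1;
  mpz_init_set_ui (up, 255);      /* current upper bound of i */
  mpz_init_set_ui (maxc1, 100);   /* upper bound of n from range info */
  mpz_sub_ui (maxc1, maxc1, 1);   /* i < n  ==>  i <= n - 1 */
  if (mpz_cmp (up, maxc1) > 0)    /* the clamp in the LE_EXPR branch */
    mpz_set (up, maxc1);
  gmp_printf ("refined up = %Zd\n", up);   /* 99 */
  mpz_clear (up);
  mpz_clear (maxc1);
}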
341
342/* Stores estimate on the minimum/maximum value of the expression VAR + OFF
343 in TYPE to MIN and MAX. */
344
345static void
346determine_value_range (class loop *loop, tree type, tree var, mpz_t off,
347 mpz_t min, mpz_t max)
348{
349 int cnt = 0;
350 mpz_t minm, maxm;
351 basic_block bb;
352 wide_int minv, maxv;
353 enum value_range_kind rtype = VR_VARYING;
354
355 /* If the expression is a constant, we know its value exactly. */
356 if (integer_zerop (var))
357 {
358      mpz_set (min, off);
359      mpz_set (max, off);
360 return;
361 }
362
363 get_type_static_bounds (type, min, max);
364
365 /* See if we have some range info from VRP. */
366  if (TREE_CODE (var) == SSA_NAME && INTEGRAL_TYPE_P (type))
367    {
368      edge e = loop_preheader_edge (loop);
369      signop sgn = TYPE_SIGN (type);
370      gphi_iterator gsi;
371
372      /* Either for VAR itself...  */
373      Value_Range var_range (TREE_TYPE (var));
374      get_range_query (cfun)->range_of_expr (var_range, var);
375      rtype = var_range.kind ();
376      if (!var_range.undefined_p ())
377        {
378          minv = var_range.lower_bound ();
379          maxv = var_range.upper_bound ();
380        }
381
382      /* Or for PHI results in loop->header where VAR is used as
383         PHI argument from the loop preheader edge.  */
384      Value_Range phi_range (TREE_TYPE (var));
385      for (gsi = gsi_start_phis (loop->header); !gsi_end_p (gsi); gsi_next (&gsi))
386        {
387          gphi *phi = gsi.phi ();
388          if (PHI_ARG_DEF_FROM_EDGE (phi, e) == var
389              && get_range_query (cfun)->range_of_expr (phi_range,
390                                                        gimple_phi_result (phi))
391              && phi_range.kind () == VR_RANGE)
392            {
393              if (rtype != VR_RANGE)
394                {
395                  rtype = VR_RANGE;
396                  minv = phi_range.lower_bound ();
397                  maxv = phi_range.upper_bound ();
398                }
399              else
400                {
401                  minv = wi::max (minv, phi_range.lower_bound (), sgn);
402                  maxv = wi::min (maxv, phi_range.upper_bound (), sgn);
403                  /* If the PHI result range are inconsistent with
404                     the VAR range, give up on looking at the PHI
405                     results.  This can happen if VR_UNDEFINED is
406                     involved.  */
407                  if (wi::gt_p (minv, maxv, sgn))
408                    {
409                      Value_Range vr (TREE_TYPE (var));
410                      get_range_query (cfun)->range_of_expr (vr, var);
411                      rtype = vr.kind ();
412                      if (!vr.undefined_p ())
413                        {
414                          minv = vr.lower_bound ();
415                          maxv = vr.upper_bound ();
416                        }
417                      break;
418                    }
419                }
420            }
421        }
422      mpz_init (minm);
423      mpz_init (maxm);
424      if (rtype != VR_RANGE)
425        {
426          mpz_set (minm, min);
427          mpz_set (maxm, max);
428        }
429      else
430        {
431          gcc_assert (wi::le_p (minv, maxv, sgn));
432          wi::to_mpz (minv, minm, sgn);
433          wi::to_mpz (maxv, maxm, sgn);
434        }
435      /* Now walk the dominators of the loop header and use the entry
436         guards to refine the estimates.  */
437      for (bb = loop->header;
438           bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
439           bb = get_immediate_dominator (CDI_DOMINATORS, bb))
440        {
441          edge e;
442          tree c0, c1;
443          gimple *cond;
444          enum tree_code cmp;
445
446          if (!single_pred_p (bb))
447            continue;
448          e = single_pred_edge (bb);
449
450          if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
451            continue;
452
453          cond = last_stmt (e->src);
454          c0 = gimple_cond_lhs (cond);
455          cmp = gimple_cond_code (cond);
456          c1 = gimple_cond_rhs (cond);
457
458          if (e->flags & EDGE_FALSE_VALUE)
459            cmp = invert_tree_comparison (cmp, false);
460
461          refine_value_range_using_guard (type, var, c0, cmp, c1, minm, maxm);
462          ++cnt;
463        }
464
465      mpz_add (minm, minm, off);
466      mpz_add (maxm, maxm, off);
467      /* If the computation may not wrap or off is zero, then this
468         is always fine.  If off is negative and minv + off isn't
469         smaller than type's minimum, or off is positive and
470         maxv + off isn't bigger than type's maximum, use the more
471         precise range too.  */
472      if (nowrap_type_p (type)
473          || mpz_sgn (off) == 0
474          || (mpz_sgn (off) < 0 && mpz_cmp (minm, min) >= 0)
475          || (mpz_sgn (off) > 0 && mpz_cmp (maxm, max) <= 0))
476        {
477          mpz_set (min, minm);
478          mpz_set (max, maxm);
479          mpz_clear (minm);
480          mpz_clear (maxm);
481          return;
482        }
483      mpz_clear (minm);
484      mpz_clear (maxm);
485    }
486
487  /* If the computation may wrap, we know nothing about the value, except for
488     the range of the type.  */
489  if (!nowrap_type_p (type))
490    return;
491
492  /* Since the addition of OFF does not wrap, if OFF is positive, then we may
493     add it to MIN, otherwise to MAX.  */
494  if (mpz_sgn (off) < 0)
495    mpz_add (max, max, off);
496  else
497    mpz_add (min, min, off);
498}
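
The closing fallback deserves a concrete instance: when only the type range is known and the addition of OFF cannot wrap, a positive OFF can only raise the lower bound, and a negative OFF can only lower the upper bound. A plain-integer illustration (not GCC code):

#include <cstdio>

int
main ()
{
  int min = 0, max = 255;   /* 8-bit unsigned type range */
  int off = 5;              /* var + 5 is known not to wrap */
  if (off < 0)
    max += off;
  else
    min += off;
  printf ("var + off in [%d, %d]\n", min, max);   /* [5, 255] */
}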
499
500/* Stores the bounds on the difference of the values of the expressions
501 (var + X) and (var + Y), computed in TYPE, to BNDS. */
502
503static void
504bound_difference_of_offsetted_base (tree type, mpz_t x, mpz_t y,
505 bounds *bnds)
506{
507  int rel = mpz_cmp (x, y);
508  bool may_wrap = !nowrap_type_p (type);
509  mpz_t m;
510
511  /* If X == Y, then the expressions are always equal.
512     If X > Y, there are the following possibilities:
513       a) neither of var + X and var + Y overflow or underflow, or both of
514          them do.  Then their difference is X - Y.
515       b) var + X overflows, and var + Y does not.  Then the values of the
516          expressions are var + X - M and var + Y, where M is the range of
517          the type, and their difference is X - Y - M.
518       c) var + Y underflows and var + X does not.  Their difference again
519          is M - X + Y.
520     Therefore, if the arithmetics in type does not overflow, then the
521     bounds are (X - Y, X - Y), otherwise they are (X - Y - M, X - Y)
522     Similarly, if X < Y, the bounds are either (X - Y, X - Y) or
523     (X - Y, X - Y + M).  */
524
525  if (rel == 0)
526    {
527      mpz_set_ui (bnds->below, 0);
528      mpz_set_ui (bnds->up, 0);
529      return;
530    }
531
532  mpz_init (m);
533  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), m, UNSIGNED);
534  mpz_add_ui (m, m, 1);
535  mpz_sub (bnds->up, x, y);
536  mpz_set (bnds->below, bnds->up);
537
538  if (may_wrap)
539    {
540      if (rel > 0)
541        mpz_sub (bnds->below, bnds->below, m);
542      else
543        mpz_add (bnds->up, bnds->up, m);
544    }
545
546  mpz_clear (m);
547}
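
Case b) of the comment above can be checked exhaustively with concrete numbers: on an 8-bit unsigned type with X = 200, Y = 10 and M = 256, the observed difference of (var + X) and (var + Y) is either X - Y = 190 (no wrap, or both wrap) or X - Y - M = -66 (only var + X wraps), matching the derived bounds. A standalone check (not GCC code):

#include <cstdint>
#include <cstdio>

int
main ()
{
  const int X = 200, Y = 10, M = 256;
  for (int var = 0; var < M; var++)
    {
      uint8_t a = (uint8_t) (var + X);   /* var + X with 8-bit wrap */
      uint8_t b = (uint8_t) (var + Y);   /* var + Y with 8-bit wrap */
      int diff = (int) a - (int) b;
      if (diff != X - Y && diff != X - Y - M)
        {
          printf ("bounds violated at var = %d\n", var);
          return 1;
        }
    }
  printf ("every diff is %d or %d\n", X - Y - M, X - Y);   /* -66 or 190 */
}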
548
549/* From condition C0 CMP C1 derives information regarding the
550 difference of values of VARX + OFFX and VARY + OFFY, computed in TYPE,
551 and stores it to BNDS. */
552
553static void
554refine_bounds_using_guard (tree type, tree varx, mpz_t offx,
555 tree vary, mpz_t offy,
556 tree c0, enum tree_code cmp, tree c1,
557 bounds *bnds)
558{
559 tree varc0, varc1, ctype;
560 mpz_t offc0, offc1, loffx, loffy, bnd;
561 bool lbound = false;
562 bool no_wrap = nowrap_type_p (type);
563 bool x_ok, y_ok;
564
565 switch (cmp)
566 {
567 case LT_EXPR:
568 case LE_EXPR:
569 case GT_EXPR:
570 case GE_EXPR:
571      STRIP_SIGN_NOPS (c0);
572      STRIP_SIGN_NOPS (c1);
573      ctype = TREE_TYPE (c0);
574      if (!useless_type_conversion_p (ctype, type))
575        return;
576
577      break;
578
579    case EQ_EXPR:
580      /* We could derive quite precise information from EQ_EXPR, however, such
581         a guard is unlikely to appear, so we do not bother with handling
582         it.  */
583      return;
584
585    case NE_EXPR:
586      /* NE_EXPR comparisons do not contain much of useful information, except for
587         special case of comparing with the bounds of the type.  */
588      if (TREE_CODE (c1) != INTEGER_CST
589          || !INTEGRAL_TYPE_P (type))
590        return;
591
592      /* Ensure that the condition speaks about an expression in the same type
593         as X and Y.  */
594      ctype = TREE_TYPE (c0);
595      if (TYPE_PRECISION (ctype) != TYPE_PRECISION (type))
596        return;
597      c0 = fold_convert (type, c0);
598      c1 = fold_convert (type, c1);
599
600      if (TYPE_MIN_VALUE (type)
601          && operand_equal_p (c1, TYPE_MIN_VALUE (type), 0))
602        {
603          cmp = GT_EXPR;
604          break;
605        }
606      if (TYPE_MAX_VALUE (type)
607          && operand_equal_p (c1, TYPE_MAX_VALUE (type), 0))
608        {
609          cmp = LT_EXPR;
610          break;
611        }
612
613      return;
614    default:
615      return;
616    }
617
618  mpz_init (offc0);
619  mpz_init (offc1);
620  split_to_var_and_offset (expand_simple_operations (c0), &varc0, offc0);
621  split_to_var_and_offset (expand_simple_operations (c1), &varc1, offc1);
622
623  /* We are only interested in comparisons of expressions based on VARX and
624     VARY.  TODO -- we might also be able to derive some bounds from
625     expressions containing just one of the variables.  */
626
627  if (operand_equal_p (varx, varc1, 0))
628    {
629      std::swap (varc0, varc1);
630      mpz_swap (offc0, offc1);
631      cmp = swap_tree_comparison (cmp);
632    }
633
634  if (!operand_equal_p (varx, varc0, 0)
635      || !operand_equal_p (vary, varc1, 0))
636    goto end;
637
638  mpz_init_set (loffx, offx);
639  mpz_init_set (loffy, offy);
640
641  if (cmp == GT_EXPR || cmp == GE_EXPR)
642    {
643      std::swap (varx, vary);
644      mpz_swap (offc0, offc1);
645      mpz_swap (loffx, loffy);
646      cmp = swap_tree_comparison (cmp);
647      lbound = true;
648    }
649
650  /* If there is no overflow, the condition implies that
651
652     (VARX + OFFX) cmp (VARY + OFFY) + (OFFX - OFFY + OFFC1 - OFFC0).
653
654     The overflows and underflows may complicate things a bit; each
655     overflow decreases the appropriate offset by M, and underflow
656     increases it by M.  The above inequality would not necessarily be
657     true if
658
659     -- VARX + OFFX underflows and VARX + OFFC0 does not, or
660        VARX + OFFC0 overflows, but VARX + OFFX does not.
661        This may only happen if OFFX < OFFC0.
662     -- VARY + OFFY overflows and VARY + OFFC1 does not, or
663        VARY + OFFC1 underflows and VARY + OFFY does not.
664        This may only happen if OFFY > OFFC1.  */
665
666  if (no_wrap)
667    {
668      x_ok = true;
669      y_ok = true;
670    }
671  else
672    {
673      x_ok = (integer_zerop (varx)
674              || mpz_cmp (loffx, offc0) >= 0);
675      y_ok = (integer_zerop (vary)
676              || mpz_cmp (loffy, offc1) <= 0);
677    }
678
679  if (x_ok && y_ok)
680    {
681      mpz_init (bnd);
682      mpz_sub (bnd, loffx, loffy);
683      mpz_add (bnd, bnd, offc1);
684      mpz_sub (bnd, bnd, offc0);
685
686      if (cmp == LT_EXPR)
687        mpz_sub_ui (bnd, bnd, 1);
688
689      if (lbound)
690        {
691          mpz_neg (bnd, bnd);
692          if (mpz_cmp (bnds->below, bnd) < 0)
693            mpz_set (bnds->below, bnd);
694        }
695      else
696        {
697          if (mpz_cmp (bnd, bnds->up) < 0)
698            mpz_set (bnds->up, bnd);
699        }
700      mpz_clear (bnd);
701    }
702
703  mpz_clear (loffx);
704  mpz_clear (loffy);
705end:
706  mpz_clear (offc0);
707  mpz_clear (offc1);
708}
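
With toy numbers: a guard x + 1 < y gives offc0 = 1, offc1 = 0 and cmp = LT_EXPR, so with zero offsets loffx = loffy = 0 the bound computed above is loffx - loffy + offc1 - offc0 - 1 = -2, i.e. x - y <= -2, which agrees with x + 1 <= y - 1. A one-screen sketch of that arithmetic (plain integers instead of mpz_t, not GCC code):

#include <cstdio>

int
main ()
{
  long loffx = 0, loffy = 0;   /* offsets of the compared expressions */
  long offc0 = 1, offc1 = 0;   /* guard was (x + 1) < (y + 0) */
  long bnd = loffx - loffy + offc1 - offc0;
  bnd -= 1;                    /* strict LT_EXPR loses one more */
  printf ("x - y <= %ld\n", bnd);   /* -2 */
}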
709
710/* Stores the bounds on the value of the expression X - Y in LOOP to BNDS.
711 The subtraction is considered to be performed in arbitrary precision,
712 without overflows.
713
714 We do not attempt to be too clever regarding the value ranges of X and
715 Y; most of the time, they are just integers or ssa names offsetted by
716 integer. However, we try to use the information contained in the
717 comparisons before the loop (usually created by loop header copying). */
718
719static void
720bound_difference (class loop *loop, tree x, tree y, bounds *bnds)
721{
722  tree type = TREE_TYPE (x);
723  tree varx, vary;
724  mpz_t offx, offy;
725  mpz_t minx, maxx, miny, maxy;
726  int cnt = 0;
727  edge e;
728  basic_block bb;
729  tree c0, c1;
730  gimple *cond;
731  enum tree_code cmp;
732
733  /* Get rid of unnecessary casts, but preserve the value of
734     the expressions.  */
735  STRIP_SIGN_NOPS (x);
736  STRIP_SIGN_NOPS (y);
737
738  mpz_init (bnds->below);
739  mpz_init (bnds->up);
740  mpz_init (offx);
741  mpz_init (offy);
742  split_to_var_and_offset (x, &varx, offx);
743  split_to_var_and_offset (y, &vary, offy);
744
745  if (!integer_zerop (varx)
746      && operand_equal_p (varx, vary, 0))
747    {
748      /* Special case VARX == VARY -- we just need to compare the
749         offsets.  The matters are a bit more complicated in the
750         case addition of offsets may wrap.  */
751      bound_difference_of_offsetted_base (type, offx, offy, bnds);
752    }
753  else
754    {
755      /* Otherwise, use the value ranges to determine the initial
756         estimates on below and up.  */
757      mpz_init (minx);
758      mpz_init (maxx);
759      mpz_init (miny);
760      mpz_init (maxy);
761      determine_value_range (loop, type, varx, offx, minx, maxx);
762      determine_value_range (loop, type, vary, offy, miny, maxy);
763
764      mpz_sub (bnds->below, minx, maxy);
765      mpz_sub (bnds->up, maxx, miny);
766      mpz_clear (minx);
767      mpz_clear (maxx);
768      mpz_clear (miny);
769      mpz_clear (maxy);
770    }
771
772  /* If both X and Y are constants, we cannot get any more precise.  */
773  if (integer_zerop (varx) && integer_zerop (vary))
774    goto end;
775
776  /* Now walk the dominators of the loop header and use the entry
777     guards to refine the estimates.  */
778  for (bb = loop->header;
779       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
780       bb = get_immediate_dominator (CDI_DOMINATORS, bb))
781    {
782      if (!single_pred_p (bb))
783        continue;
784      e = single_pred_edge (bb);
785
786      if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
787        continue;
788
789      cond = last_stmt (e->src);
790      c0 = gimple_cond_lhs (cond);
791      cmp = gimple_cond_code (cond);
792      c1 = gimple_cond_rhs (cond);
793
794      if (e->flags & EDGE_FALSE_VALUE)
795        cmp = invert_tree_comparison (cmp, false);
796
797      refine_bounds_using_guard (type, varx, offx, vary, offy,
798                                 c0, cmp, c1, bnds);
799      ++cnt;
800    }
801
802end:
803  mpz_clear (offx);
804  mpz_clear (offy);
805}
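
The initial estimate in the else branch above is ordinary interval subtraction: if x is in [minx, maxx] and y is in [miny, maxy], then x - y is in [minx - maxy, maxx - miny]. A toy version with plain integers (the ranges are made up for illustration):

#include <cstdio>

int
main ()
{
  int minx = 0, maxx = 10;    /* assumed range of x */
  int miny = 3, maxy = 7;     /* assumed range of y */
  int below = minx - maxy;    /* -7 */
  int up = maxx - miny;       /*  7 */
  printf ("x - y in [%d, %d]\n", below, up);
}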
806
807/* Update the bounds in BNDS that restrict the value of X to the bounds
808 that restrict the value of X + DELTA. X can be obtained as a
809 difference of two values in TYPE. */
810
811static void
812bounds_add (bounds *bnds, const widest_int &delta, tree type)
813{
814 mpz_t mdelta, max;
815
816  mpz_init (mdelta);
817  wi::to_mpz (delta, mdelta, SIGNED);
818
819  mpz_init (max);
820  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
821
822  mpz_add (bnds->up, bnds->up, mdelta);
823  mpz_add (bnds->below, bnds->below, mdelta);
824
825  if (mpz_cmp (bnds->up, max) > 0)
826    mpz_set (bnds->up, max);
827
828  mpz_neg (max, max);
829  if (mpz_cmp (bnds->below, max) < 0)
830    mpz_set (bnds->below, max);
831
832  mpz_clear (mdelta);
833  mpz_clear (max);
834}
835
836/* Update the bounds in BNDS that restrict the value of X to the bounds
837 that restrict the value of -X. */
838
839static void
840bounds_negate (bounds *bnds)
841{
842 mpz_t tmp;
843
844  mpz_init_set (tmp, bnds->up);
845  mpz_neg (bnds->up, bnds->below);
846  mpz_neg (bnds->below, tmp);
847  mpz_clear (tmp);
848}
849
850/* Returns inverse of X modulo 2^s, where MASK = 2^s-1. */
851
852static tree
853inverse (tree x, tree mask)
854{
855  tree type = TREE_TYPE (x);
856  tree rslt;
857  unsigned ctr = tree_floor_log2 (mask);
858
859  if (TYPE_PRECISION (type) <= HOST_BITS_PER_WIDE_INT)
860    {
861      unsigned HOST_WIDE_INT ix;
862      unsigned HOST_WIDE_INT imask;
863      unsigned HOST_WIDE_INT irslt = 1;
864
865      gcc_assert (cst_and_fits_in_hwi (x));
866      gcc_assert (cst_and_fits_in_hwi (mask));
867
868 ix = int_cst_value (x);
869 imask = int_cst_value (mask);
870
871 for (; ctr; ctr--)
872 {
873 irslt *= ix;
874 ix *= ix;
875 }
876 irslt &= imask;
877
878 rslt = build_int_cst_type (type, irslt);
879 }
880 else
881 {
882 rslt = build_int_cst (type, 1);
883 for (; ctr; ctr--)
884 {
885 rslt = int_const_binop (MULT_EXPR, rslt, x);
886 x = int_const_binop (MULT_EXPR, x, x);
887 }
888 rslt = int_const_binop (BIT_AND_EXPR, rslt, mask);
889 }
890
891 return rslt;
892}
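
The squaring loop above computes x^(2^ctr - 1) truncated to MASK. Since x * x^(2^ctr - 1) = x^(2^ctr) and, for odd x, x^(2^ctr) == 1 modulo 2^(ctr+1), the result is the modular inverse of x. A standalone check for s = 8, so mask = 0xff and ctr = floor_log2 (0xff) = 7 (not GCC code):

#include <cstdint>
#include <cstdio>

/* Same loop as the HOST_WIDE_INT branch above, on uint64_t.  */
static uint64_t
inverse_mod_2exp (uint64_t x, uint64_t mask, unsigned ctr)
{
  uint64_t r = 1;
  for (; ctr; ctr--)
    {
      r *= x;
      x *= x;
    }
  return r & mask;
}

int
main ()
{
  const uint64_t mask = 0xff;              /* 2^8 - 1 */
  for (uint64_t x = 1; x <= mask; x += 2)  /* every odd residue */
    if (((inverse_mod_2exp (x, mask, 7) * x) & mask) != 1)
      {
        printf ("failed for %llu\n", (unsigned long long) x);
        return 1;
      }
  printf ("x * inverse(x) == 1 (mod 256) for all odd x\n");
}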
893
894/* Derives the upper bound BND on the number of executions of loop with exit
895 condition S * i <> C. If NO_OVERFLOW is true, then the control variable of
896 the loop does not overflow. EXIT_MUST_BE_TAKEN is true if we are guaranteed
897 that the loop ends through this exit, i.e., the induction variable ever
898 reaches the value of C.
899
900 The value C is equal to final - base, where final and base are the final and
901 initial value of the actual induction variable in the analysed loop. BNDS
902 bounds the value of this difference when computed in signed type with
903 unbounded range, while the computation of C is performed in an unsigned
904 type with the range matching the range of the type of the induction variable.
905 In particular, BNDS.up contains an upper bound on C in the following cases:
906 -- if the iv must reach its final value without overflow, i.e., if
907 NO_OVERFLOW && EXIT_MUST_BE_TAKEN is true, or
908 -- if final >= base, which we know to hold when BNDS.below >= 0. */
909
910static void
911number_of_iterations_ne_max (mpz_t bnd, bool no_overflow, tree c, tree s,
912 bounds *bnds, bool exit_must_be_taken)
913{
914 widest_int max;
915 mpz_t d;
916  tree type = TREE_TYPE (c);
917  bool bnds_u_valid = ((no_overflow && exit_must_be_taken)
918                       || mpz_sgn (bnds->below) >= 0);
919
920  if (integer_onep (s)
921      || (TREE_CODE (c) == INTEGER_CST
922          && TREE_CODE (s) == INTEGER_CST
923          && wi::mod_trunc (wi::to_wide (c), wi::to_wide (s),
924                            TYPE_SIGN (type)) == 0)
925      || (TYPE_OVERFLOW_UNDEFINED (type)
926          && multiple_of_p (type, c, s)))
927    {
928      /* If C is an exact multiple of S, then its value will be reached before
929         the induction variable overflows (unless the loop is exited in some
930         other way before).  Note that the actual induction variable in the
931         loop (which ranges from base to final instead of from 0 to C) may
932         overflow, in which case BNDS.up will not be giving a correct upper
933         bound on C; thus, BNDS_U_VALID had to be computed in advance.  */
934      no_overflow = true;
935      exit_must_be_taken = true;
936    }
937
938  /* If the induction variable can overflow, the number of iterations is at
939     most the period of the control variable (or infinite, but in that case
940     the whole # of iterations analysis will fail).  */
941  if (!no_overflow)
942    {
943      max = wi::mask <widest_int> (TYPE_PRECISION (type)
944                                   - wi::ctz (wi::to_wide (s)), false);
945      wi::to_mpz (max, bnd, UNSIGNED);
946      return;
947    }
948
949  /* Now we know that the induction variable does not overflow, so the loop
950     iterates at most (range of type / S) times.  */
951  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), bnd, UNSIGNED);
952
953  /* If the induction variable is guaranteed to reach the value of C before
954     overflow, ... */
955  if (exit_must_be_taken)
956    {
957      /* ... then we can strengthen this to C / S, and possibly we can use
958         the upper bound on C given by BNDS.  */
959      if (TREE_CODE (c) == INTEGER_CST)
960        wi::to_mpz (wi::to_wide (c), bnd, UNSIGNED);
961      else if (bnds_u_valid)
962        mpz_set (bnd, bnds->up);
963    }
964
965  mpz_init (d);
966  wi::to_mpz (wi::to_wide (s), d, UNSIGNED);
967  mpz_fdiv_q (bnd, bnd, d);
968  mpz_clear (d);
969}
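
For the !no_overflow case above, a concrete instance: with an 8-bit type and step s = 6 (one trailing zero bit), the sequence s*i modulo 256 has period 2^(8-1) = 128, so the mask 2^7 - 1 = 127 bounds the number of distinct iterations. A small standalone computation of that mask (not GCC code):

#include <cstdint>
#include <cstdio>

int
main ()
{
  const unsigned prec = 8;   /* 8-bit induction variable */
  const unsigned ctz = 1;    /* s = 6 has one trailing zero bit */
  uint64_t max = (1u << (prec - ctz)) - 1;   /* wi::mask (prec - ctz) */
  printf ("iteration bound = %llu\n", (unsigned long long) max);   /* 127 */
}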
970
971/* Determines number of iterations of loop whose ending condition
972 is IV <> FINAL. TYPE is the type of the iv. The number of
973 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
974 we know that the exit must be taken eventually, i.e., that the IV
975 ever reaches the value FINAL (we derived this earlier, and possibly set
976 NITER->assumptions to make sure this is the case). BNDS contains the
977 bounds on the difference FINAL - IV->base. */
978
979static bool
980number_of_iterations_ne (class loop *loop, tree type, affine_iv *iv,
981 tree final, class tree_niter_desc *niter,
982 bool exit_must_be_taken, bounds *bnds)
983{
984 tree niter_type = unsigned_type_for (type);
985 tree s, c, d, bits, assumption, tmp, bound;
986 mpz_t max;
987
988 niter->control = *iv;
989 niter->bound = final;
990 niter->cmp = NE_EXPR;
991
992 /* Rearrange the terms so that we get inequality S * i <> C, with S
993 positive. Also cast everything to the unsigned type. If IV does
994 not overflow, BNDS bounds the value of C. Also, this is the
995 case if the computation |FINAL - IV->base| does not overflow, i.e.,
996 if BNDS->below in the result is nonnegative. */
997 if (tree_int_cst_sign_bit (iv->step))
998 {
999      s = fold_convert (niter_type,
1000                       fold_build1 (NEGATE_EXPR, type, iv->step));
1001      c = fold_build2 (MINUS_EXPR, niter_type,
1002                       fold_convert (niter_type, iv->base),
1003                       fold_convert (niter_type, final));
1004      bounds_negate (bnds);
1005    }
1006  else
1007    {
1008      s = fold_convert (niter_type, iv->step);
1009      c = fold_build2 (MINUS_EXPR, niter_type,
1010                       fold_convert (niter_type, final),
1011                       fold_convert (niter_type, iv->base));
1012    }
1013
1014  mpz_init (max);
1015  number_of_iterations_ne_max (max, iv->no_overflow, c, s, bnds,
1016                               exit_must_be_taken);
1017  niter->max = widest_int::from (wi::from_mpz (niter_type, max, false),
1018                                 TYPE_SIGN (niter_type));
1019  mpz_clear (max);
1020
1021  /* Compute no-overflow information for the control iv.  This can be
1022     proven when below two conditions are satisfied:
1023
1024       1) IV evaluates toward FINAL at beginning, i.e:
1025            base <= FINAL ; step > 0
1026            base >= FINAL ; step < 0
1027
1028       2) |FINAL - base| is an exact multiple of step.
1029
1030     Unfortunately, it's hard to prove above conditions after pass loop-ch
1031     because loop with exit condition (IV != FINAL) usually will be guarded
1032     by initial-condition (IV.base - IV.step != FINAL).  In this case, we
1033     can alternatively try to prove below conditions:
1034
1035       1') IV evaluates toward FINAL at beginning, i.e:
1036            new_base = base - step < FINAL ; step > 0
1037                                             && base - step doesn't underflow
1038            new_base = base - step > FINAL ; step < 0
1039                                             && base - step doesn't overflow
1040
1041     Please refer to PR34114 as an example of loop-ch's impact.
1042
1043     Note, for NE_EXPR, base equals to FINAL is a special case, in
1044     which the loop exits immediately, and the iv does not overflow.
1045
1046     Also note, we prove condition 2) by checking base and final separately
1047     along with condition 1) or 1').  Since we ensure the difference
1048     computation of c does not wrap with cond below and the adjusted s
1049     will fit a signed type as well as an unsigned we can safely do
1050     this using the type of the IV if it is not pointer typed.  */
1051  tree mtype = type;
1052  if (POINTER_TYPE_P (type))
1053    mtype = niter_type;
1054  if (!niter->control.no_overflow
1055      && (integer_onep (s)
1056          || (multiple_of_p (mtype, fold_convert (mtype, iv->base),
1057                             fold_convert (mtype, s), false)
1058              && multiple_of_p (mtype, fold_convert (mtype, final),
1059                                fold_convert (mtype, s), false))))
1060    {
1061      tree t, cond, relaxed_cond = boolean_false_node;
1062
1063      if (tree_int_cst_sign_bit (iv->step))
1064        {
1065          cond = fold_build2 (GE_EXPR, boolean_type_node, iv->base, final);
1066          if (TREE_CODE (type) == INTEGER_TYPE)
1067            {
1068              /* Only when base - step doesn't overflow.  */
1069              t = TYPE_MAX_VALUE (type);
1070              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1071              t = fold_build2 (GE_EXPR, boolean_type_node, t, iv->base);
1072              if (integer_nonzerop (t))
1073                {
1074                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1075                  relaxed_cond = fold_build2 (GT_EXPR, boolean_type_node, t,
1076                                              final);
1077                }
1078            }
1079        }
1080      else
1081        {
1082          cond = fold_build2 (LE_EXPR, boolean_type_node, iv->base, final);
1083          if (TREE_CODE (type) == INTEGER_TYPE)
1084            {
1085              /* Only when base - step doesn't underflow.  */
1086              t = TYPE_MIN_VALUE (type);
1087              t = fold_build2 (PLUS_EXPR, type, t, iv->step);
1088              t = fold_build2 (LE_EXPR, boolean_type_node, t, iv->base);
1089              if (integer_nonzerop (t))
1090                {
1091                  t = fold_build2 (MINUS_EXPR, type, iv->base, iv->step);
1092                  relaxed_cond = fold_build2 (LT_EXPR, boolean_type_node, t,
1093                                              final);
1094                }
1095            }
1096        }
1097
1098      t = simplify_using_initial_conditions (loop, cond);
1099      if (!t || !integer_onep (t))
1100        t = simplify_using_initial_conditions (loop, relaxed_cond);
1101
1102      if (t && integer_onep (t))
1103        {
1104          niter->control.no_overflow = true;
1105          niter->niter = fold_build2 (EXACT_DIV_EXPR, niter_type, c, s);
1106          return true;
1107        }
1108    }
1109
1110  /* Let nsd (step, size of mode) = d.  If d does not divide c, the loop
1111     is infinite.  Otherwise, the number of iterations is
1112     (inverse(s/d) * (c/d)) mod (size of mode/d).  */
1113  bits = num_ending_zeros (s);
1114  bound = build_low_bits_mask (niter_type,
1115                               (TYPE_PRECISION (niter_type)
1116                                - tree_to_uhwi (bits)));
1117
1118  d = fold_binary_to_constant (LSHIFT_EXPR, niter_type,
1119                               build_int_cst (niter_type, 1), bits);
1120  s = fold_binary_to_constant (RSHIFT_EXPR, niter_type, s, bits);
1121
1122  if (!exit_must_be_taken)
1123    {
1124      /* If we cannot assume that the exit is taken eventually, record the
1125         assumptions for divisibility of c.  */
1126      assumption = fold_build2 (FLOOR_MOD_EXPR, niter_type, c, d);
1127      assumption = fold_build2 (EQ_EXPR, boolean_type_node,
1128                                assumption, build_int_cst (niter_type, 0));
1129      if (!integer_nonzerop (assumption))
1130        niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1131                                          niter->assumptions, assumption);
1132    }
1133
1134  c = fold_build2 (EXACT_DIV_EXPR, niter_type, c, d);
1135  if (integer_onep (s))
1136    {
1137      niter->niter = c;
1138    }
1139  else
1140    {
1141      tmp = fold_build2 (MULT_EXPR, niter_type, c, inverse (s, bound));
1142      niter->niter = fold_build2 (BIT_AND_EXPR, niter_type, tmp, bound);
1143    }
1144 return true;
1145}
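
A worked instance of that closed form (standalone, not GCC code): for the exit test s * i != c on an 8-bit type with s = 6 and c = 18, we get d = 2, s/d = 3, c/d = 9, and niter = (c/d) * inverse (s/d) mod 2^(8-1). With inverse(3) = 43 mod 128, niter = 9 * 43 mod 128 = 3, and indeed 6 * 3 = 18:

#include <cstdint>
#include <cstdio>

int
main ()
{
  const uint32_t prec = 8, s = 6, c = 18;       /* exit test: 6 * i != 18 */
  uint32_t bits = __builtin_ctz (s);            /* num_ending_zeros (s) = 1 */
  uint32_t bound = (1u << (prec - bits)) - 1;   /* low-bits mask = 127 */
  uint32_t d = 1u << bits;                      /* d = 2 */
  uint32_t s2 = s >> bits, c2 = c / d;          /* s/d = 3, c/d = 9 */
  uint32_t inv = 1, x = s2;                     /* inverse (s2, bound) */
  for (uint32_t ctr = prec - bits - 1; ctr; ctr--)
    {
      inv = (inv * x) & bound;
      x = (x * x) & bound;
    }
  uint32_t niter = (c2 * inv) & bound;
  printf ("niter = %u, s * niter = %u\n", niter, s * niter);   /* 3, 18 */
}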
1146
1147/* Checks whether we can determine the final value of the control variable
1148 of the loop with ending condition IV0 < IV1 (computed in TYPE).
1149 DELTA is the difference IV1->base - IV0->base, STEP is the absolute value
1150 of the step. The assumptions necessary to ensure that the computation
1151 of the final value does not overflow are recorded in NITER. If we
1152 find the final value, we adjust DELTA and return TRUE. Otherwise
1153 we return false. BNDS bounds the value of IV1->base - IV0->base,
1154 and will be updated by the same amount as DELTA. EXIT_MUST_BE_TAKEN is
1155 true if we know that the exit must be taken eventually. */
1156
1157static bool
1158number_of_iterations_lt_to_ne (tree type, affine_iv *iv0, affine_iv *iv1,
1159 class tree_niter_desc *niter,
1160 tree *delta, tree step,
1161 bool exit_must_be_taken, bounds *bnds)
1162{
1163 tree niter_type = TREE_TYPE (step)((contains_struct_check ((step), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/tree-ssa-loop-niter.cc"
, 1163, __FUNCTION__))->typed.type)
;
1164 tree mod = fold_build2 (FLOOR_MOD_EXPR, niter_type, *delta, step)fold_build2_loc (((location_t) 0), FLOOR_MOD_EXPR, niter_type
, *delta, step )
;
1165 tree tmod;
1166 mpz_t mmod;
1167 tree assumption = boolean_true_nodeglobal_trees[TI_BOOLEAN_TRUE], bound, noloop;
1168 bool ret = false, fv_comp_no_overflow;
1169 tree type1 = type;
1170 if (POINTER_TYPE_P (type)(((enum tree_code) (type)->base.code) == POINTER_TYPE || (
(enum tree_code) (type)->base.code) == REFERENCE_TYPE)
)
1171 type1 = sizetypesizetype_tab[(int) stk_sizetype];
1172
1173 if (TREE_CODE (mod)((enum tree_code) (mod)->base.code) != INTEGER_CST)
1174 return false;
1175 if (integer_nonzerop (mod))
1176 mod = fold_build2 (MINUS_EXPR, niter_type, step, mod)fold_build2_loc (((location_t) 0), MINUS_EXPR, niter_type, step
, mod )
;
1177 tmod = fold_convert (type1, mod)fold_convert_loc (((location_t) 0), type1, mod);
1178
1179 mpz_init__gmpz_init (mmod);
1180 wi::to_mpz (wi::to_wide (mod), mmod, UNSIGNED);
1181 mpz_neg__gmpz_neg (mmod, mmod);
1182
1183 /* If the induction variable does not overflow and the exit is taken,
1184 then the computation of the final value does not overflow. This is
1185 also obviously the case if the new final value is equal to the
1186 current one. Finally, we postulate this for pointer type variables,
1187 as the code cannot rely on the object to that the pointer points being
1188 placed at the end of the address space (and more pragmatically,
1189 TYPE_{MIN,MAX}_VALUE is not defined for pointers). */
1190  if (integer_zerop (mod) || POINTER_TYPE_P (type))
1191    fv_comp_no_overflow = true;
1192  else if (!exit_must_be_taken)
1193    fv_comp_no_overflow = false;
1194  else
1195    fv_comp_no_overflow =
1196      (iv0->no_overflow && integer_nonzerop (iv0->step))
1197      || (iv1->no_overflow && integer_nonzerop (iv1->step));
1198
1199  if (integer_nonzerop (iv0->step))
1200    {
1201      /* The final value of the iv is iv1->base + MOD, assuming that this
1202         computation does not overflow, and that
1203         iv0->base <= iv1->base + MOD.  */
1204      if (!fv_comp_no_overflow)
1205        {
1206          bound = fold_build2 (MINUS_EXPR, type1,
1207                               TYPE_MAX_VALUE (type1), tmod);
1208          assumption = fold_build2 (LE_EXPR, boolean_type_node,
1209                                    iv1->base, bound);
1210          if (integer_zerop (assumption))
1211            goto end;
1212        }
1213      if (mpz_cmp (mmod, bnds->below) < 0)
1214        noloop = boolean_false_node;
1215      else if (POINTER_TYPE_P (type))
1216        noloop = fold_build2 (GT_EXPR, boolean_type_node,
1217                              iv0->base,
1218                              fold_build_pointer_plus (iv1->base, tmod));
1219      else
1220        noloop = fold_build2 (GT_EXPR, boolean_type_node,
1221                              iv0->base,
1222                              fold_build2 (PLUS_EXPR, type1,
1223                                           iv1->base, tmod));
1224    }
1225 else
1226 {
1227 /* The final value of the iv is iv0->base - MOD, assuming that this
1228 computation does not overflow, and that
1229 iv0->base - MOD <= iv1->base. */
1230 if (!fv_comp_no_overflow)
1231 {
1232          bound = fold_build2 (PLUS_EXPR, type1,
1233                               TYPE_MIN_VALUE (type1), tmod);
1234          assumption = fold_build2 (GE_EXPR, boolean_type_node,
1235                                    iv0->base, bound);
1236          if (integer_zerop (assumption))
1237            goto end;
1238        }
1239      if (mpz_cmp (mmod, bnds->below) < 0)
1240        noloop = boolean_false_node;
1241      else if (POINTER_TYPE_P (type))
1242        noloop = fold_build2 (GT_EXPR, boolean_type_node,
1243                              fold_build_pointer_plus (iv0->base,
1244                                                       fold_build1 (NEGATE_EXPR,
1245                                                                    type1, tmod)),
1246                              iv1->base);
1247      else
1248        noloop = fold_build2 (GT_EXPR, boolean_type_node,
1249                              fold_build2 (MINUS_EXPR, type1,
1250                                           iv0->base, tmod),
1251                              iv1->base);
1252    }
1253
1254  if (!integer_nonzerop (assumption))
1255    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1256                                      niter->assumptions,
1257                                      assumption);
1258  if (!integer_zerop (noloop))
1259    niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1260                                      niter->may_be_zero,
1261                                      noloop);
1262  bounds_add (bnds, wi::to_widest (mod), type);
1263  *delta = fold_build2 (PLUS_EXPR, niter_type, *delta, mod);
1264
1265  ret = true;
1266 end:
1267  mpz_clear (mmod);
1268  return ret;
1269 }
1270
1271/* Add assertions to NITER that ensure that the control variable of the loop
1272 with ending condition IV0 < IV1 does not overflow. Types of IV0 and IV1
1273 are TYPE. Returns false if we can prove that there is an overflow, true
1274 otherwise. STEP is the absolute value of the step. */
1275
1276static bool
1277assert_no_overflow_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1278 class tree_niter_desc *niter, tree step)
1279{
1280 tree bound, d, assumption, diff;
1281  tree niter_type = TREE_TYPE (step);
1282
1283 if (integer_nonzerop (iv0->step))
1284 {
1285 /* for (i = iv0->base; i < iv1->base; i += iv0->step) */
1286 if (iv0->no_overflow)
1287 return true;
1288
1289 /* If iv0->base is a constant, we can determine the last value before
1290 overflow precisely; otherwise we conservatively assume
1291 MAX - STEP + 1. */
1292
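      /* Illustrative example for the constant-base case (unsigned char
         assumed): with iv0->base == 1 and step == 3, d = 255 - 1 = 254,
         diff = 254 mod 3 = 2, and bound = 255 - 2 = 253 is the last value
         the iv reaches before wrapping, so the recorded assumption is
         iv1->base <= 253.  */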
1293      if (TREE_CODE (iv0->base) == INTEGER_CST)
1294        {
1295          d = fold_build2 (MINUS_EXPR, niter_type,
1296                           fold_convert (niter_type, TYPE_MAX_VALUE (type)),
1297                           fold_convert (niter_type, iv0->base));
1298          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1299        }
1300      else
1301        diff = fold_build2 (MINUS_EXPR, niter_type, step,
1302                            build_int_cst (niter_type, 1));
1303      bound = fold_build2 (MINUS_EXPR, type,
1304                           TYPE_MAX_VALUE (type), fold_convert (type, diff));
1305      assumption = fold_build2 (LE_EXPR, boolean_type_node,
1306                                iv1->base, bound);
1307 }
1308 else
1309 {
1310 /* for (i = iv1->base; i > iv0->base; i += iv1->step) */
1311 if (iv1->no_overflow)
1312 return true;
1313
1314      if (TREE_CODE (iv1->base) == INTEGER_CST)
1315        {
1316          d = fold_build2 (MINUS_EXPR, niter_type,
1317                           fold_convert (niter_type, iv1->base),
1318                           fold_convert (niter_type, TYPE_MIN_VALUE (type)));
1319          diff = fold_build2 (FLOOR_MOD_EXPR, niter_type, d, step);
1320        }
1321      else
1322        diff = fold_build2 (MINUS_EXPR, niter_type, step,
1323                            build_int_cst (niter_type, 1));
1324      bound = fold_build2 (PLUS_EXPR, type,
1325                           TYPE_MIN_VALUE (type), fold_convert (type, diff));
1326      assumption = fold_build2 (GE_EXPR, boolean_type_node,
1327                                iv0->base, bound);
1328 }
1329
1330 if (integer_zerop (assumption))
1331 return false;
1332 if (!integer_nonzerop (assumption))
1333    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1334                                      niter->assumptions, assumption);
1335
1336 iv0->no_overflow = true;
1337 iv1->no_overflow = true;
1338 return true;
1339}
1340
1341/* Add an assumption to NITER that a loop whose ending condition
1342 is IV0 < IV1 rolls. TYPE is the type of the control iv. BNDS
1343 bounds the value of IV1->base - IV0->base. */
1344
1345static void
1346assert_loop_rolls_lt (tree type, affine_iv *iv0, affine_iv *iv1,
1347 class tree_niter_desc *niter, bounds *bnds)
1348{
1349  tree assumption = boolean_true_node, bound, diff;
1350 tree mbz, mbzl, mbzr, type1;
1351 bool rolls_p, no_overflow_p;
1352 widest_int dstep;
1353 mpz_t mstep, max;
1354
1355 /* We are going to compute the number of iterations as
1356 (iv1->base - iv0->base + step - 1) / step, computed in the unsigned
1357 variant of TYPE. This formula only works if
1358
1359 -step + 1 <= (iv1->base - iv0->base) <= MAX - step + 1
1360
1361 (where MAX is the maximum value of the unsigned variant of TYPE, and
1362 the computations in this formula are performed in full precision,
1363 i.e., without overflows).
1364
1365 Usually, for loops with exit condition iv0->base + step * i < iv1->base,
1366 we have a condition of the form iv0->base - step < iv1->base before the loop,
1367 and for loops iv0->base < iv1->base - step * i the condition
1368 iv0->base < iv1->base + step, due to loop header copying, which enable us
1369 to prove the lower bound.
1370
1371 The upper bound is more complicated. Unless the expressions for initial
1372 and final value themselves contain enough information, we usually cannot
1373 derive it from the context. */
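  /* Illustrative check of the precondition (unsigned char assumed): with
     iv0 = {2, +, 3} and iv1->base == 9, iv1->base - iv0->base = 7 and
     -3 + 1 = -2 <= 7 <= 255 - 3 + 1 = 253 holds, so the formula yields
     (9 - 2 + 3 - 1) / 3 = 3 iterations (i = 2, 5, 8).  */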
1374
1375 /* First check whether the answer does not follow from the bounds we gathered
1376 before. */
1377 if (integer_nonzerop (iv0->step))
1378 dstep = wi::to_widest (iv0->step);
1379 else
1380 {
1381      dstep = wi::sext (wi::to_widest (iv1->step), TYPE_PRECISION (type));
1382 dstep = -dstep;
1383 }
1384
1385  mpz_init (mstep);
1386  wi::to_mpz (dstep, mstep, UNSIGNED);
1387  mpz_neg (mstep, mstep);
1388  mpz_add_ui (mstep, mstep, 1);
1389
1390  rolls_p = mpz_cmp (mstep, bnds->below) <= 0;
1391
1392  mpz_init (max);
1393  wi::to_mpz (wi::minus_one (TYPE_PRECISION (type)), max, UNSIGNED);
1394  mpz_add (max, max, mstep);
1395  no_overflow_p = (mpz_cmp (bnds->up, max) <= 0
1396                   /* For pointers, only values lying inside a single object
1397                      can be compared or manipulated by pointer arithmetic.
1398                      GCC in general does not allow or handle objects larger
1399                      than half of the address space, hence the upper bound
1400                      is satisfied for pointers.  */
1401                   || POINTER_TYPE_P (type));
1402  mpz_clear (mstep);
1403  mpz_clear (max);
1404
1405 if (rolls_p && no_overflow_p)
1406 return;
1407
1408 type1 = type;
1409  if (POINTER_TYPE_P (type))
1410    type1 = sizetype;
1411
1412 /* Now the hard part; we must formulate the assumption(s) as expressions, and
1413 we must be careful not to introduce overflow. */
1414
1415 if (integer_nonzerop (iv0->step))
1416 {
1417      diff = fold_build2 (MINUS_EXPR, type1,
1418                          iv0->step, build_int_cst (type1, 1));
1419
1420 /* We need to know that iv0->base >= MIN + iv0->step - 1. Since
1421         the 0 address never belongs to any object, we can assume this for
1422 pointers. */
1423      if (!POINTER_TYPE_P (type))
1424        {
1425          bound = fold_build2 (PLUS_EXPR, type1,
1426                               TYPE_MIN_VALUE (type), diff);
1427          assumption = fold_build2 (GE_EXPR, boolean_type_node,
1428                                    iv0->base, bound);
1429        }
1430
1431 /* And then we can compute iv0->base - diff, and compare it with
1432 iv1->base. */
1433      mbzl = fold_build2 (MINUS_EXPR, type1,
1434                          fold_convert (type1, iv0->base), diff);
1435      mbzr = fold_convert (type1, iv1->base);
1436 }
1437 else
1438 {
1439      diff = fold_build2 (PLUS_EXPR, type1,
1440                          iv1->step, build_int_cst (type1, 1));
1441
1442      if (!POINTER_TYPE_P (type))
1443        {
1444          bound = fold_build2 (PLUS_EXPR, type1,
1445                               TYPE_MAX_VALUE (type), diff);
1446          assumption = fold_build2 (LE_EXPR, boolean_type_node,
1447                                    iv1->base, bound);
1448        }
1449
1450      mbzl = fold_convert (type1, iv0->base);
1451      mbzr = fold_build2 (MINUS_EXPR, type1,
1452                          fold_convert (type1, iv1->base), diff);
1453 }
1454
1455  if (!integer_nonzerop (assumption))
1456    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1457                                      niter->assumptions, assumption);
1458 if (!rolls_p)
1459 {
1460      mbz = fold_build2 (GT_EXPR, boolean_type_node, mbzl, mbzr);
1461      niter->may_be_zero = fold_build2 (TRUTH_OR_EXPR, boolean_type_node,
1462                                        niter->may_be_zero, mbz);
1463 }
1464}
1465
1466/* Determines number of iterations of loop whose ending condition
1467    is IV0 < IV1 and looks like {base, -C} < n, or n < {base, C}.
1468 The number of iterations is stored to NITER. */
1469
1470static bool
1471number_of_iterations_until_wrap (class loop *loop, tree type, affine_iv *iv0,
1472 affine_iv *iv1, class tree_niter_desc *niter)
1473{
1474 tree niter_type = unsigned_type_for (type);
1475 tree step, num, assumptions, may_be_zero, span;
1476 wide_int high, low, max, min;
1477
1478  may_be_zero = fold_build2 (LE_EXPR, boolean_type_node, iv1->base, iv0->base);
1479 if (integer_onep (may_be_zero))
1480 return false;
1481
1482  int prec = TYPE_PRECISION (type);
1483  signop sgn = TYPE_SIGN (type);
1484 min = wi::min_value (prec, sgn);
1485 max = wi::max_value (prec, sgn);
1486
1487 /* n < {base, C}. */
1488 if (integer_zerop (iv0->step) && !tree_int_cst_sign_bit (iv1->step))
1489 {
1490 step = iv1->step;
1491 /* MIN + C - 1 <= n. */
1492 tree last = wide_int_to_tree (type, min + wi::to_wide (step) - 1);
1493      assumptions = fold_build2 (LE_EXPR, boolean_type_node, last, iv0->base);
1494 if (integer_zerop (assumptions))
1495 return false;
1496
1497      num = fold_build2 (MINUS_EXPR, niter_type,
1498                         wide_int_to_tree (niter_type, max),
1499                         fold_convert (niter_type, iv1->base));
1500
1501 /* When base has the form iv + 1, if we know iv >= n, then iv + 1 < n
1502 only when iv + 1 overflows, i.e. when iv == TYPE_VALUE_MAX. */
1503 if (sgn == UNSIGNED
1504 && integer_onep (step)
1505          && TREE_CODE (iv1->base) == PLUS_EXPR
1506          && integer_onep (TREE_OPERAND (iv1->base, 1)))
1507        {
1508          tree cond = fold_build2 (GE_EXPR, boolean_type_node,
1509                                   TREE_OPERAND (iv1->base, 0), iv0->base);
1510          cond = simplify_using_initial_conditions (loop, cond);
1511          if (integer_onep (cond))
1512            may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node,
1513                                       TREE_OPERAND (iv1->base, 0),
1514                                       TYPE_MAX_VALUE (type));
1515        }
1516
1517 high = max;
1518      if (TREE_CODE (iv1->base) == INTEGER_CST)
1519        low = wi::to_wide (iv1->base) - 1;
1520      else if (TREE_CODE (iv0->base) == INTEGER_CST)
1521 low = wi::to_wide (iv0->base);
1522 else
1523 low = min;
1524 }
1525 /* {base, -C} < n. */
1526 else if (tree_int_cst_sign_bit (iv0->step) && integer_zerop (iv1->step))
1527 {
1528      step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv0->step), iv0->step);
1529 /* MAX - C + 1 >= n. */
1530 tree last = wide_int_to_tree (type, max - wi::to_wide (step) + 1);
1531      assumptions = fold_build2 (GE_EXPR, boolean_type_node, last, iv1->base);
1532 if (integer_zerop (assumptions))
1533 return false;
1534
1535      num = fold_build2 (MINUS_EXPR, niter_type,
1536                         fold_convert (niter_type, iv0->base),
1537                         wide_int_to_tree (niter_type, min));
1538 low = min;
1539      if (TREE_CODE (iv0->base) == INTEGER_CST)
1540        high = wi::to_wide (iv0->base) + 1;
1541      else if (TREE_CODE (iv1->base) == INTEGER_CST)
1542 high = wi::to_wide (iv1->base);
1543 else
1544 high = max;
1545 }
1546 else
1547 return false;
1548
1549 /* (delta + step - 1) / step */
1550  step = fold_convert (niter_type, step);
1551  num = fold_build2 (PLUS_EXPR, niter_type, num, step);
1552  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, num, step);
1553
1554 widest_int delta, s;
1555 delta = widest_int::from (high, sgn) - widest_int::from (low, sgn);
1556 s = wi::to_widest (step);
1557 delta = delta + s - 1;
1558 niter->max = wi::udiv_floor (delta, s);
1559
1560 niter->may_be_zero = may_be_zero;
1561
1562  if (!integer_nonzerop (assumptions))
1563    niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1564                                      niter->assumptions, assumptions);
1565
1566 niter->control.no_overflow = false;
1567
1568 /* Update bound and exit condition as:
1569 bound = niter * STEP + (IVbase - STEP).
1570 { IVbase - STEP, +, STEP } != bound
1571 Here, biasing IVbase by 1 step makes 'bound' be the value before wrap.
1572 */
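  /* Continuing the unsigned char example above: with niter == 82 and
     iv1 = {10, +, 3}, the biased base is 10 - 3 = 7, span = 82 * 3 = 246,
     and bound = 246 + 7 = 253 -- the last value before the wrap -- so the
     rewritten exit test is {7, +, 3} != 253.  */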
1573  tree base_type = TREE_TYPE (niter->control.base);
1574  if (POINTER_TYPE_P (base_type))
1575 {
1576 tree utype = unsigned_type_for (base_type);
1577 niter->control.base
1578        = fold_build2 (MINUS_EXPR, utype,
1579                       fold_convert (utype, niter->control.base),
1580                       fold_convert (utype, niter->control.step));
1581      niter->control.base = fold_convert (base_type, niter->control.base);
1582 }
1583 else
1584 niter->control.base
1585      = fold_build2 (MINUS_EXPR, base_type, niter->control.base,
1586                     niter->control.step);
1587
1588  span = fold_build2 (MULT_EXPR, niter_type, niter->niter,
1589                      fold_convert (niter_type, niter->control.step));
1590  niter->bound = fold_build2 (PLUS_EXPR, niter_type, span,
1591                              fold_convert (niter_type, niter->control.base));
1592  niter->bound = fold_convert (type, niter->bound);
1593 niter->cmp = NE_EXPR;
1594
1595 return true;
1596}
1597
1598/* Determines number of iterations of loop whose ending condition
1599 is IV0 < IV1. TYPE is the type of the iv. The number of
1600 iterations is stored to NITER. BNDS bounds the difference
1601 IV1->base - IV0->base. EXIT_MUST_BE_TAKEN is true if we know
1602 that the exit must be taken eventually. */
1603
1604static bool
1605number_of_iterations_lt (class loop *loop, tree type, affine_iv *iv0,
1606 affine_iv *iv1, class tree_niter_desc *niter,
1607 bool exit_must_be_taken, bounds *bnds)
1608{
1609 tree niter_type = unsigned_type_for (type);
1610 tree delta, step, s;
1611 mpz_t mstep, tmp;
1612
1613 if (integer_nonzerop (iv0->step))
1614 {
1615 niter->control = *iv0;
1616 niter->cmp = LT_EXPR;
1617 niter->bound = iv1->base;
1618 }
1619 else
1620 {
1621 niter->control = *iv1;
1622 niter->cmp = GT_EXPR;
1623 niter->bound = iv0->base;
1624 }
1625
1626 /* {base, -C} < n, or n < {base, C} */
1627 if (tree_int_cst_sign_bit (iv0->step)
1628 || (!integer_zerop (iv1->step) && !tree_int_cst_sign_bit (iv1->step)))
1629 return number_of_iterations_until_wrap (loop, type, iv0, iv1, niter);
1630
1631  delta = fold_build2 (MINUS_EXPR, niter_type,
1632                       fold_convert (niter_type, iv1->base),
1633                       fold_convert (niter_type, iv0->base));
1634
1635 /* First handle the special case that the step is +-1. */
1636 if ((integer_onep (iv0->step) && integer_zerop (iv1->step))
1637 || (integer_all_onesp (iv1->step) && integer_zerop (iv0->step)))
1638 {
1639 /* for (i = iv0->base; i < iv1->base; i++)
1640
1641 or
1642
1643 for (i = iv1->base; i > iv0->base; i--).
1644
1645 In both cases # of iterations is iv1->base - iv0->base, assuming that
1646 iv1->base >= iv0->base.
1647
1648 First try to derive a lower bound on the value of
1649 iv1->base - iv0->base, computed in full precision. If the difference
1650 is nonnegative, we are done, otherwise we must record the
1651 condition. */
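      /* Illustrative example: for (i = 3; i < 10; i++) gives
         niter = 10 - 3 = 7 directly; may_be_zero records the
         iv1->base < iv0->base test only when the lower bound on the
         difference of the bases may be negative.  */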
1652
1653      if (mpz_sgn (bnds->below) < 0)
1654        niter->may_be_zero = fold_build2 (LT_EXPR, boolean_type_node,
1655                                          iv1->base, iv0->base);
1656 niter->niter = delta;
1657      niter->max = widest_int::from (wi::from_mpz (niter_type, bnds->up, false),
1658                                     TYPE_SIGN (niter_type));
1659 niter->control.no_overflow = true;
1660 return true;
1661 }
1662
1663 if (integer_nonzerop (iv0->step))
1664    step = fold_convert (niter_type, iv0->step);
1665  else
1666    step = fold_convert (niter_type,
1667                         fold_build1 (NEGATE_EXPR, type, iv1->step));
1668
1669 /* If we can determine the final value of the control iv exactly, we can
1670 transform the condition to != comparison. In particular, this will be
1671 the case if DELTA is constant. */
1672 if (number_of_iterations_lt_to_ne (type, iv0, iv1, niter, &delta, step,
1673 exit_must_be_taken, bnds))
1674 {
1675 affine_iv zps;
1676
1677 zps.base = build_int_cst (niter_type, 0);
1678 zps.step = step;
1679 /* number_of_iterations_lt_to_ne will add assumptions that ensure that
1680 zps does not overflow. */
1681 zps.no_overflow = true;
1682
1683 return number_of_iterations_ne (loop, type, &zps,
1684 delta, niter, true, bnds);
1685 }
1686
1687 /* Make sure that the control iv does not overflow. */
1688 if (!assert_no_overflow_lt (type, iv0, iv1, niter, step))
1689 return false;
1690
1691 /* We determine the number of iterations as (delta + step - 1) / step. For
1692 this to work, we must know that iv1->base >= iv0->base - step + 1,
1693 otherwise the loop does not roll. */
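  /* Illustrative arithmetic: with delta == 7 and step == 3 we get
     s = 3 - 1 = 2 and niter->niter = (7 + 2) / 3 = 3; the rolling
     precondition iv1->base >= iv0->base - step + 1 is what
     assert_loop_rolls_lt records when BNDS cannot prove it.  */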
1694 assert_loop_rolls_lt (type, iv0, iv1, niter, bnds);
1695
1696  s = fold_build2 (MINUS_EXPR, niter_type,
1697                   step, build_int_cst (niter_type, 1));
1698  delta = fold_build2 (PLUS_EXPR, niter_type, delta, s);
1699  niter->niter = fold_build2 (FLOOR_DIV_EXPR, niter_type, delta, step);
1700
1701  mpz_init (mstep);
1702  mpz_init (tmp);
1703  wi::to_mpz (wi::to_wide (step), mstep, UNSIGNED);
1704  mpz_add (tmp, bnds->up, mstep);
1705  mpz_sub_ui (tmp, tmp, 1);
1706  mpz_fdiv_q (tmp, tmp, mstep);
1707  niter->max = widest_int::from (wi::from_mpz (niter_type, tmp, false),
1708                                 TYPE_SIGN (niter_type));
1711
1712 return true;
1713}
1714
1715/* Determines number of iterations of loop whose ending condition
1716 is IV0 <= IV1. TYPE is the type of the iv. The number of
1717 iterations is stored to NITER. EXIT_MUST_BE_TAKEN is true if
1718 we know that this condition must eventually become false (we derived this
1719 earlier, and possibly set NITER->assumptions to make sure this
1720 is the case). BNDS bounds the difference IV1->base - IV0->base. */
1721
1722static bool
1723number_of_iterations_le (class loop *loop, tree type, affine_iv *iv0,
1724 affine_iv *iv1, class tree_niter_desc *niter,
1725 bool exit_must_be_taken, bounds *bnds)
1726{
1727 tree assumption;
1728 tree type1 = type;
1729  if (POINTER_TYPE_P (type))
1730    type1 = sizetype;
1731
1732 /* Say that IV0 is the control variable. Then IV0 <= IV1 iff
1733 IV0 < IV1 + 1, assuming that IV1 is not equal to the greatest
1734 value of the type. This we must know anyway, since if it is
1735 equal to this value, the loop rolls forever. We do not check
1736 this condition for pointer type ivs, as the code cannot rely on
1737     the object to which the pointer points being placed at the end of
1738 the address space (and more pragmatically, TYPE_{MIN,MAX}_VALUE is
1739 not defined for pointers). */
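  /* Illustrative example: for (i = 0; i <= 9; i++) becomes
     i < 9 + 1 = 10; for an unsigned char iv the assumption rules out
     iv1->base == 255, where i <= 255 would hold forever.  */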
1740
1741  if (!exit_must_be_taken && !POINTER_TYPE_P (type))
1742 {
1743 if (integer_nonzerop (iv0->step))
1744      assumption = fold_build2 (NE_EXPR, boolean_type_node,
1745                                iv1->base, TYPE_MAX_VALUE (type));
1746    else
1747      assumption = fold_build2 (NE_EXPR, boolean_type_node,
1748                                iv0->base, TYPE_MIN_VALUE (type));
1749
1750 if (integer_zerop (assumption))
1751 return false;
1752 if (!integer_nonzerop (assumption))
1753      niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
1754                                        niter->assumptions, assumption);
1755 }
1756
1757 if (integer_nonzerop (iv0->step))
1758 {
1759      if (POINTER_TYPE_P (type))
1760        iv1->base = fold_build_pointer_plus_hwi (iv1->base, 1);
1761      else
1762        iv1->base = fold_build2 (PLUS_EXPR, type1, iv1->base,
1763                                 build_int_cst (type1, 1));
1764    }
1765  else if (POINTER_TYPE_P (type))
1766    iv0->base = fold_build_pointer_plus_hwi (iv0->base, -1);
1767  else
1768    iv0->base = fold_build2 (MINUS_EXPR, type1,
1769                             iv0->base, build_int_cst (type1, 1));
1770
1771 bounds_add (bnds, 1, type1);
1772
1773 return number_of_iterations_lt (loop, type, iv0, iv1, niter, exit_must_be_taken,
1774 bnds);
1775}
1776
1777/* Dumps description of affine induction variable IV to FILE. */
1778
1779static void
1780dump_affine_iv (FILE *file, affine_iv *iv)
1781{
1782 if (!integer_zerop (iv->step))
1783 fprintf (file, "[");
1784
1785  print_generic_expr (file, iv->base, TDF_SLIM);
1786
1787 if (!integer_zerop (iv->step))
1788 {
1789 fprintf (file, ", + , ");
1790      print_generic_expr (file, iv->step, TDF_SLIM);
1791 fprintf (file, "]%s", iv->no_overflow ? "(no_overflow)" : "");
1792 }
1793}
1794
1795/* Determine the number of iterations according to condition (for staying
1796 inside loop) which compares two induction variables using comparison
1797 operator CODE. The induction variable on left side of the comparison
1798 is IV0, the right-hand side is IV1. Both induction variables must have
1799 type TYPE, which must be an integer or pointer type. The steps of the
1800 ivs must be constants (or NULL_TREE, which is interpreted as constant zero).
1801
1802 LOOP is the loop whose number of iterations we are determining.
1803
1804 ONLY_EXIT is true if we are sure this is the only way the loop could be
1805 exited (including possibly non-returning function calls, exceptions, etc.)
1806 -- in this case we can use the information whether the control induction
1807 variables can overflow or not in a more efficient way.
1808
1809    If EVERY_ITERATION is true, we know the test is executed on every iteration.
1810
1811 The results (number of iterations and assumptions as described in
1812 comments at class tree_niter_desc in tree-ssa-loop.h) are stored to NITER.
1813 Returns false if it fails to determine number of iterations, true if it
1814 was determined (possibly with some assumptions). */
1815
1816static bool
1817number_of_iterations_cond (class loop *loop,
1818 tree type, affine_iv *iv0, enum tree_code code,
1819 affine_iv *iv1, class tree_niter_desc *niter,
1820 bool only_exit, bool every_iteration)
1821{
1822 bool exit_must_be_taken = false, ret;
1823 bounds bnds;
1824
1825 /* If the test is not executed every iteration, wrapping may make the test
1826    pass again.
1827 TODO: the overflow case can be still used as unreliable estimate of upper
1828 bound. But we have no API to pass it down to number of iterations code
1829 and, at present, it will not use it anyway. */
1830 if (!every_iteration
1831 && (!iv0->no_overflow || !iv1->no_overflow
1832 || code == NE_EXPR || code == EQ_EXPR))
1833 return false;
1834
1835 /* The meaning of these assumptions is this:
1836 if !assumptions
1837 then the rest of information does not have to be valid
1838 if may_be_zero then the loop does not roll, even if
1839 niter != 0. */
1840  niter->assumptions = boolean_true_node;
1841  niter->may_be_zero = boolean_false_node;
1842  niter->niter = NULL_TREE;
1843  niter->max = 0;
1844  niter->bound = NULL_TREE;
1845 niter->cmp = ERROR_MARK;
1846
1847 /* Make < comparison from > ones, and for NE_EXPR comparisons, ensure that
1848 the control variable is on lhs. */
1849 if (code == GE_EXPR || code == GT_EXPR
1850 || (code == NE_EXPR && integer_zerop (iv0->step)))
1851 {
1852 std::swap (iv0, iv1);
1853 code = swap_tree_comparison (code);
1854 }
1855
1856  if (POINTER_TYPE_P (type))
1857 {
1858 /* Comparison of pointers is undefined unless both iv0 and iv1 point
1859 to the same object. If they do, the control variable cannot wrap
1860 (as wrap around the bounds of memory will never return a pointer
1861 that would be guaranteed to point to the same object, even if we
1862 avoid undefined behavior by casting to size_t and back). */
1863 iv0->no_overflow = true;
1864 iv1->no_overflow = true;
1865 }
1866
1867 /* If the control induction variable does not overflow and the only exit
1868 from the loop is the one that we analyze, we know it must be taken
1869 eventually. */
1870 if (only_exit)
1871 {
1872 if (!integer_zerop (iv0->step) && iv0->no_overflow)
1873 exit_must_be_taken = true;
1874 else if (!integer_zerop (iv1->step) && iv1->no_overflow)
1875 exit_must_be_taken = true;
1876 }
1877
1878 /* We can handle cases which neither of the sides of the comparison is
1879 invariant:
1880
1881 {iv0.base, iv0.step} cmp_code {iv1.base, iv1.step}
1882 as if:
1883 {iv0.base, iv0.step - iv1.step} cmp_code {iv1.base, 0}
1884
1885    provided that either of the conditions below is satisfied:
1886
1887 a) the test is NE_EXPR;
1888 b) iv0 and iv1 do not overflow and iv0.step - iv1.step is of
1889 the same sign and of less or equal magnitude than iv0.step
1890
1891 This rarely occurs in practice, but it is simple enough to manage. */
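  /* Illustrative example (assuming neither iv overflows):
     {0, +, 4} != {100, +, 2} is rewritten as {0, +, 2} != {100, +, 0};
     the combined step 4 - 2 = 2 keeps the sign of 4 and does not exceed
     its magnitude, and the transformed loop exits after 100 / 2 = 50
     iterations.  */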
1892 if (!integer_zerop (iv0->step) && !integer_zerop (iv1->step))
1893 {
1894      tree step_type = POINTER_TYPE_P (type) ? sizetype : type;
1895 tree step = fold_binary_to_constant (MINUS_EXPR, step_type,
1896 iv0->step, iv1->step);
1897
1898 /* For code other than NE_EXPR we have to ensure moving the evolution
1899 of IV1 to that of IV0 does not introduce overflow. */
1900      if (TREE_CODE (step) != INTEGER_CST
1901 || !iv0->no_overflow || !iv1->no_overflow)
1902 {
1903 if (code != NE_EXPR)
1904 return false;
1905 iv0->no_overflow = false;
1906 }
1907 /* If the new step of IV0 has changed sign or is of greater
1908 magnitude then we do not know whether IV0 does overflow
1909 and thus the transform is not valid for code other than NE_EXPR. */
1910 else if (tree_int_cst_sign_bit (step) != tree_int_cst_sign_bit (iv0->step)
1911 || wi::gtu_p (wi::abs (wi::to_widest (step)),
1912 wi::abs (wi::to_widest (iv0->step))))
1913 {
1914          if (POINTER_TYPE_P (type) && code != NE_EXPR)
1915 /* For relational pointer compares we have further guarantees
1916 that the pointers always point to the same object (or one
1917 after it) and that objects do not cross the zero page. So
1918 not only is the transform always valid for relational
1919 pointer compares, we also know the resulting IV does not
1920 overflow. */
1921 ;
1922 else if (code != NE_EXPR)
1923 return false;
1924 else
1925 iv0->no_overflow = false;
1926 }
1927
1928 iv0->step = step;
1929 iv1->step = build_int_cst (step_type, 0);
1930 iv1->no_overflow = true;
1931 }
1932
1933 /* If the result of the comparison is a constant, the loop is weird. More
1934 precise handling would be possible, but the situation is not common enough
1935 to waste time on it. */
1936 if (integer_zerop (iv0->step) && integer_zerop (iv1->step))
1937 return false;
1938
1939 /* If the loop exits immediately, there is nothing to do. */
1940  tree tem = fold_binary (code, boolean_type_node, iv0->base, iv1->base);
1941 if (tem && integer_zerop (tem))
1942 {
1943 if (!every_iteration)
1944 return false;
1945 niter->niter = build_int_cst (unsigned_type_for (type), 0);
1946 niter->max = 0;
1947 return true;
1948 }
1949
1950 /* OK, now we know we have a sensible loop.  Handle several cases, depending
1951 on what comparison operator is used. */
1952 bound_difference (loop, iv1->base, iv0->base, &bnds);
1953
1954 if (dump_file && (dump_flags & TDF_DETAILS))
1955 {
1956 fprintf (dump_file,
1957 "Analyzing # of iterations of loop %d\n", loop->num);
1958
1959 fprintf (dump_file, " exit condition ");
1960 dump_affine_iv (dump_file, iv0);
1961 fprintf (dump_file, " %s ",
1962 code == NE_EXPR ? "!="
1963 : code == LT_EXPR ? "<"
1964 : "<=");
1965 dump_affine_iv (dump_file, iv1);
1966 fprintf (dump_file, "\n");
1967
1968 fprintf (dump_file, " bounds on difference of bases: ");
1969      mpz_out_str (dump_file, 10, bnds.below);
1970      fprintf (dump_file, " ... ");
1971      mpz_out_str (dump_file, 10, bnds.up);
1972 fprintf (dump_file, "\n");
1973 }
1974
1975 switch (code)
1976 {
1977 case NE_EXPR:
1978      gcc_assert (integer_zerop (iv1->step));
1979 ret = number_of_iterations_ne (loop, type, iv0, iv1->base, niter,
1980 exit_must_be_taken, &bnds);
1981 break;
1982
1983 case LT_EXPR:
1984 ret = number_of_iterations_lt (loop, type, iv0, iv1, niter,
1985 exit_must_be_taken, &bnds);
1986 break;
1987
1988 case LE_EXPR:
1989 ret = number_of_iterations_le (loop, type, iv0, iv1, niter,
1990 exit_must_be_taken, &bnds);
1991 break;
1992
1993 default:
1994      gcc_unreachable ();
1996
1997  mpz_clear (bnds.up);
1998  mpz_clear (bnds.below);
1999
2000 if (dump_file && (dump_flags & TDF_DETAILS))
2001 {
2002 if (ret)
2003 {
2004 fprintf (dump_file, " result:\n");
2005 if (!integer_nonzerop (niter->assumptions))
2006 {
2007 fprintf (dump_file, " under assumptions ");
2008 print_generic_expr (dump_file, niter->assumptions, TDF_SLIM);
2009 fprintf (dump_file, "\n");
2010 }
2011
2012 if (!integer_zerop (niter->may_be_zero))
2013 {
2014 fprintf (dump_file, " zero if ");
2015 print_generic_expr (dump_file, niter->may_be_zero, TDF_SLIM);
2016 fprintf (dump_file, "\n");
2017 }
2018
2019 fprintf (dump_file, " # of iterations ");
2020 print_generic_expr (dump_file, niter->niter, TDF_SLIM);
2021 fprintf (dump_file, ", bounded by ");
2022 print_decu (niter->max, dump_file);
2023 fprintf (dump_file, "\n");
2024 }
2025 else
2026 fprintf (dump_file, " failed\n\n");
2027 }
2028 return ret;
2029}
2030
2031/* Return an expression that computes the popcount of src. */
2032
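/* Illustrative dispatch (assuming a typical LP64 target where int is
   32 bits and long and long long are 64): IFN_POPCOUNT is used whenever
   the target supports it directly; otherwise a source of at most 32 bits
   maps to __builtin_popcount, a 64-bit one to __builtin_popcountl or
   __builtin_popcountll, and a 128-bit one to two __builtin_popcountll
   calls on the two halves, summed.  */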
2033static tree
2034build_popcount_expr (tree src)
2035{
2036 tree fn;
2037 bool use_ifn = false;
2038  int prec = TYPE_PRECISION (TREE_TYPE (src));
2039  int i_prec = TYPE_PRECISION (integer_type_node);
2040  int li_prec = TYPE_PRECISION (long_integer_type_node);
2041  int lli_prec = TYPE_PRECISION (long_long_integer_type_node);
2042
2043  tree utype = unsigned_type_for (TREE_TYPE (src));
2044  src = fold_convert (utype, src);
2045
2046 if (direct_internal_fn_supported_p (IFN_POPCOUNT, utype, OPTIMIZE_FOR_BOTH))
2047 use_ifn = true;
2048 else if (prec <= i_prec)
2049 fn = builtin_decl_implicit (BUILT_IN_POPCOUNT);
2050 else if (prec == li_prec)
2051 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTL);
2052 else if (prec == lli_prec || prec == 2 * lli_prec)
2053 fn = builtin_decl_implicit (BUILT_IN_POPCOUNTLL);
2054 else
2055    return NULL_TREE;
2056
2057 tree call;
2058 if (use_ifn)
2059    call = build_call_expr_internal_loc (UNKNOWN_LOCATION, IFN_POPCOUNT,
2060                                         integer_type_node, 1, src);
2061 else if (prec == 2 * lli_prec)
2062 {
2063      tree src1 = fold_convert (long_long_unsigned_type_node,
2064                                fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
2065                                             unshare_expr (src),
2066                                             build_int_cst (integer_type_node,
2067                                                            lli_prec)));
2068      tree src2 = fold_convert (long_long_unsigned_type_node, src);
2069 tree call1 = build_call_expr (fn, 1, src1);
2070 tree call2 = build_call_expr (fn, 1, src2);
2071 call = fold_build2 (PLUS_EXPR, integer_type_node, call1, call2)fold_build2_loc (((location_t) 0), PLUS_EXPR, integer_types[itk_int
], call1, call2 )
;
2072 }
2073 else
2074 {
2075 if (prec < i_prec)
2076        src = fold_convert (unsigned_type_node, src);
2077
2078 call = build_call_expr (fn, 1, src);
2079 }
2080
2081 return call;
2082}
2083
2084/* Utility function to check if OP is defined by a stmt
2085   that computes VAL - 1.  */
2086
2087static bool
2088ssa_defined_by_minus_one_stmt_p (tree op, tree val)
2089{
2090 gimple *stmt;
2091  return (TREE_CODE (op) == SSA_NAME
2092          && (stmt = SSA_NAME_DEF_STMT (op))
2093 && is_gimple_assign (stmt)
2094 && (gimple_assign_rhs_code (stmt) == PLUS_EXPR)
2095 && val == gimple_assign_rhs1 (stmt)
2096 && integer_minus_onep (gimple_assign_rhs2 (stmt)));
2097}
2098
2099/* See comment below for number_of_iterations_bitcount.
2100 For popcount, we have:
2101
2102 modify:
2103 _1 = iv_1 + -1
2104 iv_2 = iv_1 & _1
2105
2106 test:
2107 if (iv != 0)
2108
2109 modification count:
2110 popcount (src)
2111
2112 */
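/* For illustration, a C source loop matching this idiom (a sketch; names
   are arbitrary):

     unsigned iv = src;
     int c = 0;
     while (iv != 0)
       {
         iv &= iv - 1;   // clears the lowest set bit
         c++;            // runs popcount (src) times in total
       }
*/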
2113
2114static bool
2115number_of_iterations_popcount (loop_p loop, edge exit,
2116 enum tree_code code,
2117 class tree_niter_desc *niter)
2118{
2119 bool modify_before_test = true;
2120  HOST_WIDE_INT max;
2121
2122 /* Check that condition for staying inside the loop is like
2123 if (iv != 0). */
2124 gimple *cond_stmt = last_stmt (exit->src);
2125 if (!cond_stmt
2126 || gimple_code (cond_stmt) != GIMPLE_COND
2127 || code != NE_EXPR
2128 || !integer_zerop (gimple_cond_rhs (cond_stmt))
2129      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
2130 return false;
2131
2132 tree iv_2 = gimple_cond_lhs (cond_stmt);
2133  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2134
2135 /* If the test comes before the iv modification, then these will actually be
2136 iv_1 and a phi node. */
2137 if (gimple_code (iv_2_stmt) == GIMPLE_PHI
2138 && gimple_bb (iv_2_stmt) == loop->header
2139 && gimple_phi_num_args (iv_2_stmt) == 2
2140      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
2141                                         loop_latch_edge (loop)->dest_idx))
2142          == SSA_NAME))
2143 {
2144 /* iv_2 is actually one of the inputs to the phi. */
2145 iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
2146      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2147 modify_before_test = false;
2148 }
2149
2150 /* Make sure iv_2_stmt is an and stmt (iv_2 = _1 & iv_1). */
2151 if (!is_gimple_assign (iv_2_stmt)
2152 || gimple_assign_rhs_code (iv_2_stmt) != BIT_AND_EXPR)
2153 return false;
2154
2155 tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);
2156 tree _1 = gimple_assign_rhs2 (iv_2_stmt);
2157
2158  /* Check that _1 is defined by (_1 = iv_1 + -1).
2159     Also make sure that _1 is the same in the and_stmt and in the stmt
2160     defining _1.  Also canonicalize if _1 and iv_1 are reversed.  */
2161 if (ssa_defined_by_minus_one_stmt_p (iv_1, _1))
2162 std::swap (iv_1, _1);
2163 else if (ssa_defined_by_minus_one_stmt_p (_1, iv_1))
2164 ;
2165 else
2166 return false;
2167
2168 /* Check the recurrence. */
2169  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
2170 if (gimple_code (phi) != GIMPLE_PHI
2171 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2172 || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2173 return false;
2174
2175 /* We found a match. */
2176 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2177  int src_precision = TYPE_PRECISION (TREE_TYPE (src));
2178
2179 /* Get the corresponding popcount builtin. */
2180 tree expr = build_popcount_expr (src);
2181
2182 if (!expr)
2183 return false;
2184
2185 max = src_precision;
2186
2187  tree may_be_zero = boolean_false_node;
2188
2189 if (modify_before_test)
2190 {
2191      expr = fold_build2 (MINUS_EXPR, integer_type_node, expr,
2192                          integer_one_node);
2193      max = max - 1;
2194      may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2195                                 build_zero_cst (TREE_TYPE (src)));
2196 }
2197
2198  expr = fold_convert (unsigned_type_node, expr);
2199
2200  niter->assumptions = boolean_true_node;
2201 niter->may_be_zero = simplify_using_initial_conditions (loop, may_be_zero);
2202  niter->niter = simplify_using_initial_conditions (loop, expr);
2203
2204  if (TREE_CODE (niter->niter) == INTEGER_CST)
2205 niter->max = tree_to_uhwi (niter->niter);
2206 else
2207 niter->max = max;
2208
2209  niter->bound = NULL_TREE;
2210 niter->cmp = ERROR_MARK;
2211 return true;
2212}
2213
2214/* Return an expression that counts the leading/trailing zeroes of src.
2215
2216 If define_at_zero is true, then the built expression will be defined to
2217 return the precision of src when src == 0 (using either a conditional
2218 expression or a suitable internal function).
2219 Otherwise, we can elide the conditional expression and let src = 0 invoke
2220 undefined behaviour. */
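/* For illustration (a sketch): with a 32-bit SRC, LEADING and
   DEFINE_AT_ZERO set, and the internal-function path not defining the
   value at zero, the expression built below is equivalent to

     src != 0 ? __builtin_clz (src) : 32
*/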
2221
2222static tree
2223build_cltz_expr (tree src, bool leading, bool define_at_zero)
2224{
2225 tree fn;
2226 internal_fn ifn = leading ? IFN_CLZ : IFN_CTZ;
2227 bool use_ifn = false;
2228  int prec = TYPE_PRECISION (TREE_TYPE (src));
2229  int i_prec = TYPE_PRECISION (integer_type_node);
2230  int li_prec = TYPE_PRECISION (long_integer_type_node);
2231  int lli_prec = TYPE_PRECISION (long_long_integer_type_node);
2232
2233  tree utype = unsigned_type_for (TREE_TYPE (src));
2234  src = fold_convert (utype, src);
2235
2236 if (direct_internal_fn_supported_p (ifn, utype, OPTIMIZE_FOR_BOTH))
2237 use_ifn = true;
2238 else if (prec <= i_prec)
2239 fn = leading ? builtin_decl_implicit (BUILT_IN_CLZ)
2240 : builtin_decl_implicit (BUILT_IN_CTZ);
2241 else if (prec == li_prec)
2242 fn = leading ? builtin_decl_implicit (BUILT_IN_CLZL)
2243 : builtin_decl_implicit (BUILT_IN_CTZL);
2244 else if (prec == lli_prec || prec == 2 * lli_prec)
2245 fn = leading ? builtin_decl_implicit (BUILT_IN_CLZLL)
2246 : builtin_decl_implicit (BUILT_IN_CTZLL);
2247 else
2248    return NULL_TREE;
2249
2250 tree call;
2251 if (use_ifn)
2252 {
2253      call = build_call_expr_internal_loc (UNKNOWN_LOCATION, ifn,
2254                                           integer_type_node, 1, src);
2255 int val;
2256 int optab_defined_at_zero
2257 = (leading
2258           ? CLZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (utype), val)
2259           : CTZ_DEFINED_VALUE_AT_ZERO (SCALAR_INT_TYPE_MODE (utype), val));
2260 if (define_at_zero && !(optab_defined_at_zero == 2 && val == prec))
2261 {
2262          tree is_zero = fold_build2 (NE_EXPR, boolean_type_node, src,
2263                                      build_zero_cst (TREE_TYPE (src)));
2264          call = fold_build3 (COND_EXPR, integer_type_node, is_zero, call,
2265                              build_int_cst (integer_type_node, prec));
2266 }
2267 }
2268 else if (prec == 2 * lli_prec)
2269 {
2270      tree src1 = fold_convert (long_long_unsigned_type_node,
2271                                fold_build2 (RSHIFT_EXPR, TREE_TYPE (src),
2272                                             unshare_expr (src),
2273                                             build_int_cst (integer_type_node,
2274                                                            lli_prec)));
2275      tree src2 = fold_convert (long_long_unsigned_type_node, src);
2276 /* We count the zeroes in src1, and add the number in src2 when src1
2277 is 0. */
2278 if (!leading)
2279 std::swap (src1, src2);
2280 tree call1 = build_call_expr (fn, 1, src1);
2281 tree call2 = build_call_expr (fn, 1, src2);
2282 if (define_at_zero)
2283 {
2284          tree is_zero2 = fold_build2 (NE_EXPR, boolean_type_node, src2,
2285                                       build_zero_cst (TREE_TYPE (src2)));
2286          call2 = fold_build3 (COND_EXPR, integer_type_node, is_zero2, call2,
2287                               build_int_cst (integer_type_node, lli_prec));
2288 }
2289      tree is_zero1 = fold_build2 (NE_EXPR, boolean_type_node, src1,
2290                                   build_zero_cst (TREE_TYPE (src1)));
2291      call = fold_build3 (COND_EXPR, integer_type_node, is_zero1, call1,
2292                          fold_build2 (PLUS_EXPR, integer_type_node, call2,
2293                                       build_int_cst (integer_type_node,
2294                                                      lli_prec)));
2295 }
2296 else
2297 {
2298 if (prec < i_prec)
2299        src = fold_convert (unsigned_type_node, src);
2300
2301 call = build_call_expr (fn, 1, src);
2302 if (define_at_zero)
2303 {
2304          tree is_zero = fold_build2 (NE_EXPR, boolean_type_node, src,
2305                                      build_zero_cst (TREE_TYPE (src)));
2306          call = fold_build3 (COND_EXPR, integer_type_node, is_zero, call,
2307                              build_int_cst (integer_type_node, prec));
2308 }
2309
2310 if (leading && prec < i_prec)
2311        call = fold_build2 (MINUS_EXPR, integer_type_node, call,
2312                            build_int_cst (integer_type_node, i_prec - prec));
2313 }
2314
2315 return call;
2316}
2317
2318/* See comment below for number_of_iterations_bitcount.
2319 For c[lt]z, we have:
2320
2321 modify:
2322 iv_2 = iv_1 << 1 OR iv_1 >> 1
2323
2324 test:
2325 if (iv & 1 << (prec-1)) OR (iv & 1)
2326
2327 modification count:
2328 src precision - c[lt]z (src)
2329
2330 */
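/* For illustration, a C source loop matching the clz flavour of this
   idiom (a sketch; names are arbitrary, src a nonzero 32-bit value):

     unsigned iv = src;
     int c = 0;
     while ((iv & (1u << 31)) == 0)   // stay while the top bit is clear
       {
         iv <<= 1;
         c++;                         // runs clz (src) times in total
       }
*/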
2331
2332static bool
2333number_of_iterations_cltz (loop_p loop, edge exit,
2334 enum tree_code code,
2335 class tree_niter_desc *niter)
2336{
2337 bool modify_before_test = true;
2338  HOST_WIDE_INT max;
2339 int checked_bit;
2340 tree iv_2;
2341
2342 /* Check that condition for staying inside the loop is like
2343 if (iv == 0). */
2344 gimple *cond_stmt = last_stmt (exit->src);
2345 if (!cond_stmt
2346 || gimple_code (cond_stmt) != GIMPLE_COND
2347 || (code != EQ_EXPR && code != GE_EXPR)
2348 || !integer_zerop (gimple_cond_rhs (cond_stmt))
2349      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
2350 return false;
2351
2352 if (code == EQ_EXPR)
2353 {
2354      /* Make sure we check a bitwise AND with a suitable constant.  */
2355      gimple *and_stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond_stmt));
2356 if (!is_gimple_assign (and_stmt)
2357 || gimple_assign_rhs_code (and_stmt) != BIT_AND_EXPR
2358 || !integer_pow2p (gimple_assign_rhs2 (and_stmt))
2359          || TREE_CODE (gimple_assign_rhs1 (and_stmt)) != SSA_NAME)
2360 return false;
2361
2362 checked_bit = tree_log2 (gimple_assign_rhs2 (and_stmt));
2363
2364 iv_2 = gimple_assign_rhs1 (and_stmt);
2365 }
2366 else
2367 {
2368 /* We have a GE_EXPR - a signed comparison with zero is equivalent to
2369 testing the leading bit, so check for this pattern too. */
2370
2371 iv_2 = gimple_cond_lhs (cond_stmt);
2372      tree test_value_type = TREE_TYPE (iv_2);
2373
2374      if (TYPE_UNSIGNED (test_value_type))
2375 return false;
2376
2377      gimple *test_value_stmt = SSA_NAME_DEF_STMT (iv_2);
2378
2379 if (is_gimple_assign (test_value_stmt)
2380 && gimple_assign_rhs_code (test_value_stmt) == NOP_EXPR)
2381 {
2382 /* If the test value comes from a NOP_EXPR, then we need to unwrap
2383 this. We conservatively require that both types have the same
2384 precision. */
2385 iv_2 = gimple_assign_rhs1 (test_value_stmt);
2386          tree rhs_type = TREE_TYPE (iv_2);
2387          if (TREE_CODE (iv_2) != SSA_NAME
2388              || TREE_CODE (rhs_type) != INTEGER_TYPE
2389              || (TYPE_PRECISION (rhs_type)
2390                  != TYPE_PRECISION (test_value_type)))
2391 return false;
2392 }
2393
2394      checked_bit = TYPE_PRECISION (test_value_type) - 1;
2395 }
2396
2397  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2398
2399 /* If the test comes before the iv modification, then these will actually be
2400 iv_1 and a phi node. */
2401 if (gimple_code (iv_2_stmt) == GIMPLE_PHI
2402 && gimple_bb (iv_2_stmt) == loop->header
2403 && gimple_phi_num_args (iv_2_stmt) == 2
2404      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
2405                                         loop_latch_edge (loop)->dest_idx))
2406          == SSA_NAME))
2407 {
2408 /* iv_2 is actually one of the inputs to the phi. */
2409 iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
2410      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2411 modify_before_test = false;
2412 }
2413
2414 /* Make sure iv_2_stmt is a logical shift by one stmt:
2415 iv_2 = iv_1 {<<|>>} 1 */
2416 if (!is_gimple_assign (iv_2_stmt)
2417 || (gimple_assign_rhs_code (iv_2_stmt) != LSHIFT_EXPR
2418 && (gimple_assign_rhs_code (iv_2_stmt) != RSHIFT_EXPR
2419              || !TYPE_UNSIGNED (TREE_TYPE (gimple_assign_lhs (iv_2_stmt)))))
2420 || !integer_onep (gimple_assign_rhs2 (iv_2_stmt)))
2421 return false;
2422
2423 bool left_shift = (gimple_assign_rhs_code (iv_2_stmt) == LSHIFT_EXPR);
2424
2425 tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);
2426
2427 /* Check the recurrence. */
2428  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
2429 if (gimple_code (phi) != GIMPLE_PHI
2430 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2431 || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2432 return false;
2433
2434 /* We found a match. */
2435 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2436  int src_precision = TYPE_PRECISION (TREE_TYPE (src));
2437
2438 /* Apply any needed preprocessing to src. */
2439 int num_ignored_bits;
2440 if (left_shift)
2441 num_ignored_bits = src_precision - checked_bit - 1;
2442 else
2443 num_ignored_bits = checked_bit;
2444
2445 if (modify_before_test)
2446 num_ignored_bits++;
2447
2448 if (num_ignored_bits != 0)
2449    src = fold_build2 (left_shift ? LSHIFT_EXPR : RSHIFT_EXPR,
2450                       TREE_TYPE (src), src,
2451                       build_int_cst (integer_type_node, num_ignored_bits));
2452
2453 /* Get the corresponding c[lt]z builtin. */
2454 tree expr = build_cltz_expr (src, left_shift, false);
2455
2456 if (!expr)
2457 return false;
2458
2459 max = src_precision - num_ignored_bits - 1;
2460
2461  expr = fold_convert (unsigned_type_node, expr);
2462
2463  tree assumptions = fold_build2 (NE_EXPR, boolean_type_node, src,
2464                                  build_zero_cst (TREE_TYPE (src)));
2465
2466 niter->assumptions = simplify_using_initial_conditions (loop, assumptions);
2467  niter->may_be_zero = boolean_false_node;
2468 niter->niter = simplify_using_initial_conditions (loop, expr);
2469
2470  if (TREE_CODE (niter->niter) == INTEGER_CST)
2471 niter->max = tree_to_uhwi (niter->niter);
2472 else
2473 niter->max = max;
2474
2475  niter->bound = NULL_TREE;
2476 niter->cmp = ERROR_MARK;
2477
2478 return true;
2479}
2480
2481/* See comment below for number_of_iterations_bitcount.
2482 For c[lt]z complement, we have:
2483
2484 modify:
2485 iv_2 = iv_1 >> 1 OR iv_1 << 1
2486
2487 test:
2488 if (iv != 0)
2489
2490 modification count:
2491 src precision - c[lt]z (src)
2492
2493 */
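/* For illustration, a C source loop matching the ctz-complement flavour
   (a sketch; names are arbitrary, iv unsigned and 32 bits wide):

     unsigned iv = src;
     int c = 0;
     while (iv != 0)
       {
         iv >>= 1;
         c++;   // runs 32 - clz (src) times in total
       }
*/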
2494
2495static bool
2496number_of_iterations_cltz_complement (loop_p loop, edge exit,
2497 enum tree_code code,
2498 class tree_niter_desc *niter)
2499{
2500 bool modify_before_test = true;
2501  HOST_WIDE_INT max;
2502
2503 /* Check that condition for staying inside the loop is like
2504 if (iv != 0). */
2505 gimple *cond_stmt = last_stmt (exit->src);
2506 if (!cond_stmt
2507 || gimple_code (cond_stmt) != GIMPLE_COND
2508 || code != NE_EXPR
2509 || !integer_zerop (gimple_cond_rhs (cond_stmt))
2510      || TREE_CODE (gimple_cond_lhs (cond_stmt)) != SSA_NAME)
2511 return false;
2512
2513 tree iv_2 = gimple_cond_lhs (cond_stmt);
2514  gimple *iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2515
2516 /* If the test comes before the iv modification, then these will actually be
2517 iv_1 and a phi node. */
2518 if (gimple_code (iv_2_stmt) == GIMPLE_PHI
2519 && gimple_bb (iv_2_stmt) == loop->header
2520 && gimple_phi_num_args (iv_2_stmt) == 2
2521      && (TREE_CODE (gimple_phi_arg_def (iv_2_stmt,
2522                                         loop_latch_edge (loop)->dest_idx))
2523          == SSA_NAME))
2524 {
2525 /* iv_2 is actually one of the inputs to the phi. */
2526 iv_2 = gimple_phi_arg_def (iv_2_stmt, loop_latch_edge (loop)->dest_idx);
2527      iv_2_stmt = SSA_NAME_DEF_STMT (iv_2);
2528 modify_before_test = false;
2529 }
2530
2531 /* Make sure iv_2_stmt is a logical shift by one stmt:
2532 iv_2 = iv_1 {>>|<<} 1 */
2533 if (!is_gimple_assign (iv_2_stmt)
2534 || (gimple_assign_rhs_code (iv_2_stmt) != LSHIFT_EXPR
2535 && (gimple_assign_rhs_code (iv_2_stmt) != RSHIFT_EXPR
2536              || !TYPE_UNSIGNED (TREE_TYPE (gimple_assign_lhs (iv_2_stmt)))))
2537 || !integer_onep (gimple_assign_rhs2 (iv_2_stmt)))
2538 return false;
2539
2540 bool left_shift = (gimple_assign_rhs_code (iv_2_stmt) == LSHIFT_EXPR);
2541
2542 tree iv_1 = gimple_assign_rhs1 (iv_2_stmt);
2543
2544 /* Check the recurrence. */
2545  gimple *phi = SSA_NAME_DEF_STMT (iv_1);
2546 if (gimple_code (phi) != GIMPLE_PHI
2547 || (gimple_bb (phi) != loop_latch_edge (loop)->dest)
2548 || (iv_2 != gimple_phi_arg_def (phi, loop_latch_edge (loop)->dest_idx)))
2549 return false;
2550
2551 /* We found a match. */
2552 tree src = gimple_phi_arg_def (phi, loop_preheader_edge (loop)->dest_idx);
2553  int src_precision = TYPE_PRECISION (TREE_TYPE (src));
2554
2555 /* Get the corresponding c[lt]z builtin. */
2556 tree expr = build_cltz_expr (src, !left_shift, true);
2557
2558 if (!expr)
2559 return false;
2560
2561  expr = fold_build2 (MINUS_EXPR, integer_type_node,
2562                      build_int_cst (integer_type_node, src_precision),
2563                      expr);
2564
2565 max = src_precision;
2566
2567  tree may_be_zero = boolean_false_node;
2568
2569 if (modify_before_test)
2570 {
2571      expr = fold_build2 (MINUS_EXPR, integer_type_node, expr,
2572                          integer_one_node);
2573      max = max - 1;
2574      may_be_zero = fold_build2 (EQ_EXPR, boolean_type_node, src,
2575                                 build_zero_cst (TREE_TYPE (src)));
2576 }
2577
2578  expr = fold_convert (unsigned_type_node, expr);
2579
2580  niter->assumptions = boolean_true_node;
2581 niter->may_be_zero = simplify_using_initial_conditions (loop, may_be_zero);
2582 niter->niter = simplify_using_initial_conditions (loop, expr);
2583
2584  if (TREE_CODE (niter->niter) == INTEGER_CST)
2585 niter->max = tree_to_uhwi (niter->niter);
2586 else
2587 niter->max = max;
2588
2589  niter->bound = NULL_TREE;
2590 niter->cmp = ERROR_MARK;
2591 return true;
2592}
2593
2594/* See if LOOP contains a bit counting idiom. The idiom consists of two parts:
2595   1. A modification to the induction variable;
2596 2. A test to determine whether or not to exit the loop.
2597
2598 These can come in either order - i.e.:
2599
2600 <bb 3>
2601 iv_1 = PHI <src(2), iv_2(4)>
2602 if (test (iv_1))
2603 goto <bb 4>
2604 else
2605 goto <bb 5>
2606
2607 <bb 4>
2608 iv_2 = modify (iv_1)
2609 goto <bb 3>
2610
2611 OR
2612
2613 <bb 3>
2614 iv_1 = PHI <src(2), iv_2(4)>
2615 iv_2 = modify (iv_1)
2616
2617 <bb 4>
2618 if (test (iv_2))
2619 goto <bb 3>
2620 else
2621 goto <bb 5>
2622
2623 The second form can be generated by copying the loop header out of the loop.
2624
2625 In the first case, the number of latch executions will be equal to the
2626 number of induction variable modifications required before the test fails.
2627
2628 In the second case (modify_before_test), if we assume that the number of
2629 modifications required before the test fails is nonzero, then the number of
2630 latch executions will be one less than this number.
2631
2632 If we recognise the pattern, then we update niter accordingly, and return
2633 true. */
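/* For illustration, at the source level the two shapes above typically
   correspond to (a sketch):

     while (test (iv))          // test before modify
       iv = modify (iv);

     do                         // modify before test, e.g. after the loop
       iv = modify (iv);        // header has been copied out of the loop
     while (test (iv));
*/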
2634
2635static bool
2636number_of_iterations_bitcount (loop_p loop, edge exit,
2637 enum tree_code code,
2638 class tree_niter_desc *niter)
2639{
2640 return (number_of_iterations_popcount (loop, exit, code, niter)
2641 || number_of_iterations_cltz (loop, exit, code, niter)
2642 || number_of_iterations_cltz_complement (loop, exit, code, niter));
2643}
2644
2645/* Substitute NEW_TREE for OLD in EXPR and fold the result.
2646 If VALUEIZE is non-NULL then OLD and NEW_TREE are ignored and instead
2647 all SSA names are replaced with the result of calling the VALUEIZE
2648 function with the SSA name as argument. */
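/* For illustration, a minimal use of the OLD/NEW_TREE form (a sketch;
   a_1 and b_2 stand for SSA name trees of the same type):

     tree expr = fold_build2 (PLUS_EXPR, type, a_1, b_2);
     tree s = simplify_replace_tree (expr, a_1, integer_one_node);
     // s is fold (1 + b_2)
*/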
2649
2650tree
2651simplify_replace_tree (tree expr, tree old, tree new_tree,
2652 tree (*valueize) (tree, void*), void *context,
2653 bool do_fold)
2654{
2655 unsigned i, n;
2656  tree ret = NULL_TREE, e, se;
2657
2658 if (!expr)
2659    return NULL_TREE;
2660
2661 /* Do not bother to replace constants. */
2662  if (CONSTANT_CLASS_P (expr))
2663 return expr;
2664
2665 if (valueize)
2666 {
2667      if (TREE_CODE (expr) == SSA_NAME)
2668 {
2669 new_tree = valueize (expr, context);
2670 if (new_tree != expr)
2671 return new_tree;
2672 }
2673 }
2674 else if (expr == old
2675 || operand_equal_p (expr, old, 0))
2676 return unshare_expr (new_tree);
2677
2678  if (!EXPR_P (expr))
2679 return expr;
2680
2681  n = TREE_OPERAND_LENGTH (expr);
2682 for (i = 0; i < n; i++)
2683 {
2684      e = TREE_OPERAND (expr, i);
2685 se = simplify_replace_tree (e, old, new_tree, valueize, context, do_fold);
2686 if (e == se)
2687 continue;
2688
2689 if (!ret)
2690 ret = copy_node (expr);
2691
2692      TREE_OPERAND (ret, i) = se;
2693 }
2694
2695 return (ret ? (do_fold ? fold (ret) : ret) : expr);
2696}
2697
2698/* Expand definitions of ssa names in EXPR as long as they are simple
2699 enough, and return the new expression. If STOP is specified, stop
2700   expanding if EXPR equals it.  */
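/* For illustration, given SSA definitions like (a sketch)

     a_1 = n_5 + 1;
     b_2 = (long) a_1;

   expanding b_2 yields the tree (long) (n_5 + 1): casts and
   increments/decrements by an invariant are treated as simple below.  */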
2701
2702static tree
2703expand_simple_operations (tree expr, tree stop, hash_map<tree, tree> &cache)
2704{
2705 unsigned i, n;
2706  tree ret = NULL_TREE, e, ee, e1;
2707 enum tree_code code;
2708 gimple *stmt;
2709
2710  if (expr == NULL_TREE)
2711 return expr;
2712
2713 if (is_gimple_min_invariant (expr))
2714 return expr;
2715
2716  code = TREE_CODE (expr);
2717  if (IS_EXPR_CODE_CLASS (TREE_CODE_CLASS (code)))
2718 {
2719      n = TREE_OPERAND_LENGTH (expr);
2720 for (i = 0; i < n; i++)
2721 {
2722          e = TREE_OPERAND (expr, i);
2723 if (!e)
2724 continue;
2725 /* SCEV analysis feeds us with a proper expression
2726 graph matching the SSA graph. Avoid turning it
2727 into a tree here, thus handle tree sharing
2728 properly.
2729 ??? The SSA walk below still turns the SSA graph
2730 into a tree but until we find a testcase do not
2731 introduce additional tree sharing here. */
2732 bool existed_p;
2733 tree &cee = cache.get_or_insert (e, &existed_p);
2734 if (existed_p)
2735 ee = cee;
2736 else
2737 {
2738 cee = e;
2739 ee = expand_simple_operations (e, stop, cache);
2740 if (ee != e)
2741 *cache.get (e) = ee;
2742 }
2743 if (e == ee)
2744 continue;
2745
2746 if (!ret)
2747 ret = copy_node (expr);
2748
2749          TREE_OPERAND (ret, i) = ee;
2750 }
2751
2752 if (!ret)
2753 return expr;
2754
2755 fold_defer_overflow_warnings ();
2756 ret = fold (ret);
2757 fold_undefer_and_ignore_overflow_warnings ();
2758 return ret;
2759 }
2760
2761  /* Stop if it's not an SSA name or it's the one we don't want to expand.  */
2762  if (TREE_CODE (expr) != SSA_NAME || expr == stop)
2763 return expr;
2764
2765  stmt = SSA_NAME_DEF_STMT (expr);
2766 if (gimple_code (stmt) == GIMPLE_PHI)
2767 {
2768 basic_block src, dest;
2769
2770 if (gimple_phi_num_args (stmt) != 1)
2771 return expr;
2772      e = PHI_ARG_DEF (stmt, 0);
2773
2774 /* Avoid propagating through loop exit phi nodes, which
2775 could break loop-closed SSA form restrictions. */
2776 dest = gimple_bb (stmt);
2777 src = single_pred (dest);
2778      if (TREE_CODE (e) == SSA_NAME
2779 && src->loop_father != dest->loop_father)
2780 return expr;
2781
2782 return expand_simple_operations (e, stop, cache);
2783 }
2784 if (gimple_code (stmt) != GIMPLE_ASSIGN)
2785 return expr;
2786
2787 /* Avoid expanding to expressions that contain SSA names that need
2788 to take part in abnormal coalescing. */
2789 ssa_op_iter iter;
2790  FOR_EACH_SSA_TREE_OPERAND (e, stmt, iter, SSA_OP_USE)
2791    if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (e))
2792 return expr;
2793
2794 e = gimple_assign_rhs1 (stmt);
2795 code = gimple_assign_rhs_code (stmt);
2796 if (get_gimple_rhs_class (code) == GIMPLE_SINGLE_RHS)
2797 {
2798 if (is_gimple_min_invariant (e))
2799 return e;
2800
2801 if (code == SSA_NAME)
2802 return expand_simple_operations (e, stop, cache);
2803 else if (code == ADDR_EXPR)
2804 {
2805 poly_int64 offset;
2806          tree base = get_addr_base_and_unit_offset (TREE_OPERAND (e, 0),
2807                                                     &offset);
2808 if (base
2809              && TREE_CODE (base) == MEM_REF)
2810 {
2811              ee = expand_simple_operations (TREE_OPERAND (base, 0), stop,
2812                                             cache);
2813              return fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (expr), ee,
2814                                  wide_int_to_tree (sizetype,
2815                                                    mem_ref_offset (base)
2816                                                    + offset));
2817 }
2818 }
2819
2820 return expr;
2821 }
2822
2823 switch (code)
2824 {
2825    CASE_CONVERT:
2826 /* Casts are simple. */
2827 ee = expand_simple_operations (e, stop, cache);
2828      return fold_build1 (code, TREE_TYPE (expr), ee);
2829
2830 case PLUS_EXPR:
2831 case MINUS_EXPR:
2832 case MULT_EXPR:
2833      if (ANY_INTEGRAL_TYPE_P (TREE_TYPE (expr))
2834          && TYPE_OVERFLOW_TRAPS (TREE_TYPE (expr)))
2835 return expr;
2836 /* Fallthru. */
2837 case POINTER_PLUS_EXPR:
2838 /* And increments and decrements by a constant are simple. */
2839 e1 = gimple_assign_rhs2 (stmt);
2840 if (!is_gimple_min_invariant (e1))
2841 return expr;
2842
2843 ee = expand_simple_operations (e, stop, cache);
2844      return fold_build2 (code, TREE_TYPE (expr), ee, e1);
2845
2846 default:
2847 return expr;
2848 }
2849}
2850
2851tree
2852expand_simple_operations (tree expr, tree stop)
2853{
2854 hash_map<tree, tree> cache;
2855 return expand_simple_operations (expr, stop, cache);
2856}
2857
2858/* Tries to simplify EXPR using the condition COND. Returns the simplified
2859 expression (or EXPR unchanged, if no simplification was possible). */
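/* For illustration (a sketch): with COND = (n_1 == 16) and
   EXPR = (n_1 > 0), substituting 16 for n_1 folds EXPR to a nonzero
   constant, which is returned below.  */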
2860
2861static tree
2862tree_simplify_using_condition_1 (tree cond, tree expr)
2863{
2864 bool changed;
2865 tree e, e0, e1, e2, notcond;
2866  enum tree_code code = TREE_CODE (expr);
2867
2868 if (code == INTEGER_CST)
2869 return expr;
2870
2871 if (code == TRUTH_OR_EXPR
2872 || code == TRUTH_AND_EXPR
2873 || code == COND_EXPR)
2874 {
2875 changed = false;
2876
2877      e0 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 0));
2878      if (TREE_OPERAND (expr, 0) != e0)
2879 changed = true;
2880
2881      e1 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 1));
2882      if (TREE_OPERAND (expr, 1) != e1)
2883 changed = true;
2884
2885 if (code == COND_EXPR)
2886 {
2887          e2 = tree_simplify_using_condition_1 (cond, TREE_OPERAND (expr, 2));
2888          if (TREE_OPERAND (expr, 2) != e2)
2889 changed = true;
2890 }
2891 else
2892        e2 = NULL_TREE;
2893
2894 if (changed)
2895 {
2896 if (code == COND_EXPR)
2897            expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
2898 else
2899            expr = fold_build2 (code, boolean_type_node, e0, e1);
2900 }
2901
2902 return expr;
2903 }
2904
2905 /* In case COND is equality, we may be able to simplify EXPR by copy/constant
2906 propagation, and vice versa. Fold does not handle this, since it is
2907 considered too expensive. */
2908  if (TREE_CODE (cond) == EQ_EXPR)
2909 {
2910      e0 = TREE_OPERAND (cond, 0);
2911      e1 = TREE_OPERAND (cond, 1);
2912
2913      /* We know that e0 == e1.  Check whether we can simplify expr
2914         using this fact.  */
2915 e = simplify_replace_tree (expr, e0, e1);
2916 if (integer_zerop (e) || integer_nonzerop (e))
2917 return e;
2918
2919 e = simplify_replace_tree (expr, e1, e0);
2920 if (integer_zerop (e) || integer_nonzerop (e))
2921 return e;
2922 }
2923  if (TREE_CODE (expr) == EQ_EXPR)
2924 {
2925      e0 = TREE_OPERAND (expr, 0);
2926      e1 = TREE_OPERAND (expr, 1);
2927
2928 /* If e0 == e1 (EXPR) implies !COND, then EXPR cannot be true. */
2929 e = simplify_replace_tree (cond, e0, e1);
2930 if (integer_zerop (e))
2931 return e;
2932 e = simplify_replace_tree (cond, e1, e0);
2933 if (integer_zerop (e))
2934 return e;
2935 }
2936  if (TREE_CODE (expr) == NE_EXPR)
2937 {
2938      e0 = TREE_OPERAND (expr, 0);
2939      e1 = TREE_OPERAND (expr, 1);
2940
2941 /* If e0 == e1 (!EXPR) implies !COND, then EXPR must be true. */
2942 e = simplify_replace_tree (cond, e0, e1);
2943 if (integer_zerop (e))
2944        return boolean_true_node;
2945 e = simplify_replace_tree (cond, e1, e0);
2946 if (integer_zerop (e))
2947        return boolean_true_node;
2948 }
2949
2950 /* Check whether COND ==> EXPR. */
2951  notcond = invert_truthvalue (cond);
2952  e = fold_binary (TRUTH_OR_EXPR, boolean_type_node, notcond, expr);
2953 if (e && integer_nonzerop (e))
2954 return e;
2955
2956 /* Check whether COND ==> not EXPR. */
2957  e = fold_binary (TRUTH_AND_EXPR, boolean_type_node, cond, expr);
2958 if (e && integer_zerop (e))
2959 return e;
2960
2961 return expr;
2962}
2963
2964/* Tries to simplify EXPR using the condition COND. Returns the simplified
2965 expression (or EXPR unchanged, if no simplification was possible).
2966 Wrapper around tree_simplify_using_condition_1 that ensures that chains
2967 of simple operations in definitions of ssa names in COND are expanded,
2968 so that things like casts or incrementing the value of the bound before
2969 the loop do not cause us to fail. */
2970
2971static tree
2972tree_simplify_using_condition (tree cond, tree expr)
2973{
2974 cond = expand_simple_operations (cond);
2975
2976 return tree_simplify_using_condition_1 (cond, expr);
2977}
2978
2979/* Tries to simplify EXPR using the conditions on entry to LOOP.
2980 Returns the simplified expression (or EXPR unchanged, if no
2981 simplification was possible). */
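/* For illustration (a sketch): for

     if (n_1 != 0)
       for (i = 0; i < n_1; ++i) ...

   an expression such as (n_1 == 0) simplifies to false here, because the
   guard dominating the loop header asserts n_1 != 0.  */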
2982
2983tree
2984simplify_using_initial_conditions (class loop *loop, tree expr)
2985{
2986 edge e;
2987 basic_block bb;
2988 gimple *stmt;
2989 tree cond, expanded, backup;
2990 int cnt = 0;
2991
2992  if (TREE_CODE (expr) == INTEGER_CST)
2993 return expr;
2994
2995 backup = expanded = expand_simple_operations (expr);
2996
2997 /* Limit walking the dominators to avoid quadraticness in
2998 the number of BBs times the number of loops in degenerate
2999 cases. */
3000 for (bb = loop->header;
3001       bb != ENTRY_BLOCK_PTR_FOR_FN (cfun) && cnt < MAX_DOMINATORS_TO_WALK;
3002 bb = get_immediate_dominator (CDI_DOMINATORS, bb))
3003 {
3004 if (!single_pred_p (bb))
3005 continue;
3006 e = single_pred_edge (bb);
3007
3008 if (!(e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
3009 continue;
3010
3011 stmt = last_stmt (e->src);
3012 cond = fold_build2 (gimple_cond_code (stmt),
3013 boolean_type_node,
3014 gimple_cond_lhs (stmt),
3015 gimple_cond_rhs (stmt));
3016 if (e->flags & EDGE_FALSE_VALUE)
3017 cond = invert_truthvalue (cond);
3018 expanded = tree_simplify_using_condition (cond, expanded);
3019 /* Break if EXPR is simplified to const values. */
3020 if (expanded
3021 && (integer_zerop (expanded) || integer_nonzerop (expanded)))
3022 return expanded;
3023
3024 ++cnt;
3025 }
3026
3027 /* Return the original expression if no simplification is done. */
3028 return operand_equal_p (backup, expanded, 0) ? expr : expanded;
3029}
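
For intuition, the dominator walk above collects exactly the guards that dominate the loop header; a sketch with hypothetical names:

    /* The single-predecessor edge carrying the n > 0 test is found by the
       walk, so an assumption such as n == 0 in a niter descriptor
       simplifies to false inside the guarded loop.  */
    int
    sum_array (int n, const int *a)
    {
      int s = 0;
      if (n > 0)
        for (int i = 0; i < n; i++)
          s += a[i];
      return s;
    }
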
3030
3031/* Tries to simplify EXPR using the evolutions of the loop invariants
3032 in the superloops of LOOP. Returns the simplified expression
3033 (or EXPR unchanged, if no simplification was possible). */
3034
3035static tree
3036simplify_using_outer_evolutions (class loop *loop, tree expr)
3037{
3038 enum tree_code code = TREE_CODE (expr);
3039 bool changed;
3040 tree e, e0, e1, e2;
3041
3042 if (is_gimple_min_invariant (expr))
3043 return expr;
3044
3045 if (code == TRUTH_OR_EXPR
3046 || code == TRUTH_AND_EXPR
3047 || code == COND_EXPR)
3048 {
3049 changed = false;
3050
3051 e0 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 0));
3052 if (TREE_OPERAND (expr, 0) != e0)
3053 changed = true;
3054
3055 e1 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 1));
3056 if (TREE_OPERAND (expr, 1) != e1)
3057 changed = true;
3058
3059 if (code == COND_EXPR)
3060 {
3061 e2 = simplify_using_outer_evolutions (loop, TREE_OPERAND (expr, 2));
3062 if (TREE_OPERAND (expr, 2) != e2)
3063 changed = true;
3064 }
3065 else
3066 e2 = NULL_TREE;
3067
3068 if (changed)
3069 {
3070 if (code == COND_EXPR)
3071 expr = fold_build3 (code, boolean_type_node, e0, e1, e2);
3072 else
3073 expr = fold_build2 (code, boolean_type_node, e0, e1);
3074 }
3075
3076 return expr;
3077 }
3078
3079 e = instantiate_parameters (loop, expr);
3080 if (is_gimple_min_invariant (e))
3081 return e;
3082
3083 return expr;
3084}
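
A sketch of the shape this targets (illustrative names only):

    /* The inner bound m is invariant in the inner loop, but its value is an
       evolution of the outer loop (m = j * 4); instantiate_parameters
       rewrites inner-loop niter expressions in terms of j so they can fold.  */
    int
    nested_sum (const int *a)
    {
      int s = 0;
      for (int j = 0; j < 8; j++)
        {
          int m = j * 4;
          for (int i = 0; i < m; i++)
            s += a[i];
        }
      return s;
    }
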
3085
3086/* Returns true if EXIT is the only possible exit from LOOP. */
3087
3088bool
3089loop_only_exit_p (const class loop *loop, basic_block *body, const_edge exit)
3090{
3091 gimple_stmt_iterator bsi;
3092 unsigned i;
3093
3094 if (exit != single_exit (loop))
3095 return false;
3096
3097 for (i = 0; i < loop->num_nodes; i++)
3098 for (bsi = gsi_start_bb (body[i]); !gsi_end_p (bsi); gsi_next (&bsi))
3099 if (stmt_can_terminate_bb_p (gsi_stmt (bsi)))
3100 return false;
3101
3102 return true;
3103}
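
Note the stmt_can_terminate_bb_p scan: even a loop with a single exit edge fails this predicate when a statement in its body may end execution early. A hedged example (may_throw is hypothetical):

    /* The call can throw, so the counted exit is not the only way out of
       the loop and loop_only_exit_p returns false.  */
    extern void may_throw (int);
    void
    walk (int n)
    {
      for (int i = 0; i < n; i++)
        may_throw (i);
    }
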
3104
3105/* Stores description of number of iterations of LOOP derived from
3106 EXIT (an exit edge of the LOOP) in NITER. Returns true if some useful
3107 information could be derived (and fields of NITER have meaning described
3108 in comments at class tree_niter_desc declaration), false otherwise.
3109 When EVERY_ITERATION is true, only tests that are known to be executed
3110 every iteration are considered (i.e. only a test that alone bounds the loop).
3111 If AT_STMT is not NULL, this function stores LOOP's condition statement in
3112 it when returning true. */
3113
3114bool
3115number_of_iterations_exit_assumptions (class loop *loop, edge exit,
3116 class tree_niter_desc *niter,
3117 gcond **at_stmt, bool every_iteration,
3118 basic_block *body)
3119{
3120 gimple *last;
3121 gcond *stmt;
3122 tree type;
3123 tree op0, op1;
3124 enum tree_code code;
3125 affine_iv iv0, iv1;
3126 bool safe;
3127
3128 /* The condition at a fake exit (if it exists) does not control its
3129 execution. */
3130 if (exit->flags & EDGE_FAKE)
3131 return false;
3132
3133 /* Nothing to analyze if the loop is known to be infinite. */
3134 if (loop_constraint_set_p (loop, LOOP_C_INFINITE))
3135 return false;
3136
3137 safe = dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src);
3138
3139 if (every_iteration && !safe)
3140 return false;
3141
3142 niter->assumptions = boolean_false_node;
3143 niter->control.base = NULL_TREE;
3144 niter->control.step = NULL_TREE;
3145 niter->control.no_overflow = false;
3146 last = last_stmt (exit->src);
3147 if (!last)
3148 return false;
3149 stmt = dyn_cast <gcond *> (last);
3150 if (!stmt)
3151 return false;
3152
3153 if (at_stmt)
3154 *at_stmt = stmt;
3155
3156 /* We want the condition for staying inside the loop. */
3157 code = gimple_cond_code (stmt);
3158 if (exit->flags & EDGE_TRUE_VALUE)
3159 code = invert_tree_comparison (code, false);
3160
3161 switch (code)
3162 {
3163 case GT_EXPR:
3164 case GE_EXPR:
3165 case LT_EXPR:
3166 case LE_EXPR:
3167 case NE_EXPR:
3168 break;
3169
3170 case EQ_EXPR:
3171 return number_of_iterations_cltz (loop, exit, code, niter);
3172
3173 default:
3174 return false;
3175 }
3176
3177 op0 = gimple_cond_lhs (stmt);
3178 op1 = gimple_cond_rhs (stmt);
3179 type = TREE_TYPE (op0);
3180
3181 if (TREE_CODE (type) != INTEGER_TYPE
3182 && !POINTER_TYPE_P (type))
3183 return false;
3184
3185 tree iv0_niters = NULL_TREE;
3186 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
3187 op0, &iv0, safe ? &iv0_niters : NULL, false))
3188 return number_of_iterations_bitcount (loop, exit, code, niter);
3189 tree iv1_niters = NULL_TREE;
3190 if (!simple_iv_with_niters (loop, loop_containing_stmt (stmt),
3191 op1, &iv1, safe ? &iv1_niters : NULL, false))
3192 return false;
3193 /* Give up on the complicated case. */
3194 if (iv0_niters && iv1_niters)
3195 return false;
3196
3197 /* We don't want to see undefined signed overflow warnings while
3198 computing the number of iterations. */
3199 fold_defer_overflow_warnings ();
3200
3201 iv0.base = expand_simple_operations (iv0.base);
3202 iv1.base = expand_simple_operations (iv1.base);
3203 bool body_from_caller = true;
3204 if (!body)
3205 {
3206 body = get_loop_body (loop);
3207 body_from_caller = false;
3208 }
3209 bool only_exit_p = loop_only_exit_p (loop, body, exit);
3210 if (!body_from_caller)
3211 free (body);
3212 if (!number_of_iterations_cond (loop, type, &iv0, code, &iv1, niter,
3213 only_exit_p, safe))
3214 {
3215 fold_undefer_and_ignore_overflow_warnings ();
3216 return false;
3217 }
3218
3219 /* Incorporate additional assumption implied by control iv. */
3220 tree iv_niters = iv0_niters ? iv0_niters : iv1_niters;
3221 if (iv_niters)
3222 {
3223 tree assumption = fold_build2 (LE_EXPR, boolean_type_node, niter->niter,
3224 fold_convert (TREE_TYPE (niter->niter),
3225 iv_niters));
3226
3227 if (!integer_nonzerop (assumption))
3228 niter->assumptions = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
3229 niter->assumptions, assumption);
3230
3231 /* Refine upper bound if possible. */
3232 if (TREE_CODE (iv_niters) == INTEGER_CST
3233 && niter->max > wi::to_widest (iv_niters))
3234 niter->max = wi::to_widest (iv_niters);
3235 }
3236
3237 /* There are no assumptions if the loop is known to be finite. */
3238 if (!integer_zerop (niter->assumptions)
3239 && loop_constraint_set_p (loop, LOOP_C_FINITE))
3240 niter->assumptions = boolean_true_node;
3241
3242 if (optimize >= 3)
3243 {
3244 niter->assumptions = simplify_using_outer_evolutions (loop,
3245 niter->assumptions);
3246 niter->may_be_zero = simplify_using_outer_evolutions (loop,
3247 niter->may_be_zero);
3248 niter->niter = simplify_using_outer_evolutions (loop, niter->niter);
3249 }
3250
3251 niter->assumptions
3252 = simplify_using_initial_conditions (loop,
3253 niter->assumptions);
3254 niter->may_be_zero
3255 = simplify_using_initial_conditions (loop,
3256 niter->may_be_zero);
3257
3258 fold_undefer_and_ignore_overflow_warnings ();
3259
3260 /* If NITER has simplified into a constant, update MAX. */
3261 if (TREE_CODE (niter->niter) == INTEGER_CST)
3262 niter->max = wi::to_widest (niter->niter);
3263
3264 return (!integer_zerop (niter->assumptions));
3265}
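
A common source of a non-trivial assumption here is an NE_EXPR exit whose step does not obviously divide the distance; the shape below is illustrative:

    /* For i != n with step 4, the computed niter (n - 0) / 4 is valid only
       under the recorded assumption that n is a multiple of 4; otherwise
       the induction variable steps over n.  */
    void
    strided_clear (char *p, unsigned n)
    {
      for (unsigned i = 0; i != n; i += 4)
        p[i] = 0;
    }
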
3266
3267/* Like number_of_iterations_exit_assumptions, but return TRUE only if
3268 the niter information holds unconditionally. */
3269
3270bool
3271number_of_iterations_exit (class loop *loop, edge exit,
3272 class tree_niter_desc *niter,
3273 bool warn, bool every_iteration,
3274 basic_block *body)
3275{
3276 gcond *stmt;
3277 if (!number_of_iterations_exit_assumptions (loop, exit, niter,
3278 &stmt, every_iteration, body))
3279 return false;
3280
3281 if (integer_nonzerop (niter->assumptions))
3282 return true;
3283
3284 if (warn && dump_enabled_p ())
3285 dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmt,
3286 "missed loop optimization: niters analysis ends up "
3287 "with assumptions.\n");
3288
3289 return false;
3290}
3291
3292/* Try to determine the number of iterations of LOOP. If we succeed,
3293 an expression giving the number of iterations is returned and *EXIT is
3294 set to the edge from which the information is obtained. Otherwise
3295 chrec_dont_know is returned. */
3296
3297tree
3298find_loop_niter (class loop *loop, edge *exit)
3299{
3300 unsigned i;
3301 auto_vec<edge> exits = get_loop_exit_edges (loop);
3302 edge ex;
3303 tree niter = NULL_TREE, aniter;
3304 class tree_niter_desc desc;
3305
3306 *exit = NULL;
3307 FOR_EACH_VEC_ELT (exits, i, ex)
3308 {
3309 if (!number_of_iterations_exit (loop, ex, &desc, false))
3310 continue;
3311
3312 if (integer_nonzerop (desc.may_be_zero))
3313 {
3314 /* We exit in the first iteration through this exit.
3315 We won't find anything better. */
3316 niter = build_int_cst (unsigned_type_node, 0);
3317 *exit = ex;
3318 break;
3319 }
3320
3321 if (!integer_zerop (desc.may_be_zero))
3322 continue;
3323
3324 aniter = desc.niter;
3325
3326 if (!niter)
3327 {
3328 /* Nothing recorded yet. */
3329 niter = aniter;
3330 *exit = ex;
3331 continue;
3332 }
3333
3334 /* Prefer constants, the lower the better. */
3335 if (TREE_CODE (aniter) != INTEGER_CST)
3336 continue;
3337
3338 if (TREE_CODE (niter) != INTEGER_CST)
3339 {
3340 niter = aniter;
3341 *exit = ex;
3342 continue;
3343 }
3344
3345 if (tree_int_cst_lt (aniter, niter))
3346 {
3347 niter = aniter;
3348 *exit = ex;
3349 continue;
3350 }
3351 }
3352
3353 return niter ? niter : chrec_dont_know;
3354}
3355
3356/* Return true if loop is known to have bounded number of iterations. */
3357
3358bool
3359finite_loop_p (class loop *loop)
3360{
3361 widest_int nit;
3362 int flags;
3363
3364 flags = flags_from_decl_or_type (current_function_decl);
3365 if ((flags & (ECF_CONST|ECF_PURE)) && !(flags & ECF_LOOPING_CONST_OR_PURE))
3366 {
3367 if (dump_file && (dump_flags & TDF_DETAILS))
3368 fprintf (dump_file, "Found loop %i to be finite: it is within pure or const function.\n",
3369 loop->num);
3370 return true;
3371 }
3372
3373 if (loop->any_upper_bound
3374 || max_loop_iterations (loop, &nit))
3375 {
3376 if (dump_file && (dump_flags & TDF_DETAILS))
3377 fprintf (dump_file, "Found loop %i to be finite: upper bound found.\n",
3378 loop->num);
3379 return true;
3380 }
3381
3382 if (loop->finite_p)
3383 {
3384 unsigned i;
3385 auto_vec<edge> exits = get_loop_exit_edges (loop);
3386 edge ex;
3387
3388 /* If the loop has a normal exit, we can assume it will terminate. */
3389 FOR_EACH_VEC_ELT (exits, i, ex)
3390 if (!(ex->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_FAKE)))
3391 {
3392 if (dump_file)
3393 fprintf (dump_file, "Assume loop %i to be finite: it has an exit "
3394 "and -ffinite-loops is on.\n", loop->num);
3395 return true;
3396 }
3397 }
3398
3399 return false;
3400}
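
The loop->finite_p case above covers the classic pointer chase, sketched here:

    /* No iteration bound is computable, but the loop has a normal exit
       edge, so under -ffinite-loops it is assumed to terminate.  */
    struct node { struct node *next; };
    int
    list_length (struct node *p)
    {
      int n = 0;
      while (p)
        {
          p = p->next;
          n++;
        }
      return n;
    }
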
3401
3402/*
3403
3404 Analysis of a number of iterations of a loop by a brute-force evaluation.
3405
3406*/
3407
3408/* Bound on the number of iterations we try to evaluate. */
3409
3410#define MAX_ITERATIONS_TO_TRACK \
3411 ((unsigned) param_max_iterations_to_track)
3412
3413/* Returns the loop phi node of LOOP such that ssa name X is derived from its
3414 result by a chain of operations such that all but exactly one of their
3415 operands are constants. */
3416
3417static gphi *
3418chain_of_csts_start (class loop *loop, tree x)
3419{
3420 gimple *stmt = SSA_NAME_DEF_STMT (x);
3421 tree use;
3422 basic_block bb = gimple_bb (stmt);
3423 enum tree_code code;
3424
3425 if (!bb
3426 || !flow_bb_inside_loop_p (loop, bb))
3427 return NULL;
3428
3429 if (gimple_code (stmt) == GIMPLE_PHI)
3430 {
3431 if (bb == loop->header)
3432 return as_a <gphi *> (stmt);
3433
3434 return NULL;
3435 }
3436
3437 if (gimple_code (stmt) != GIMPLE_ASSIGN
3438 || gimple_assign_rhs_class (stmt) == GIMPLE_TERNARY_RHS)
3439 return NULL;
3440
3441 code = gimple_assign_rhs_code (stmt);
3442 if (gimple_references_memory_p (stmt)
3443 || TREE_CODE_CLASS (code) == tcc_reference
3444 || (code == ADDR_EXPR
3445 && !is_gimple_min_invariant (gimple_assign_rhs1 (stmt))))
3446 return NULL;
3447
3448 use = SINGLE_SSA_TREE_OPERAND (stmt, SSA_OP_USE);
3449 if (use == NULL_TREE)
3450 return NULL;
3451
3452 return chain_of_csts_start (loop, use);
3453}
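
A worked trace may make the walk concrete; the SSA names are hypothetical:

    /* Given
         i_1 = PHI <0(preheader), i_5(latch)>
         t_2 = i_1 * 3;
         x_3 = t_2 + 1;
       chain_of_csts_start (loop, x_3) follows the single SSA use operand of
       each assignment back to the phi in the loop header and returns it; a
       statement with two non-constant operands has no single use operand,
       so the walk stops and returns NULL.  */
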
3454
3455/* Determines whether the expression X is derived from a result of a phi node
3456 in header of LOOP such that
3457
3458 * the derivation of X consists only of operations with constants
3459 * the initial value of the phi node is constant
3460 * the value of the phi node in the next iteration can be derived from the
3461 value in the current iteration by a chain of operations with constants,
3462 or is also a constant
3463
3464 If such phi node exists, it is returned, otherwise NULL is returned. */
3465
3466static gphi *
3467get_base_for (class loop *loop, tree x)
3468{
3469 gphi *phi;
3470 tree init, next;
3471
3472 if (is_gimple_min_invariant (x))
3473 return NULL;
3474
3475 phi = chain_of_csts_start (loop, x);
3476 if (!phi)
3477 return NULL;
3478
3479 init = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3480 next = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3481
3482 if (!is_gimple_min_invariant (init))
3483 return NULL;
3484
3485 if (TREE_CODE (next) == SSA_NAME
3486 && chain_of_csts_start (loop, next) != phi)
3487 return NULL;
3488
3489 return phi;
3490}
3491
3492/* Given an expression X, then
3493
3494 * if X is NULL_TREE, we return the constant BASE.
3495 * if X is a constant, we return the constant X.
3496 * otherwise X is an SSA name whose value in the considered loop is derived
3497 by a chain of operations with constants from the result of a phi node in
3498 the header of the loop. Then we return the value of X when the value of the
3499 result of this phi node is given by the constant BASE. */
3500
3501static tree
3502get_val_for (tree x, tree base)
3503{
3504 gimple *stmt;
3505
3506 gcc_checking_assert (is_gimple_min_invariant (base));
3507
3508 if (!x)
3509 return base;
3510 else if (is_gimple_min_invariant (x))
3511 return x;
3512
3513 stmt = SSA_NAME_DEF_STMT (x);
3514 if (gimple_code (stmt) == GIMPLE_PHI)
3515 return base;
3516
3517 gcc_checking_assert (is_gimple_assign (stmt));
3518
3519 /* STMT must be either an assignment of a single SSA name or an
3520 expression involving an SSA name and a constant. Try to fold that
3521 expression using the value for the SSA name. */
3522 if (gimple_assign_ssa_name_copy_p (stmt))
3523 return get_val_for (gimple_assign_rhs1 (stmt), base);
3524 else if (gimple_assign_rhs_class (stmt) == GIMPLE_UNARY_RHS
3525 && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
3526 return fold_build1 (gimple_assign_rhs_code (stmt),
3527 TREE_TYPE (gimple_assign_lhs (stmt)),
3528 get_val_for (gimple_assign_rhs1 (stmt), base));
3529 else if (gimple_assign_rhs_class (stmt) == GIMPLE_BINARY_RHS)
3530 {
3531 tree rhs1 = gimple_assign_rhs1 (stmt);
3532 tree rhs2 = gimple_assign_rhs2 (stmt);
3533 if (TREE_CODE (rhs1) == SSA_NAME)
3534 rhs1 = get_val_for (rhs1, base);
3535 else if (TREE_CODE (rhs2) == SSA_NAME)
3536 rhs2 = get_val_for (rhs2, base);
3537 else
3538 gcc_unreachable ();
3539 return fold_build2 (gimple_assign_rhs_code (stmt),
3540 TREE_TYPE (gimple_assign_lhs (stmt)), rhs1, rhs2);
3541 }
3542 else
3543 gcc_unreachable ();
3544}
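
A short trace of the recursion, again with hypothetical SSA names:

    /* With the chain
         i_1 = PHI <0(preheader), i_4(latch)>
         t_2 = i_1 * 2;
         x_3 = t_2 + 1;
       get_val_for (x_3, base) with base = 5 recurses through t_2 down to
       the phi, substitutes 5 for its result, and folds (5 * 2) + 1 into
       the constant 11.  */
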
3545
3546
3547/* Tries to count the number of iterations of LOOP till it exits by EXIT
3548 by brute force -- i.e. by determining the value of the operands of the
3549 condition at EXIT in the first few iterations of the loop (assuming that
3550 these values are constant) and determining the first one in which the
3551 condition is not satisfied. Returns the constant giving the number
3552 of the iterations of LOOP if successful, chrec_dont_know otherwise. */
3553
3554tree
3555loop_niter_by_eval (class loop *loop, edge exit)
3556{
3557 tree acnd;
3558 tree op[2], val[2], next[2], aval[2];
3559 gphi *phi;
3560 gimple *cond;
3561 unsigned i, j;
3562 enum tree_code cmp;
3563
3564 cond = last_stmt (exit->src);
3565 if (!cond || gimple_code (cond) != GIMPLE_COND)
3566 return chrec_dont_know;
3567
3568 cmp = gimple_cond_code (cond);
3569 if (exit->flags & EDGE_TRUE_VALUE)
3570 cmp = invert_tree_comparison (cmp, false);
3571
3572 switch (cmp)
3573 {
3574 case EQ_EXPR:
3575 case NE_EXPR:
3576 case GT_EXPR:
3577 case GE_EXPR:
3578 case LT_EXPR:
3579 case LE_EXPR:
3580 op[0] = gimple_cond_lhs (cond);
3581 op[1] = gimple_cond_rhs (cond);
3582 break;
3583
3584 default:
3585 return chrec_dont_know;
3586 }
3587
3588 for (j = 0; j < 2; j++)
3589 {
3590 if (is_gimple_min_invariant (op[j]))
3591 {
3592 val[j] = op[j];
3593 next[j] = NULL_TREE;
3594 op[j] = NULL_TREE;
3595 }
3596 else
3597 {
3598 phi = get_base_for (loop, op[j]);
3599 if (!phi)
3600 return chrec_dont_know;
3601 val[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_preheader_edge (loop));
3602 next[j] = PHI_ARG_DEF_FROM_EDGE (phi, loop_latch_edge (loop));
3604 }
3605
3606 /* Don't issue signed overflow warnings. */
3607 fold_defer_overflow_warnings ();
3608
3609 for (i = 0; i < MAX_ITERATIONS_TO_TRACK; i++)
3610 {
3611 for (j = 0; j < 2; j++)
3612 aval[j] = get_val_for (op[j], val[j]);
3613
3614 acnd = fold_binary (cmp, boolean_type_node, aval[0], aval[1]);
3615 if (acnd && integer_zerop (acnd))
3616 {
3617 fold_undefer_and_ignore_overflow_warnings ();
3618 if (dump_file && (dump_flags & TDF_DETAILS))
3619 fprintf (dump_file,
3620 "Proved that loop %d iterates %d times using brute force.\n",
3621 loop->num, i);
3622 return build_int_cst (unsigned_type_node, i);
3623 }
3624
3625 for (j = 0; j < 2; j++)
3626 {
3627 aval[j] = val[j];
3628 val[j] = get_val_for (next[j], val[j]);
3629 if (!is_gimple_min_invariant (val[j]))
3630 {
3631 fold_undefer_and_ignore_overflow_warnings ();
3632 return chrec_dont_know;
3633 }
3634 }
3635
3636 /* If the next iteration would use the same base values
3637 as the current one, there is no point looping further;
3638 all following iterations will be the same as this one. */
3639 if (val[0] == aval[0] && val[1] == aval[1])
3640 break;
3641 }
3642
3643 fold_undefer_and_ignore_overflow_warnings ();
3644
3645 return chrec_dont_know;
3646}
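
Brute force pays off where no affine closed form exists; a sketch:

    /* The next value of i is a chain of operations with constants, so
       get_base_for accepts it even though the IV is not affine.  i takes
       the values 0, 7, 3, and the exit test i != 10 first fails on the
       fourth evaluation, so loop_niter_by_eval returns 3.  */
    int
    scramble (void)
    {
      int s = 0;
      for (int i = 0; i != 10; i = (i + 7) % 11)
        s++;
      return s;
    }
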
3647
3648/* Finds the exit of the LOOP by which the loop exits after a constant
3649 number of iterations and stores the exit edge to *EXIT. The constant
3650 giving the number of iterations of LOOP is returned. The number of
3651 iterations is determined using loop_niter_by_eval (i.e. by brute force
3652 evaluation). If we are unable to find the exit for which loop_niter_by_eval
3653 determines the number of iterations, chrec_dont_know is returned. */
3654
3655tree
3656find_loop_niter_by_eval (class loop *loop, edge *exit)
3657{
3658 unsigned i;
3659 auto_vec<edge> exits = get_loop_exit_edges (loop);
3660 edge ex;
3661 tree niter = NULL_TREE, aniter;
3662
3663 *exit = NULL;
3664
3665 /* Loops with multiple exits are expensive to handle and less important. */
3666 if (!flag_expensive_optimizations
3667 && exits.length () > 1)
3668 return chrec_dont_know;
3669
3670 FOR_EACH_VEC_ELT (exits, i, ex)
3671 {
3672 if (!just_once_each_iteration_p (loop, ex->src))
3673 continue;
3674
3675 aniter = loop_niter_by_eval (loop, ex);
3676 if (chrec_contains_undetermined (aniter))
3677 continue;
3678
3679 if (niter
3680 && !tree_int_cst_lt (aniter, niter))
3681 continue;
3682
3683 niter = aniter;
3684 *exit = ex;
3685 }
3686
3687 return niter ? niter : chrec_dont_know;
3688}
3689
3690/*
3691
3692 Analysis of upper bounds on number of iterations of a loop.
3693
3694*/
3695
3696static widest_int derive_constant_upper_bound_ops (tree, tree,
3697 enum tree_code, tree);
3698
3699/* Returns a constant upper bound on the value of the right-hand side of
3700 an assignment statement STMT. */
3701
3702static widest_int
3703derive_constant_upper_bound_assign (gimple *stmt)
3704{
3705 enum tree_code code = gimple_assign_rhs_code (stmt);
3706 tree op0 = gimple_assign_rhs1 (stmt);
3707 tree op1 = gimple_assign_rhs2 (stmt);
3708
3709 return derive_constant_upper_bound_ops (TREE_TYPE (gimple_assign_lhs (stmt)),
3710 op0, code, op1);
3711}
3712
3713/* Returns a constant upper bound on the value of expression VAL. VAL
3714 is considered to be unsigned. If its type is signed, its value must
3715 be nonnegative. */
3716
3717static widest_int
3718derive_constant_upper_bound (tree val)
3719{
3720 enum tree_code code;
3721 tree op0, op1, op2;
3722
3723 extract_ops_from_tree (val, &code, &op0, &op1, &op2);
3724 return derive_constant_upper_bound_ops (TREE_TYPE (val), op0, code, op1);
3725}
3726
3727/* Returns a constant upper bound on the value of expression OP0 CODE OP1,
3728 whose type is TYPE. The expression is considered to be unsigned. If
3729 its type is signed, its value must be nonnegative. */
3730
3731static widest_int
3732derive_constant_upper_bound_ops (tree type, tree op0,
3733 enum tree_code code, tree op1)
3734{
3735 tree subtype, maxt;
3736 widest_int bnd, max, cst;
3737 gimple *stmt;
3738
3739 if (INTEGRAL_TYPE_P (type))
3740 maxt = TYPE_MAX_VALUE (type);
3741 else
3742 maxt = upper_bound_in_type (type, type);
3743
3744 max = wi::to_widest (maxt);
3745
3746 switch (code)
3747 {
3748 case INTEGER_CST:
3749 return wi::to_widest (op0);
3750
3751 CASE_CONVERT:
3752 subtype = TREE_TYPE (op0);
3753 if (!TYPE_UNSIGNED (subtype)
3754 /* If TYPE is also signed, the fact that VAL is nonnegative implies
3755 that OP0 is nonnegative. */
3756 && TYPE_UNSIGNED (type)
3757 && !tree_expr_nonnegative_p (op0))
3758 {
3759 /* If we cannot prove that the casted expression is nonnegative,
3760 we cannot establish a more useful upper bound than the precision
3761 of the type gives us. */
3762 return max;
3763 }
3764
3765 /* We now know that op0 is a nonnegative value. Try deriving an upper
3766 bound for it. */
3767 bnd = derive_constant_upper_bound (op0);
3768
3769 /* If the bound does not fit in TYPE, max. value of TYPE could be
3770 attained. */
3771 if (wi::ltu_p (max, bnd))
3772 return max;
3773
3774 return bnd;
3775
3776 case PLUS_EXPR:
3777 case POINTER_PLUS_EXPR:
3778 case MINUS_EXPR:
3779 if (TREE_CODE (op1) != INTEGER_CST
3780 || !tree_expr_nonnegative_p (op0))
3781 return max;
3782
3783 /* Canonicalize to OP0 - CST. Consider CST to be signed, in order to
3784 choose the most logical way to treat this constant regardless
3785 of the signedness of the type. */
3786 cst = wi::sext (wi::to_widest (op1), TYPE_PRECISION (type));
3787 if (code != MINUS_EXPR)
3788 cst = -cst;
3789
3790 bnd = derive_constant_upper_bound (op0);
3791
3792 if (wi::neg_p (cst))
3793 {
3794 cst = -cst;
3795 /* Avoid CST == 0x80000... */
3796 if (wi::neg_p (cst))
3797 return max;
3798
3799 /* OP0 + CST. We need to check that
3800 BND <= MAX (type) - CST. */
3801
3802 widest_int mmax = max - cst;
3803 if (wi::leu_p (bnd, mmax))
3804 return max;
3805
3806 return bnd + cst;
3807 }
3808 else
3809 {
3810 /* OP0 - CST, where CST >= 0.
3811
3812 If TYPE is signed, we have already verified that OP0 >= 0, and we
3813 know that the result is nonnegative. This implies that
3814 VAL <= BND - CST.
3815
3816 If TYPE is unsigned, we must additionally know that OP0 >= CST,
3817 otherwise the operation underflows.
3818 */
3819
3820 /* This should only happen if the type is unsigned; however, for
3821 buggy programs that use overflowing signed arithmetics even with
3822 -fno-wrapv, this condition may also be true for signed values. */
3823 if (wi::ltu_p (bnd, cst))
3824 return max;
3825
3826 if (TYPE_UNSIGNED (type))
3827 {
3828 tree tem = fold_binary (GE_EXPR, boolean_type_node, op0,
3829 wide_int_to_tree (type, cst));
3830 if (!tem || integer_nonzerop (tem))
3831 return max;
3832 }
3833
3834 bnd -= cst;
3835 }
3836
3837 return bnd;
3838
3839 case FLOOR_DIV_EXPR:
3840 case EXACT_DIV_EXPR:
3841 if (TREE_CODE (op1) != INTEGER_CST
3842 || tree_int_cst_sign_bit (op1))
3843 return max;
3844
3845 bnd = derive_constant_upper_bound (op0);
3846 return wi::udiv_floor (bnd, wi::to_widest (op1));
3847
3848 case BIT_AND_EXPR:
3849 if (TREE_CODE (op1) != INTEGER_CST
3850 || tree_int_cst_sign_bit (op1))
3851 return max;
3852 return wi::to_widest (op1);
3853
3854 case SSA_NAME:
3855 stmt = SSA_NAME_DEF_STMT (op0);
3856 if (gimple_code (stmt) != GIMPLE_ASSIGN
3857 || gimple_assign_lhs (stmt) != op0)
3858 return max;
3859 return derive_constant_upper_bound_assign (stmt);
3860
3861 default:
3862 return max;
3863 }
3864}
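
A worked instance of two of the cases above (SSA names hypothetical):

    /* For x_3 defined by x_3 = x_2 & 255, a query on x_3 hits the SSA_NAME
       case, forwards to the defining assignment, and the BIT_AND_EXPR case
       returns 255 as the bound regardless of what is known about x_2.  */
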
3865
3866/* Emit a -Waggressive-loop-optimizations warning if needed. */
3867
3868static void
3869do_warn_aggressive_loop_optimizations (class loop *loop,
3870 widest_int i_bound, gimple *stmt)
3871{
3872 /* Don't warn if the loop doesn't have known constant bound. */
3873 if (!loop->nb_iterations
3874 || TREE_CODE (loop->nb_iterations) != INTEGER_CST
3875 || !warn_aggressive_loop_optimizations
3876 /* To avoid warning multiple times for the same loop,
3877 only start warning when we preserve loops. */
3878 || (cfun->curr_properties & PROP_loops) == 0
3879 /* Only warn once per loop. */
3880 || loop->warned_aggressive_loop_optimizations
3881 /* Only warn if undefined behavior gives us lower estimate than the
3882 known constant bound. */
3883 || wi::cmpu (i_bound, wi::to_widest (loop->nb_iterations)) >= 0
3884 /* And undefined behavior happens unconditionally. */
3885 || !dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (stmt)))
3886 return;
3887
3888 edge e = single_exit (loop);
3889 if (e == NULL)
3890 return;
3891
3892 gimple *estmt = last_stmt (e->src);
3893 char buf[WIDE_INT_PRINT_BUFFER_SIZE];
3894 print_dec (i_bound, buf, TYPE_UNSIGNED (TREE_TYPE (loop->nb_iterations))
3895 ? UNSIGNED : SIGNED);
3896 auto_diagnostic_group d;
3897 if (warning_at (gimple_location (stmt), OPT_Waggressive_loop_optimizations,
3898 "iteration %s invokes undefined behavior", buf))
3899 inform (gimple_location (estmt), "within this loop");
3900 loop->warned_aggressive_loop_optimizations = true;
3901}
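
The warning fires on the classic out-of-bounds induction variable, e.g.:

    /* a[i] invokes undefined behavior once i reaches 4, which bounds the
       latch at 4 executions, below the "known" constant bound of 10; with
       -Waggressive-loop-optimizations this prints
       "iteration 4 invokes undefined behavior".  */
    int a[4];
    void
    fill (void)
    {
      for (int i = 0; i < 10; i++)
        a[i] = i;
    }
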
3902
3903/* Records that AT_STMT is executed at most BOUND + 1 times in LOOP. IS_EXIT
3904 is true if the loop is exited immediately after STMT, and this exit
3905 is taken at the latest when STMT has been executed BOUND + 1 times.
3906 REALISTIC is true if BOUND is expected to be close to the real number
3907 of iterations. UPPER is true if we are sure the loop iterates at most
3908 BOUND times. I_BOUND is a widest_int upper estimate on BOUND. */
3909
3910static void
3911record_estimate (class loop *loop, tree bound, const widest_int &i_bound,
3912 gimple *at_stmt, bool is_exit, bool realistic, bool upper)
3913{
3914 widest_int delta;
3915
3916 if (dump_file && (dump_flags & TDF_DETAILS))
3917 {
3918 fprintf (dump_file, "Statement %s", is_exit ? "(exit)" : "");
3919 print_gimple_stmt (dump_file, at_stmt, 0, TDF_SLIM);
3920 fprintf (dump_file, " is %sexecuted at most ",
3921 upper ? "" : "probably ");
3922 print_generic_expr (dump_file, bound, TDF_SLIM);
3923 fprintf (dump_file, " (bounded by ");
3924 print_decu (i_bound, dump_file);
3925 fprintf (dump_file, ") + 1 times in loop %d.\n", loop->num);
3926 }
3927
3928 /* If the I_BOUND is just an estimate of BOUND, it rarely is close to the
3929 real number of iterations. */
3930 if (TREE_CODE (bound) != INTEGER_CST)
3931 realistic = false;
3932 else
3933 gcc_checking_assert (i_bound == wi::to_widest (bound));
3934
3935 /* If we have a guaranteed upper bound, record it in the appropriate
3936 list, unless this is an !is_exit bound (i.e. undefined behavior in
3937 at_stmt) in a loop with known constant number of iterations. */
3938 if (upper
3939 && (is_exit
3940 || loop->nb_iterations == NULL_TREE
3941 || TREE_CODE (loop->nb_iterations) != INTEGER_CST))
3942 {
3943 class nb_iter_bound *elt = ggc_alloc<nb_iter_bound> ();
3944
3945 elt->bound = i_bound;
3946 elt->stmt = at_stmt;
3947 elt->is_exit = is_exit;
3948 elt->next = loop->bounds;
3949 loop->bounds = elt;
3950 }
3951
3952 /* If statement is executed on every path to the loop latch, we can directly
3953 infer the upper bound on the # of iterations of the loop. */
3954 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (at_stmt)))
3955 upper = false;
3956
3957 /* Update the number of iteration estimates according to the bound.
3958 If at_stmt is an exit then the loop latch is executed at most BOUND times,
3959 otherwise it can be executed BOUND + 1 times. We will lower the estimate
3960 later if such statement must be executed on last iteration */
3961 if (is_exit)
3962 delta = 0;
3963 else
3964 delta = 1;
3965 widest_int new_i_bound = i_bound + delta;
3966
3967 /* If an overflow occurred, ignore the result. */
3968 if (wi::ltu_p (new_i_bound, delta))
3969 return;
3970
3971 if (upper && !is_exit)
3972 do_warn_aggressive_loop_optimizations (loop, new_i_bound, at_stmt);
3973 record_niter_bound (loop, new_i_bound, realistic, upper);
3974}
3975
3976/* Records the control iv analyzed in NITER for LOOP if the iv is valid
3977 and doesn't overflow. */
3978
3979static void
3980record_control_iv (class loop *loop, class tree_niter_desc *niter)
3981{
3982 struct control_iv *iv;
3983
3984 if (!niter->control.base || !niter->control.step)
3985 return;
3986
3987 if (!integer_onep (niter->assumptions) || !niter->control.no_overflow)
3988 return;
3989
3990 iv = ggc_alloc<control_iv> ();
3991 iv->base = niter->control.base;
3992 iv->step = niter->control.step;
3993 iv->next = loop->control_ivs;
3994 loop->control_ivs = iv;
3995
3996 return;
3997}
3998
3999/* This function returns TRUE if the conditions below are satisfied:
4000 1) VAR is an SSA variable.
4001 2) VAR is an IV:{base, step} in its defining loop.
4002 3) The IV doesn't overflow.
4003 4) Both base and step are integer constants.
4004 5) Base is the MIN/MAX value, depending on IS_MIN.
4005 The value of base is stored to INIT. */
4006
4007static bool
4008get_cst_init_from_scev (tree var, wide_int *init, bool is_min)
4009{
4010 if (TREE_CODE (var) != SSA_NAME)
4011 return false;
4012
4013 gimple *def_stmt = SSA_NAME_DEF_STMT (var);
4014 class loop *loop = loop_containing_stmt (def_stmt);
4015
4016 if (loop == NULL)
4017 return false;
4018
4019 affine_iv iv;
4020 if (!simple_iv (loop, loop, var, &iv, false))
4021 return false;
4022
4023 if (!iv.no_overflow)
4024 return false;
4025
4026 if (TREE_CODE (iv.base) != INTEGER_CST || TREE_CODE (iv.step) != INTEGER_CST)
4027 return false;
4028
4029 if (is_min == tree_int_cst_sign_bit (iv.step))
4030 return false;
4031
4032 *init = wi::to_wide (iv.base);
4033 return true;
4034}
4035
4036/* Record the estimate on number of iterations of LOOP based on the fact that
4037 the induction variable BASE + STEP * i evaluated in STMT does not wrap and
4038 its values belong to the range <LOW, HIGH>. REALISTIC is true if the
4039 estimated number of iterations is expected to be close to the real one.
4040 UPPER is true if we are sure the induction variable does not wrap. */
4041
4042static void
4043record_nonwrapping_iv (class loop *loop, tree base, tree step, gimple *stmt,
4044 tree low, tree high, bool realistic, bool upper)
4045{
4046 tree niter_bound, extreme, delta;
4047 tree type = TREE_TYPE (base), unsigned_type;
4048 tree orig_base = base;
4049
4050 if (TREE_CODE (step) != INTEGER_CST || integer_zerop (step))
  26: Assuming field 'code' is equal to INTEGER_CST
  27: Assuming the condition is false
4051 return;
4052
4053 if (dump_file && (dump_flags & TDF_DETAILS))
  28: Assuming 'dump_file' is null
4054 {
4055 fprintf (dump_file, "Induction variable (");
4056 print_generic_expr (dump_file, TREE_TYPE (base), TDF_SLIM);
4057 fprintf (dump_file, ") ");
4058 print_generic_expr (dump_file, base, TDF_SLIM);
4059 fprintf (dump_file, " + ");
4060 print_generic_expr (dump_file, step, TDF_SLIM);
4061 fprintf (dump_file, " * iteration does not wrap in statement ");
4062 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
4063 fprintf (dump_file, " in loop %d.\n", loop->num);
4064 }
4065
4066 unsigned_type = unsigned_type_for (type);
4067 base = fold_convert (unsigned_type, base);
4068 step = fold_convert (unsigned_type, step);
4069
4070 if (tree_int_cst_sign_bit (step))
  29: Assuming the condition is true
  30: Taking true branch
4071 {
4072 wide_int max;
  31: Calling default constructor for 'generic_wide_int<wide_int_storage>'
  36: Returning from default constructor for 'generic_wide_int<wide_int_storage>'
4073 Value_Range base_range (TREE_TYPE (orig_base));
4074 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
  37: Assuming the condition is false
4075 && !base_range.undefined_p ())
4076 max = base_range.upper_bound ();
4077 extreme = fold_convert (unsigned_type, low);
4078 if (TREE_CODE (orig_base) == SSA_NAME
  38: Assuming field 'code' is equal to SSA_NAME
4079 && TREE_CODE (high) == INTEGER_CST
  39: Assuming field 'code' is equal to INTEGER_CST
4080 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
  40: Assuming field 'code' is equal to ENUMERAL_TYPE
4081 && (base_range.kind () == VR_RANGE
  41: Assuming the condition is true
4082 || get_cst_init_from_scev (orig_base, &max, false))
4083 && wi::gts_p (wi::to_wide (high), max))
  42: Calling 'gts_p<generic_wide_int<wide_int_ref_storage<false, false>>, generic_wide_int<wide_int_storage>>'
4084 base = wide_int_to_tree (unsigned_type, max);
4085 else if (TREE_CODE (base) != INTEGER_CST
4086 && dominated_by_p (CDI_DOMINATORS,
4087 loop->latch, gimple_bb (stmt)))
4088 base = fold_convert (unsigned_type, high);
4089 delta = fold_build2 (MINUS_EXPR, unsigned_type, base, extreme);
4090 step = fold_build1 (NEGATE_EXPR, unsigned_type, step);
4091 }
4092 else
4093 {
4094 wide_int min;
4095 Value_Range base_range (TREE_TYPE (orig_base));
4096 if (get_range_query (cfun)->range_of_expr (base_range, orig_base)
4097 && !base_range.undefined_p ())
4098 min = base_range.lower_bound ();
4099 extreme = fold_convert (unsigned_type, high);
4100 if (TREE_CODE (orig_base) == SSA_NAME
4101 && TREE_CODE (low) == INTEGER_CST
4102 && INTEGRAL_TYPE_P (TREE_TYPE (orig_base))
4103 && (base_range.kind () == VR_RANGE
4104 || get_cst_init_from_scev (orig_base, &min, true))
4105 && wi::gts_p (min, wi::to_wide (low)))
4106 base = wide_int_to_tree (unsigned_type, min);
4107 else if (TREE_CODE (base) != INTEGER_CST
4108 && dominated_by_p (CDI_DOMINATORS,
4109 loop->latch, gimple_bb (stmt)))
4110 base = fold_convert (unsigned_type, low);
4111 delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, base);
4112 }
4113
4114 /* STMT is executed at most NITER_BOUND + 1 times, since otherwise the value
4115 would get out of the range. */
4116 niter_bound = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step);
4117 widest_int max = derive_constant_upper_bound (niter_bound);
4118 record_estimate (loop, niter_bound, max, stmt, false, realistic, upper);
4119}
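
A worked instance of the bound computation above, with illustrative numbers:

    /* For a nonwrapping IV with base 0 and step 2 whose values must stay
       in <0, 99>: delta = 99 - 0, niter_bound = floor (99 / 2) = 49, so
       the statement executes at most 49 + 1 times and record_estimate
       bounds the latch count accordingly.  */
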
4120
4121/* Determine information about the number of iterations of a LOOP from the
4122 index IDX of a data reference accessed in STMT. RELIABLE is true if STMT
4123 is guaranteed to be executed in every iteration of LOOP. Callback for
4124 for_each_index. */
4125
4126struct ilb_data
4127{
4128 class loop *loop;
4129 gimple *stmt;
4130};
4131
4132static bool
4133idx_infer_loop_bounds (tree base, tree *idx, void *dta)
4134{
4135 struct ilb_data *data = (struct ilb_data *) dta;
4136 tree ev, init, step;
4137 tree low, high, type, next;
4138 bool sign, upper = true, has_flexible_size = false;
4139 class loop *loop = data->loop;
4140
4141 if (TREE_CODE (base) != ARRAY_REF)
  1: Assuming field 'code' is equal to ARRAY_REF
  2: Taking false branch
4142 return true;
4143
4144 /* For arrays that might have flexible sizes, it is not guaranteed that they
4145 do not really extend over their declared size. */
4146 if (array_ref_flexible_size_p (base))
  3: Assuming the condition is false
  4: Taking false branch
4147 {
4148 has_flexible_size = true;
4149 upper = false;
4150 }
4151
4152 class loop *dloop = loop_containing_stmt (data->stmt);
4153 if (!dloop)
  5: Assuming 'dloop' is non-null
  6: Taking false branch
4154 return true;
4155
4156 ev = analyze_scalar_evolution (dloop, *idx);
4157 ev = instantiate_parameters (loop, ev);
4158 init = initial_condition (ev);
4159 step = evolution_part_in_loop_num (ev, loop->num);
4160
4161 if (!init
  7: Assuming 'init' is non-null
  13: Taking false branch
4162 || !step
  8: Assuming 'step' is non-null
4163 || TREE_CODE (step) != INTEGER_CST
  9: Assuming field 'code' is equal to INTEGER_CST
4164 || integer_zerop (step)
  10: Assuming the condition is false
4165 || tree_contains_chrecs (init, NULL)
  11: Assuming the condition is false
4166 || chrec_contains_symbols_defined_in_loop (init, loop->num))
  12: Assuming the condition is false
4167 return true;
4168
4169 low = array_ref_low_bound (base);
4170 high = array_ref_up_bound (base);
4171
4172 /* The case of nonconstant bounds could be handled, but it would be
4173 complicated. */
4174 if (TREE_CODE (low) != INTEGER_CST
  14: Assuming field 'code' is equal to INTEGER_CST
  17: Taking false branch
4175 || !high
  15: Assuming 'high' is non-null
4176 || TREE_CODE (high) != INTEGER_CST)
  16: Assuming field 'code' is equal to INTEGER_CST
4177 return true;
4178 sign = tree_int_cst_sign_bit (step);
4179 type = TREE_TYPE (step);
4180
4181 /* An array that might have flexible size most likely extends
4182 beyond its bounds. */
4183 if (has_flexible_size
  17.1: 'has_flexible_size' is false
4184 && operand_equal_p (low, high, 0))
4185 return true;
4186
4187 /* In case the relevant bound of the array does not fit in type, or
4188 it does, but bound + step (in type) still belongs into the range of the
4189 array, the index may wrap and still stay within the range of the array
4190 (consider e.g. if the array is indexed by the full range of
4191 unsigned char).
4192
4193 To make things simpler, we require both bounds to fit into type, although
4194 there are cases where this would not be strictly necessary. */
4195 if (!int_fits_type_p (high, type)
  18: Assuming the condition is false
  20: Taking false branch
4196 || !int_fits_type_p (low, type))
  19: Assuming the condition is false
4197 return true;
4198 low = fold_convert (type, low);
4199 high = fold_convert (type, high);
4200
4201 if (sign)
  21: Assuming 'sign' is false
  22: Taking false branch
4202 next = fold_binary (PLUS_EXPR, type, low, step);
4203 else
4204 next = fold_binary (PLUS_EXPR, type, high, step);
4205
4206 if (tree_int_cst_compare (low, next) <= 0
  23: Assuming the condition is false
4207 && tree_int_cst_compare (next, high) <= 0)
4208 return true;
4209
4210 /* If the access is not executed on every iteration, we must ensure that
4211 overflow cannot make the access valid later. */
4212 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, gimple_bb (data->stmt))
  24: Assuming the condition is false
4213 && scev_probably_wraps_p (NULL_TREE,
4214 initial_condition_in_loop_num (ev, loop->num),
4215 step, data->stmt, loop, true))
4216 upper = false;
4217
4218 record_nonwrapping_iv (loop, init, step, data->stmt, low, high, false, upper);
  25: Calling 'record_nonwrapping_iv'
4219   return true;
4220 }
4221
4222 /* Determine information about number of iterations of a LOOP from the bounds
4223    of arrays in the data reference REF accessed in STMT.  RELIABLE is true if
4224    STMT is guaranteed to be executed in every iteration of LOOP.  */
4225
4226 static void
4227 infer_loop_bounds_from_ref (class loop *loop, gimple *stmt, tree ref)
4228 {
4229   struct ilb_data data;
4230
4231   data.loop = loop;
4232   data.stmt = stmt;
4233   for_each_index (&ref, idx_infer_loop_bounds, &data);
4234 }
4235
4236 /* Determine information about number of iterations of a LOOP from the way
4237    arrays are used in STMT.  RELIABLE is true if STMT is guaranteed to be
4238    executed in every iteration of LOOP.  */
4239
4240 static void
4241 infer_loop_bounds_from_array (class loop *loop, gimple *stmt)
4242 {
4243   if (is_gimple_assign (stmt))
4244     {
4245       tree op0 = gimple_assign_lhs (stmt);
4246       tree op1 = gimple_assign_rhs1 (stmt);
4247
4248       /* For each memory access, analyze its access function
4249          and record a bound on the loop iteration domain.  */
4250       if (REFERENCE_CLASS_P (op0))
4251         infer_loop_bounds_from_ref (loop, stmt, op0);
4252
4253       if (REFERENCE_CLASS_P (op1))
4254         infer_loop_bounds_from_ref (loop, stmt, op1);
4255     }
4256   else if (is_gimple_call (stmt))
4257     {
4258       tree arg, lhs;
4259       unsigned i, n = gimple_call_num_args (stmt);
4260
4261       lhs = gimple_call_lhs (stmt);
4262       if (lhs && REFERENCE_CLASS_P (lhs))
4263         infer_loop_bounds_from_ref (loop, stmt, lhs);
4264
4265       for (i = 0; i < n; i++)
4266         {
4267           arg = gimple_call_arg (stmt, i);
4268           if (REFERENCE_CLASS_P (arg))
4269             infer_loop_bounds_from_ref (loop, stmt, arg);
4270         }
4271     }
4272 }
4273
4274 /* Determine information about number of iterations of a LOOP from the fact
4275    that pointer arithmetic in STMT does not overflow.  */
4276
4277 static void
4278 infer_loop_bounds_from_pointer_arith (class loop *loop, gimple *stmt)
4279 {
4280   tree def, base, step, scev, type, low, high;
4281   tree var, ptr;
4282
4283   if (!is_gimple_assign (stmt)
4284       || gimple_assign_rhs_code (stmt) != POINTER_PLUS_EXPR)
4285     return;
4286
4287   def = gimple_assign_lhs (stmt);
4288   if (TREE_CODE (def) != SSA_NAME)
4289     return;
4290
4291   type = TREE_TYPE (def);
4292   if (!nowrap_type_p (type))
4293     return;
4294
4295   ptr = gimple_assign_rhs1 (stmt);
4296   if (!expr_invariant_in_loop_p (loop, ptr))
4297     return;
4298
4299   var = gimple_assign_rhs2 (stmt);
4300   if (TYPE_PRECISION (type) != TYPE_PRECISION (TREE_TYPE (var)))
4301     return;
4302
4303   class loop *uloop = loop_containing_stmt (stmt);
4304   scev = instantiate_parameters (loop, analyze_scalar_evolution (uloop, def));
4305   if (chrec_contains_undetermined (scev))
4306     return;
4307
4308   base = initial_condition_in_loop_num (scev, loop->num);
4309   step = evolution_part_in_loop_num (scev, loop->num);
4310
4311   if (!base || !step
4312       || TREE_CODE (step) != INTEGER_CST
4313       || tree_contains_chrecs (base, NULL)
4314       || chrec_contains_symbols_defined_in_loop (base, loop->num))
4315     return;
4316
4317   low = lower_bound_in_type (type, type);
4318   high = upper_bound_in_type (type, type);
4319
4320   /* In C, pointer arithmetic p + 1 cannot use a NULL pointer, and p - 1 cannot
4321      produce a NULL pointer.  The contrary would mean NULL points to an object,
4322      while NULL is supposed to compare unequal with the address of all objects.
4323      Furthermore, p + 1 cannot produce a NULL pointer and p - 1 cannot use a
4324      NULL pointer since that would mean wrapping, which we assume here not to
4325      happen.  So, we can exclude NULL from the valid range of pointer
4326      arithmetic.  */
4327   if (flag_delete_null_pointer_checks && int_cst_value (low) == 0)
4328     low = build_int_cstu (TREE_TYPE (low), TYPE_ALIGN_UNIT (TREE_TYPE (type)));
4329
4330   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
4331 }
4332
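A minimal illustration, not part of the GCC sources (the identifiers are invented): a loop of the shape the function above can bound.  Under default C/C++ semantics (no -fwrapv-pointer), the pointer increment may neither wrap nor pass through NULL:

    void
    fill (char *p, unsigned long n)
    {
      for (unsigned long i = 0; i != n; i++)
        *p++ = 0;   /* p = p + 1 is a POINTER_PLUS_EXPR; since it cannot
                       wrap, the evolution of p bounds the iteration count.  */
    }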
4333 /* Determine information about number of iterations of a LOOP from the fact
4334    that signed arithmetic in STMT does not overflow.  */
4335
4336 static void
4337 infer_loop_bounds_from_signedness (class loop *loop, gimple *stmt)
4338 {
4339   tree def, base, step, scev, type, low, high;
4340
4341   if (gimple_code (stmt) != GIMPLE_ASSIGN)
4342     return;
4343
4344   def = gimple_assign_lhs (stmt);
4345
4346   if (TREE_CODE (def) != SSA_NAME)
4347     return;
4348
4349   type = TREE_TYPE (def);
4350   if (!INTEGRAL_TYPE_P (type)
4351       || !TYPE_OVERFLOW_UNDEFINED (type))
4352     return;
4353
4354   scev = instantiate_parameters (loop, analyze_scalar_evolution (loop, def));
4355   if (chrec_contains_undetermined (scev))
4356     return;
4357
4358   base = initial_condition_in_loop_num (scev, loop->num);
4359   step = evolution_part_in_loop_num (scev, loop->num);
4360
4361   if (!base || !step
4362       || TREE_CODE (step) != INTEGER_CST
4363       || tree_contains_chrecs (base, NULL)
4364       || chrec_contains_symbols_defined_in_loop (base, loop->num))
4365     return;
4366
4367   low = lower_bound_in_type (type, type);
4368   high = upper_bound_in_type (type, type);
4369   Value_Range r (TREE_TYPE (def));
4370   get_range_query (cfun)->range_of_expr (r, def);
4371   if (r.kind () == VR_RANGE)
4372     {
4373       low = wide_int_to_tree (type, r.lower_bound ());
4374       high = wide_int_to_tree (type, r.upper_bound ());
4375     }
4376
4377   record_nonwrapping_iv (loop, base, step, stmt, low, high, false, true);
4378 }
4379
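Again purely for illustration (invented example; do_something is a hypothetical callee): with signed overflow undefined, the increment below lets the function above record i as a nonwrapping IV, so the loop is bounded even though no array is accessed:

    extern void do_something (int);

    void
    count_up (int n)
    {
      for (int i = 0; i <= n; i++)   /* i + 1 is signed addition; overflow
                                        would be undefined behavior, so i
                                        never leaves the type's range.  */
        do_something (i);
    }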
4380 /* The following analyzers are extracting information on the bounds
4381    of LOOP from the following undefined behaviors:
4382
4383    - data references should not access elements over the statically
4384      allocated size,
4385
4386    - signed variables should not overflow when flag_wrapv is not set.
4387 */
4388
4389 static void
4390 infer_loop_bounds_from_undefined (class loop *loop, basic_block *bbs)
4391 {
4392   unsigned i;
4393   gimple_stmt_iterator bsi;
4394   basic_block bb;
4395   bool reliable;
4396
4397   for (i = 0; i < loop->num_nodes; i++)
4398     {
4399       bb = bbs[i];
4400
4401       /* If BB is not executed in each iteration of the loop, we cannot
4402          use the operations in it to infer a reliable upper bound on the
4403          # of iterations of the loop.  However, we can use it as a guess.
4404          Reliable guesses come only from array bounds.  */
4405       reliable = dominated_by_p (CDI_DOMINATORS, loop->latch, bb);
4406
4407       for (bsi = gsi_start_bb (bb); !gsi_end_p (bsi); gsi_next (&bsi))
4408         {
4409           gimple *stmt = gsi_stmt (bsi);
4410
4411           infer_loop_bounds_from_array (loop, stmt);
4412
4413           if (reliable)
4414             {
4415               infer_loop_bounds_from_signedness (loop, stmt);
4416               infer_loop_bounds_from_pointer_arith (loop, stmt);
4417             }
4418         }
4419
4420     }
4421 }
4422
4423 /* Compare wide ints, callback for qsort.  */
4424
4425 static int
4426 wide_int_cmp (const void *p1, const void *p2)
4427 {
4428   const widest_int *d1 = (const widest_int *) p1;
4429   const widest_int *d2 = (const widest_int *) p2;
4430   return wi::cmpu (*d1, *d2);
4431 }
4432
4433 /* Return index of BOUND in BOUNDS array sorted in increasing order.
4434    Lookup by binary search.  */
4435
4436 static int
4437 bound_index (const vec<widest_int> &bounds, const widest_int &bound)
4438 {
4439   unsigned int end = bounds.length ();
4440   unsigned int begin = 0;
4441
4442   /* Find a matching index by means of a binary search.  */
4443   while (begin != end)
4444     {
4445       unsigned int middle = (begin + end) / 2;
4446       widest_int index = bounds[middle];
4447
4448       if (index == bound)
4449         return middle;
4450       else if (wi::ltu_p (index, bound))
4451         begin = middle + 1;
4452       else
4453         end = middle;
4454     }
4455   gcc_unreachable ();
4456 }
4457
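The same compress-to-index trick in isolation, as a standalone sketch using the C++ standard library instead of GCC's vec and wide-int types:

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    /* Mirror of bound_index above: map a value known to be present in a
       sorted vector to its index.  */
    static std::size_t
    value_index (const std::vector<unsigned long> &sorted, unsigned long value)
    {
      auto it = std::lower_bound (sorted.begin (), sorted.end (), value);
      /* Like bound_index, this assumes the caller only asks for values
         that are actually present.  */
      return static_cast<std::size_t> (it - sorted.begin ());
    }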
4458 /* We recorded loop bounds only for statements dominating loop latch (and thus
4459    executed each loop iteration).  If there are any bounds on statements not
4460    dominating the loop latch we can improve the estimate by walking the loop
4461    body and seeing if every path from loop header to loop latch contains
4462    some bounded statement.  */
4463
4464 static void
4465 discover_iteration_bound_by_body_walk (class loop *loop)
4466 {
4467   class nb_iter_bound *elt;
4468   auto_vec<widest_int> bounds;
4469   vec<vec<basic_block> > queues = vNULL;
4470   vec<basic_block> queue = vNULL;
4471   ptrdiff_t queue_index;
4472   ptrdiff_t latch_index = 0;
4473
4474   /* Discover what bounds may interest us.  */
4475   for (elt = loop->bounds; elt; elt = elt->next)
4476     {
4477       widest_int bound = elt->bound;
4478
4479       /* Exit terminates loop at given iteration, while non-exits produce undefined
4480          effect on the next iteration.  */
4481       if (!elt->is_exit)
4482         {
4483           bound += 1;
4484           /* If an overflow occurred, ignore the result.  */
4485           if (bound == 0)
4486             continue;
4487         }
4488
4489       if (!loop->any_upper_bound
4490           || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
4491         bounds.safe_push (bound);
4492     }
4493
4494   /* Exit early if there is nothing to do.  */
4495   if (!bounds.exists ())
4496     return;
4497
4498   if (dump_file && (dump_flags & TDF_DETAILS))
4499     fprintf (dump_file, " Trying to walk loop body to reduce the bound.\n");
4500
4501   /* Sort the bounds in increasing order.  */
4502   bounds.qsort (wide_int_cmp);
4503
4504   /* For every basic block record the lowest bound that is guaranteed to
4505      terminate the loop.  */
4506
4507   hash_map<basic_block, ptrdiff_t> bb_bounds;
4508   for (elt = loop->bounds; elt; elt = elt->next)
4509     {
4510       widest_int bound = elt->bound;
4511       if (!elt->is_exit)
4512         {
4513           bound += 1;
4514           /* If an overflow occurred, ignore the result.  */
4515           if (bound == 0)
4516             continue;
4517         }
4518
4519       if (!loop->any_upper_bound
4520           || wi::ltu_p (bound, loop->nb_iterations_upper_bound))
4521         {
4522           ptrdiff_t index = bound_index (bounds, bound);
4523           ptrdiff_t *entry = bb_bounds.get (gimple_bb (elt->stmt));
4524           if (!entry)
4525             bb_bounds.put (gimple_bb (elt->stmt), index);
4526           else if ((ptrdiff_t)*entry > index)
4527             *entry = index;
4528         }
4529     }
4530
4531   hash_map<basic_block, ptrdiff_t> block_priority;
4532
4533   /* Perform shortest path discovery loop->header ... loop->latch.
4534
4535      The "distance" is given by the smallest loop bound of basic block
4536      present in the path and we look for the path with the largest smallest
4537      bound on it.
4538
4539      To avoid the need for a Fibonacci heap on double ints we simply compress
4540      double ints into indexes to the BOUNDS array and then represent the queue
4541      as arrays of queues for every index.
4542      Index of BOUNDS.length() means that the execution of a given BB has
4543      no bounds determined.
4544
4545      VISITED is a pointer map translating basic block into smallest index
4546      it was inserted into the priority queue with.  */
4547   latch_index = -1;
4548
4549   /* Start walk in loop header with index set to infinite bound.  */
4550   queue_index = bounds.length ();
4551   queues.safe_grow_cleared (queue_index + 1, true);
4552   queue.safe_push (loop->header);
4553   queues[queue_index] = queue;
4554   block_priority.put (loop->header, queue_index);
4555
4556   for (; queue_index >= 0; queue_index--)
4557     {
4558       if (latch_index < queue_index)
4559         {
4560           while (queues[queue_index].length ())
4561             {
4562               basic_block bb;
4563               ptrdiff_t bound_index = queue_index;
4564               edge e;
4565               edge_iterator ei;
4566
4567               queue = queues[queue_index];
4568               bb = queue.pop ();
4569
4570               /* OK, we later inserted the BB with lower priority, skip it.  */
4571               if (*block_priority.get (bb) > queue_index)
4572                 continue;
4573
4574               /* See if we can improve the bound.  */
4575               ptrdiff_t *entry = bb_bounds.get (bb);
4576               if (entry && *entry < bound_index)
4577                 bound_index = *entry;
4578
4579               /* Insert successors into the queue, watch for latch edge
4580                  and record greatest index we saw.  */
4581               FOR_EACH_EDGE (e, ei, bb->succs)
4582                 {
4583                   bool insert = false;
4584
4585                   if (loop_exit_edge_p (loop, e))
4586                     continue;
4587
4588                   if (e == loop_latch_edge (loop)
4589                       && latch_index < bound_index)
4590                     latch_index = bound_index;
4591                   else if (!(entry = block_priority.get (e->dest)))
4592                     {
4593                       insert = true;
4594                       block_priority.put (e->dest, bound_index);
4595                     }
4596                   else if (*entry < bound_index)
4597                     {
4598                       insert = true;
4599                       *entry = bound_index;
4600                     }
4601
4602                   if (insert)
4603                     queues[bound_index].safe_push (e->dest);
4604                 }
4605             }
4606         }
4607       queues[queue_index].release ();
4608     }
4609
4610   gcc_assert (latch_index >= 0);
4611   if ((unsigned)latch_index < bounds.length ())
4612     {
4613       if (dump_file && (dump_flags & TDF_DETAILS))
4614         {
4615           fprintf (dump_file, "Found better loop bound ");
4616           print_decu (bounds[latch_index], dump_file);
4617           fprintf (dump_file, "\n");
4618         }
4619       record_niter_bound (loop, bounds[latch_index], false, true);
4620     }
4621
4622   queues.release ();
4623 }
4624
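The "array of queues per compressed bound" idea, sketched standalone: with priorities drawn from a small index set, one bucket per index replaces a Fibonacci heap and each pop is O(1).  A minimal C++ sketch (names invented):

    #include <cstddef>
    #include <vector>

    /* Drain items in decreasing priority order, where a priority is an
       index into the compressed bound array, as in the walk above.  */
    template <typename T, typename Visit>
    void
    drain_buckets (std::vector<std::vector<T> > &buckets, Visit visit)
    {
      for (std::size_t prio = buckets.size (); prio-- > 0;)
        while (!buckets[prio].empty ())
          {
            T item = buckets[prio].back ();
            buckets[prio].pop_back ();
            visit (item, prio);   /* May push into buckets[j] for j <= prio.  */
          }
    }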
4625 /* See if every path across the loop goes through a statement that is known
4626    to not execute at the last iteration.  In that case we can decrease the
4627    iteration count by 1.  */
4628
4629 static void
4630 maybe_lower_iteration_bound (class loop *loop)
4631 {
4632   hash_set<gimple *> *not_executed_last_iteration = NULL;
4633   class nb_iter_bound *elt;
4634   bool found_exit = false;
4635   auto_vec<basic_block> queue;
4636   bitmap visited;
4637
4638   /* Collect all statements with interesting (i.e. lower than
4639      nb_iterations_upper_bound) bound on them.
4640
4641      TODO: Due to the way record_estimate chooses estimates to store, the bounds
4642      will always be nb_iterations_upper_bound-1.  We can change this to record
4643      also statements not dominating the loop latch and update the walk below
4644      to the shortest path algorithm.  */
4645   for (elt = loop->bounds; elt; elt = elt->next)
4646     {
4647       if (!elt->is_exit
4648           && wi::ltu_p (elt->bound, loop->nb_iterations_upper_bound))
4649         {
4650           if (!not_executed_last_iteration)
4651             not_executed_last_iteration = new hash_set<gimple *>;
4652           not_executed_last_iteration->add (elt->stmt);
4653         }
4654     }
4655   if (!not_executed_last_iteration)
4656     return;
4657
4658   /* Start DFS walk in the loop header and see if we can reach the
4659      loop latch or any of the exits (including statements with side
4660      effects that may terminate the loop otherwise) without visiting
4661      any of the statements known to have undefined effect on the last
4662      iteration.  */
4663   queue.safe_push (loop->header);
4664   visited = BITMAP_ALLOC (NULL);
4665   bitmap_set_bit (visited, loop->header->index);
4666   found_exit = false;
4667
4668   do
4669     {
4670       basic_block bb = queue.pop ();
4671       gimple_stmt_iterator gsi;
4672       bool stmt_found = false;
4673
4674       /* Loop for possible exits and statements bounding the execution.  */
4675       for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4676         {
4677           gimple *stmt = gsi_stmt (gsi);
4678           if (not_executed_last_iteration->contains (stmt))
4679             {
4680               stmt_found = true;
4681               break;
4682             }
4683           if (gimple_has_side_effects (stmt))
4684             {
4685               found_exit = true;
4686               break;
4687             }
4688         }
4689       if (found_exit)
4690         break;
4691
4692       /* If no bounding statement is found, continue the walk.  */
4693       if (!stmt_found)
4694         {
4695           edge e;
4696           edge_iterator ei;
4697
4698           FOR_EACH_EDGE (e, ei, bb->succs)
4699             {
4700               if (loop_exit_edge_p (loop, e)
4701                   || e == loop_latch_edge (loop))
4702                 {
4703                   found_exit = true;
4704                   break;
4705                 }
4706               if (bitmap_set_bit (visited, e->dest->index))
4707                 queue.safe_push (e->dest);
4708             }
4709         }
4710     }
4711   while (queue.length () && !found_exit);
4712
4713   /* If every path through the loop reaches a bounding statement before exit,
4714      then we know the last iteration of the loop will have undefined effect
4715      and we can decrease the number of iterations.  */
4716
4717   if (!found_exit)
4718     {
4719       if (dump_file && (dump_flags & TDF_DETAILS))
4720         fprintf (dump_file, "Reducing loop iteration estimate by 1; "
4721                  "undefined statement must be executed at the last iteration.\n");
4722       record_niter_bound (loop, loop->nb_iterations_upper_bound - 1,
4723                           false, true);
4724     }
4725
4726   BITMAP_FREE (visited);
4727   delete not_executed_last_iteration;
4728 }
4729
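An invented source-level example of what this buys: every path through the body below executes the store, whose array-bound-derived limit is one smaller than the exit test allows:

    int a[100];

    void
    f (void)
    {
      for (int i = 0; i <= 100; i++)
        a[i] = 0;   /* Undefined behavior once i reaches 100; since every
                       path through the body executes this store, the
                       recorded upper bound can be lowered by one.  */
    }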
4730 /* Get the expected upper bound on the number of loop iterations derived
4731    from BUILT_IN_EXPECT_WITH_PROBABILITY in a condition COND.  */
4732
4733 static tree
4734 get_upper_bound_based_on_builtin_expr_with_prob (gcond *cond)
4735 {
4736   if (cond == NULL)
4737     return NULL_TREE;
4738
4739   tree lhs = gimple_cond_lhs (cond);
4740   if (TREE_CODE (lhs) != SSA_NAME)
4741     return NULL_TREE;
4742
4743   gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
4744   gcall *def = dyn_cast<gcall *> (stmt);
4745   if (def == NULL)
4746     return NULL_TREE;
4747
4748   tree decl = gimple_call_fndecl (def);
4749   if (!decl
4750       || !fndecl_built_in_p (decl, BUILT_IN_EXPECT_WITH_PROBABILITY)
4751       || gimple_call_num_args (stmt) != 3)
4752     return NULL_TREE;
4753
4754   tree c = gimple_call_arg (def, 1);
4755   tree condt = TREE_TYPE (lhs);
4756   tree res = fold_build2 (gimple_cond_code (cond),
4757                           condt, c,
4758                           gimple_cond_rhs (cond));
4759   if (TREE_CODE (res) != INTEGER_CST)
4760     return NULL_TREE;
4761
4762
4763   tree prob = gimple_call_arg (def, 2);
4764   tree t = TREE_TYPE (prob);
4765   tree one
4766     = build_real_from_int_cst (t,
4767                                integer_one_node);
4768   if (integer_zerop (res))
4769     prob = fold_build2 (MINUS_EXPR, t, one, prob);
4770   tree r = fold_build2 (RDIV_EXPR, t, one, prob);
4771   if (TREE_CODE (r) != REAL_CST)
4772     return NULL_TREE;
4773
4774   HOST_WIDE_INT probi
4775     = real_to_integer (TREE_REAL_CST_PTR (r));
4776   return build_int_cst (condt, probi);
4777 }
4778
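For reference, a source-level pattern this recognizes (values and the hypothetical found_p are invented, and we assume this test controls the loop's single likely exit): the early exit fires with probability 1 - 0.99 = 0.01 per iteration, so the derived bound is roughly 1 / 0.01 = 100 iterations:

    for (i = 0; i < n; i++)
      if (__builtin_expect_with_probability (found_p (i), 0, 0.99))
        break;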
4779 /* Records estimates on numbers of iterations of LOOP.  If USE_UNDEFINED_P
4780    is true also use estimates derived from undefined behavior.  */
4781
4782 void
4783 estimate_numbers_of_iterations (class loop *loop)
4784 {
4785   tree niter, type;
4786   unsigned i;
4787   class tree_niter_desc niter_desc;
4788   edge ex;
4789   widest_int bound;
4790   edge likely_exit;
4791
4792   /* Give up if we have already tried to compute an estimation.  */
4793   if (loop->estimate_state != EST_NOT_COMPUTED)
4794     return;
4795
4796   if (dump_file && (dump_flags & TDF_DETAILS))
4797     fprintf (dump_file, "Estimating # of iterations of loop %d\n", loop->num);
4798
4799   loop->estimate_state = EST_AVAILABLE;
4800
4801   /* If we have a measured profile, use it to estimate the number of
4802      iterations.  Normally this is recorded by branch_prob right after
4803      reading the profile.  In case we however found a new loop, record the
4804      information here.
4805
4806      Explicitly check for profile status so we do not report
4807      wrong prediction hitrates for guessed loop iterations heuristics.
4808      Do not recompute already recorded bounds - we ought to be better at
4809      updating iteration bounds than updating profile in general and thus
4810      recomputing iteration bounds later in the compilation process will just
4811      introduce random roundoff errors.  */
4812   if (!loop->any_estimate
4813       && loop->header->count.reliable_p ())
4814     {
4815       gcov_type nit = expected_loop_iterations_unbounded (loop);
4816       bound = gcov_type_to_wide_int (nit);
4817       record_niter_bound (loop, bound, true, false);
4818     }
4819
4820   /* Ensure that loop->nb_iterations is computed if possible.  If it turns out
4821      to be constant, we avoid undefined behavior implied bounds and instead
4822      diagnose those loops with -Waggressive-loop-optimizations.  */
4823   number_of_latch_executions (loop);
4824
4825   basic_block *body = get_loop_body (loop);
4826   auto_vec<edge> exits = get_loop_exit_edges (loop, body);
4827   likely_exit = single_likely_exit (loop, exits);
4828   FOR_EACH_VEC_ELT (exits, i, ex)
4829     {
4830       if (ex == likely_exit)
4831         {
4832           gimple *stmt = last_stmt (ex->src);
4833           if (stmt != NULL)
4834             {
4835               gcond *cond = dyn_cast<gcond *> (stmt);
4836               tree niter_bound
4837                 = get_upper_bound_based_on_builtin_expr_with_prob (cond);
4838               if (niter_bound != NULL_TREE)
4839                 {
4840                   widest_int max = derive_constant_upper_bound (niter_bound);
4841                   record_estimate (loop, niter_bound, max, cond,
4842                                    true, true, false);
4843                 }
4844             }
4845         }
4846
4847       if (!number_of_iterations_exit (loop, ex, &niter_desc,
4848                                       false, false, body))
4849         continue;
4850
4851       niter = niter_desc.niter;
4852       type = TREE_TYPE (niter);
4853       if (TREE_CODE (niter_desc.may_be_zero) != INTEGER_CST)
4854         niter = build3 (COND_EXPR, type, niter_desc.may_be_zero,
4855                         build_int_cst (type, 0),
4856                         niter);
4857       record_estimate (loop, niter, niter_desc.max,
4858                        last_stmt (ex->src),
4859                        true, ex == likely_exit, true);
4860       record_control_iv (loop, &niter_desc);
4861     }
4862
4863   if (flag_aggressive_loop_optimizations)
4864     infer_loop_bounds_from_undefined (loop, body);
4865   free (body);
4866
4867   discover_iteration_bound_by_body_walk (loop);
4868
4869   maybe_lower_iteration_bound (loop);
4870
4871   /* If we know the exact number of iterations of this loop, try to
4872      not break code with undefined behavior by not recording smaller
4873      maximum number of iterations.  */
4874   if (loop->nb_iterations
4875       && TREE_CODE (loop->nb_iterations) == INTEGER_CST)
4876     {
4877       loop->any_upper_bound = true;
4878       loop->nb_iterations_upper_bound = wi::to_widest (loop->nb_iterations);
4879     }
4880 }
4881
4882 /* Sets NIT to the estimated number of executions of the latch of the
4883    LOOP.  If CONSERVATIVE is true, we must be sure that NIT is at least as
4884    large as the number of iterations.  If we have no reliable estimate,
4885    the function returns false, otherwise returns true.  */
4886
4887 bool
4888 estimated_loop_iterations (class loop *loop, widest_int *nit)
4889 {
4890   /* When SCEV information is available, try to update loop iterations
4891      estimate.  Otherwise just return whatever we recorded earlier.  */
4892   if (scev_initialized_p ())
4893     estimate_numbers_of_iterations (loop);
4894
4895   return (get_estimated_loop_iterations (loop, nit));
4896 }
4897
4898 /* Similar to estimated_loop_iterations, but returns the estimate only
4899    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4900    on the number of iterations of LOOP could not be derived, returns -1.  */
4901
4902 HOST_WIDE_INT
4903 estimated_loop_iterations_int (class loop *loop)
4904 {
4905   widest_int nit;
4906   HOST_WIDE_INT hwi_nit;
4907
4908   if (!estimated_loop_iterations (loop, &nit))
4909     return -1;
4910
4911   if (!wi::fits_shwi_p (nit))
4912     return -1;
4913   hwi_nit = nit.to_shwi ();
4914
4915   return hwi_nit < 0 ? -1 : hwi_nit;
4916 }
4917
4918
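A hypothetical caller, to show the intended use of this wrapper (the heuristic and threshold are invented for the example):

    /* Skip some transformation for loops expected to roll only a
       handful of times.  */
    static bool
    worth_transforming_p (class loop *loop)
    {
      HOST_WIDE_INT est = estimated_loop_iterations_int (loop);
      return est < 0 || est >= 8;   /* -1 means no estimate available.  */
    }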
4919 /* Sets NIT to an upper bound for the maximum number of executions of the
4920    latch of the LOOP.  If we have no reliable estimate, the function returns
4921    false, otherwise returns true.  */
4922
4923 bool
4924 max_loop_iterations (class loop *loop, widest_int *nit)
4925 {
4926   /* When SCEV information is available, try to update loop iterations
4927      estimate.  Otherwise just return whatever we recorded earlier.  */
4928   if (scev_initialized_p ())
4929     estimate_numbers_of_iterations (loop);
4930
4931   return get_max_loop_iterations (loop, nit);
4932 }
4933
4934 /* Similar to max_loop_iterations, but returns the estimate only
4935    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4936    on the number of iterations of LOOP could not be derived, returns -1.  */
4937
4938 HOST_WIDE_INT
4939 max_loop_iterations_int (class loop *loop)
4940 {
4941   widest_int nit;
4942   HOST_WIDE_INT hwi_nit;
4943
4944   if (!max_loop_iterations (loop, &nit))
4945     return -1;
4946
4947   if (!wi::fits_shwi_p (nit))
4948     return -1;
4949   hwi_nit = nit.to_shwi ();
4950
4951   return hwi_nit < 0 ? -1 : hwi_nit;
4952 }
4953
4954 /* Sets NIT to a likely upper bound for the maximum number of executions of the
4955    latch of the LOOP.  If we have no reliable estimate, the function returns
4956    false, otherwise returns true.  */
4957
4958 bool
4959 likely_max_loop_iterations (class loop *loop, widest_int *nit)
4960 {
4961   /* When SCEV information is available, try to update loop iterations
4962      estimate.  Otherwise just return whatever we recorded earlier.  */
4963   if (scev_initialized_p ())
4964     estimate_numbers_of_iterations (loop);
4965
4966   return get_likely_max_loop_iterations (loop, nit);
4967 }
4968
4969 /* Similar to likely_max_loop_iterations, but returns the estimate only
4970    if it fits into HOST_WIDE_INT.  If this is not the case, or the estimate
4971    on the number of iterations of LOOP could not be derived, returns -1.  */
4972
4973 HOST_WIDE_INT
4974 likely_max_loop_iterations_int (class loop *loop)
4975 {
4976   widest_int nit;
4977   HOST_WIDE_INT hwi_nit;
4978
4979   if (!likely_max_loop_iterations (loop, &nit))
4980     return -1;
4981
4982   if (!wi::fits_shwi_p (nit))
4983     return -1;
4984   hwi_nit = nit.to_shwi ();
4985
4986   return hwi_nit < 0 ? -1 : hwi_nit;
4987 }
4988
4989 /* Returns an estimate for the number of executions of statements
4990    in the LOOP.  For statements before the loop exit, this exceeds
4991    the number of executions of the latch by one.  */
4992
4993 HOST_WIDE_INT
4994 estimated_stmt_executions_int (class loop *loop)
4995 {
4996   HOST_WIDE_INT nit = estimated_loop_iterations_int (loop);
4997   HOST_WIDE_INT snit;
4998
4999   if (nit == -1)
5000     return -1;
5001
5002   snit = (HOST_WIDE_INT) ((unsigned HOST_WIDE_INT) nit + 1);
5003
5004   /* If the computation overflows, return -1.  */
5005   return snit < 0 ? -1 : snit;
5006 }
5007
5008 /* Sets NIT to the maximum number of executions of the latch of the
5009    LOOP, plus one.  If we have no reliable estimate, the function returns
5010    false, otherwise returns true.  */
5011
5012 bool
5013 max_stmt_executions (class loop *loop, widest_int *nit)
5014 {
5015   widest_int nit_minus_one;
5016
5017   if (!max_loop_iterations (loop, nit))
5018     return false;
5019
5020   nit_minus_one = *nit;
5021
5022   *nit += 1;
5023
5024   return wi::gtu_p (*nit, nit_minus_one);
5025 }
5026
5027 /* Sets NIT to the estimated maximum number of executions of the latch of the
5028    LOOP, plus one.  If we have no likely estimate, the function returns
5029    false, otherwise returns true.  */
5030
5031 bool
5032 likely_max_stmt_executions (class loop *loop, widest_int *nit)
5033 {
5034   widest_int nit_minus_one;
5035
5036   if (!likely_max_loop_iterations (loop, nit))
5037     return false;
5038
5039   nit_minus_one = *nit;
5040
5041   *nit += 1;
5042
5043   return wi::gtu_p (*nit, nit_minus_one);
5044 }
5045
5046 /* Sets NIT to the estimated number of executions of the latch of the
5047    LOOP, plus one.  If we have no reliable estimate, the function returns
5048    false, otherwise returns true.  */
5049
5050 bool
5051 estimated_stmt_executions (class loop *loop, widest_int *nit)
5052 {
5053   widest_int nit_minus_one;
5054
5055   if (!estimated_loop_iterations (loop, nit))
5056     return false;
5057
5058   nit_minus_one = *nit;
5059
5060   *nit += 1;
5061
5062   return wi::gtu_p (*nit, nit_minus_one);
5063 }
5064
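The "+ 1" in the three functions above doubles as an overflow check.  In isolation the pattern looks like this (standalone sketch in GCC's own types):

    /* Increment X, returning false if the increment wrapped around:
       wi::gtu_p fails exactly when X + 1 wrapped to a smaller value.  */
    static bool
    increment_checked (widest_int &x)
    {
      widest_int old = x;
      x += 1;
      return wi::gtu_p (x, old);
    }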
5065 /* Records estimates on numbers of iterations of loops.  */
5066
5067 void
5068 estimate_numbers_of_iterations (function *fn)
5069 {
5070   /* We don't want to issue signed overflow warnings while getting
5071      loop iteration estimates.  */
5072   fold_defer_overflow_warnings ();
5073
5074   for (auto loop : loops_list (fn, 0))
5075     estimate_numbers_of_iterations (loop);
5076
5077   fold_undefer_and_ignore_overflow_warnings ();
5078 }
5079
5080 /* Returns true if statement S1 dominates statement S2.  */
5081
5082 bool
5083 stmt_dominates_stmt_p (gimple *s1, gimple *s2)
5084 {
5085   basic_block bb1 = gimple_bb (s1), bb2 = gimple_bb (s2);
5086
5087   if (!bb1
5088       || s1 == s2)
5089     return true;
5090
5091   if (bb1 == bb2)
5092     {
5093       gimple_stmt_iterator bsi;
5094
5095       if (gimple_code (s2) == GIMPLE_PHI)
5096         return false;
5097
5098       if (gimple_code (s1) == GIMPLE_PHI)
5099         return true;
5100
5101       for (bsi = gsi_start_bb (bb1); gsi_stmt (bsi) != s2; gsi_next (&bsi))
5102         if (gsi_stmt (bsi) == s1)
5103           return true;
5104
5105       return false;
5106     }
5107
5108   return dominated_by_p (CDI_DOMINATORS, bb2, bb1);
5109 }
5110
5111 /* Returns true when we can prove that the number of executions of
5112    STMT in the loop is at most NITER, according to the bound on
5113    the number of executions of the statement NITER_BOUND->stmt recorded in
5114    NITER_BOUND and the fact that NITER_BOUND->stmt dominates STMT.
5115
5116    ??? This code can become quite a CPU hog - we can have many bounds,
5117    and large basic blocks forcing stmt_dominates_stmt_p to be queried
5118    many times on large basic blocks, so the whole thing is O(n^2)
5119    per scev_probably_wraps_p invocation (which can be done n times).
5120
5121    It would make more sense (and give better answers) to remember BB
5122    bounds computed by discover_iteration_bound_by_body_walk.  */
5123
5124 static bool
5125 n_of_executions_at_most (gimple *stmt,
5126                          class nb_iter_bound *niter_bound,
5127                          tree niter)
5128 {
5129   widest_int bound = niter_bound->bound;
5130   tree nit_type = TREE_TYPE (niter), e;
5131   enum tree_code cmp;
5132
5133   gcc_assert (TYPE_UNSIGNED (nit_type));
5134
5135   /* If the bound does not even fit into NIT_TYPE, it cannot tell us that
5136      the number of iterations is small.  */
5137   if (!wi::fits_to_tree_p (bound, nit_type))
5138     return false;
5139
5140   /* We know that NITER_BOUND->stmt is executed at most NITER_BOUND->bound + 1
5141      times.  This means that:
5142
5143      -- if NITER_BOUND->is_exit is true, then everything after
5144         it is executed at most NITER_BOUND->bound times.
5145
5146      -- If NITER_BOUND->is_exit is false, then if we can prove that when STMT
5147         is executed, then NITER_BOUND->stmt is executed as well in the same
5148         iteration, then STMT is executed at most NITER_BOUND->bound + 1 times.
5149
5150         If we can determine that NITER_BOUND->stmt is always executed
5151         after STMT, then STMT is executed at most NITER_BOUND->bound + 2 times.
5152         We conclude this when both statements belong to the same
5153         basic block, STMT is before NITER_BOUND->stmt, and there are no
5154         statements with side effects in between.  */
5155
5156   if (niter_bound->is_exit)
5157     {
5158       if (stmt == niter_bound->stmt
5159           || !stmt_dominates_stmt_p (niter_bound->stmt, stmt))
5160         return false;
5161       cmp = GE_EXPR;
5162     }
5163   else
5164     {
5165       if (!stmt_dominates_stmt_p (niter_bound->stmt, stmt))
5166         {
5167           gimple_stmt_iterator bsi;
5168           if (gimple_bb (stmt) != gimple_bb (niter_bound->stmt)
5169               || gimple_code (stmt) == GIMPLE_PHI
5170               || gimple_code (niter_bound->stmt) == GIMPLE_PHI)
5171             return false;
5172
5173           /* By stmt_dominates_stmt_p we already know that STMT appears
5174              before NITER_BOUND->STMT.  Still need to test that the loop
5175              cannot be terminated by a side effect in between.  */
5176           for (bsi = gsi_for_stmt (stmt); gsi_stmt (bsi) != niter_bound->stmt;
5177                gsi_next (&bsi))
5178             if (gimple_has_side_effects (gsi_stmt (bsi)))
5179               return false;
5180           bound += 1;
5181           if (bound == 0
5182               || !wi::fits_to_tree_p (bound, nit_type))
5183             return false;
5184         }
5185       cmp = GT_EXPR;
5186     }
5187
5188   e = fold_binary (cmp, boolean_type_node,
5189                    niter, wide_int_to_tree (nit_type, bound));
5190   return e && integer_nonzerop (e);
5191 }
5192
5193 /* Returns true if the arithmetic in TYPE can be assumed not to wrap.  */
5194
5195 bool
5196 nowrap_type_p (tree type)
5197 {
5198   if (ANY_INTEGRAL_TYPE_P (type)
5199       && TYPE_OVERFLOW_UNDEFINED (type))
5200     return true;
5201
5202   if (POINTER_TYPE_P (type))
5203     return true;
5204
5205   return false;
5206 }
5207
5208 /* Return true if we can prove LOOP is exited before evolution of induction
5209    variable {BASE, STEP} overflows with respect to its type bound.  */
5210
5211 static bool
5212 loop_exits_before_overflow (tree base, tree step,
5213                             gimple *at_stmt, class loop *loop)
5214 {
5215   widest_int niter;
5216   struct control_iv *civ;
5217   class nb_iter_bound *bound;
5218   tree e, delta, step_abs, unsigned_base;
5219   tree type = TREE_TYPE (step);
5220   tree unsigned_type, valid_niter;
5221
5222   /* Don't issue signed overflow warnings.  */
5223   fold_defer_overflow_warnings ();
5224
5225   /* Compute the number of iterations before we reach the bound of the
5226      type, and verify that the loop is exited before this occurs.  */
5227   unsigned_type = unsigned_type_for (type);
5228   unsigned_base = fold_convert (unsigned_type, base);
5229
5230   if (tree_int_cst_sign_bit (step))
5231     {
5232       tree extreme = fold_convert (unsigned_type,
5233                                    lower_bound_in_type (type, type));
5234       delta = fold_build2 (MINUS_EXPR, unsigned_type, unsigned_base, extreme);
5235       step_abs = fold_build1 (NEGATE_EXPR, unsigned_type,
5236                               fold_convert (unsigned_type, step));
5237     }
5238   else
5239     {
5240       tree extreme = fold_convert (unsigned_type,
5241                                    upper_bound_in_type (type, type));
5242       delta = fold_build2 (MINUS_EXPR, unsigned_type, extreme, unsigned_base);
5243       step_abs = fold_convert (unsigned_type, step);
5244     }
5245
5246   valid_niter = fold_build2 (FLOOR_DIV_EXPR, unsigned_type, delta, step_abs);
5247
5248   estimate_numbers_of_iterations (loop);
5249
5250   if (max_loop_iterations (loop, &niter)
5251       && wi::fits_to_tree_p (niter, TREE_TYPE (valid_niter))
5252       && (e = fold_binary (GT_EXPR, boolean_type_node, valid_niter,
5253                            wide_int_to_tree (TREE_TYPE (valid_niter),
5254                                              niter))) != NULL
5255       && integer_nonzerop (e))
5256     {
5257       fold_undefer_and_ignore_overflow_warnings ();
5258       return true;
5259     }
5260   if (at_stmt)
5261     for (bound = loop->bounds; bound; bound = bound->next)
5262       {
5263         if (n_of_executions_at_most (at_stmt, bound, valid_niter))
5264           {
5265             fold_undefer_and_ignore_overflow_warnings ();
5266             return true;
5267           }
5268       }
5269   fold_undefer_and_ignore_overflow_warnings ();
5270
5271   /* Try to prove the loop is exited before {base, step} overflows, with the
5272      help of analyzed loop control IVs.  This is done only for IVs with
5273      constant step because otherwise we don't have the information.  */
5274   if (TREE_CODE (step) == INTEGER_CST)
5275     {
5276       for (civ = loop->control_ivs; civ; civ = civ->next)
5277         {
5278           enum tree_code code;
5279           tree civ_type = TREE_TYPE (civ->step);
5280
5281           /* Have to consider type difference because operand_equal_p ignores
5282              that for constants.  */
5283           if (TYPE_UNSIGNED (type) != TYPE_UNSIGNED (civ_type)
5284               || element_precision (type) != element_precision (civ_type))
5285             continue;
5286
5287           /* Only consider control IVs with the same step.  */
5288           if (!operand_equal_p (step, civ->step, 0))
5289             continue;
5290
5291           /* Done proving if this is a no-overflow control IV.  */
5292           if (operand_equal_p (base, civ->base, 0))
5293             return true;
5294
5295           /* Control IV is recorded after expanding simple operations,
5296              so here we expand base and compare it too.  */
5297           tree expanded_base = expand_simple_operations (base);
5298           if (operand_equal_p (expanded_base, civ->base, 0))
5299             return true;
5300
5301           /* If this is a before-stepping control IV, in other words, we have
5302
5303                {civ_base, step} = {base + step, step}
5304
5305              Because civ {base + step, step} doesn't overflow during loop
5306              iterations, {base, step} will not overflow if we can prove the
5307              operation "base + step" does not overflow.  Specifically, we try
5308              to prove the below conditions are satisfied:
5309
5310                base <= UPPER_BOUND (type) - step  ;;step > 0
5311                base >= LOWER_BOUND (type) - step  ;;step < 0
5312
5313              by proving the reverse conditions are false using the loop's
5314              initial condition.  */
5315           if (POINTER_TYPE_P (TREE_TYPE (base)))
5316             code = POINTER_PLUS_EXPR;
5317           else
5318             code = PLUS_EXPR;
5319
5320           tree stepped = fold_build2 (code, TREE_TYPE (base), base, step);
5321           tree expanded_stepped = fold_build2 (code, TREE_TYPE (base),
5322                                                expanded_base, step);
5323           if (operand_equal_p (stepped, civ->base, 0)
5324               || operand_equal_p (expanded_stepped, civ->base, 0))
5325             {
5326               tree extreme;
5327
5328               if (tree_int_cst_sign_bit (step))
5329                 {
5330                   code = LT_EXPR;
5331                   extreme = lower_bound_in_type (type, type);
5332                 }
5333               else
5334                 {
5335                   code = GT_EXPR;
5336                   extreme = upper_bound_in_type (type, type);
5337                 }
5338               extreme = fold_build2 (MINUS_EXPR, type, extreme, step);
5339               e = fold_build2 (code, boolean_type_node, base, extreme);
5340               e = simplify_using_initial_conditions (loop, e);
5341               if (integer_zerop (e))
5342                 return true;
5343             }
5344         }
5345     }
5346
5347   return false;
5348 }
5349
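A compact worked illustration of the before-stepping case above (values invented):

    /* Candidate IV:  {base, +, step}        e.g. i  = 0, 4, 8, ...
       Control IV:    {base + step, +, step} e.g. i' = 4, 8, 12, ...
       The control IV i' is known not to overflow during the loop, so the
       candidate i cannot overflow either, provided the single extra
       operation base + step itself does not overflow; that is what the
       base <= UPPER_BOUND (type) - step check establishes.  */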
5350 /* VAR is a scev variable whose evolution part is constant STEP, this function
5351    proves that VAR can't overflow by using value range info.  If VAR's value
5352    range is [MIN, MAX], it can be proven by:
5353      MAX + step doesn't overflow   ; if step > 0
5354    or
5355      MIN + step doesn't underflow  ; if step < 0.
5356
5357    We can only do this if var is computed in every loop iteration, i.e., var's
5358    definition has to dominate the loop latch.  Consider the example below:
5359
5360      {
5361        unsigned int i;
5362
5363        <bb 3>:
5364
5365        <bb 4>:
5366        # RANGE [0, 4294967294] NONZERO 65535
5367        # i_21 = PHI <0(3), i_18(9)>
5368        if (i_21 != 0)
5369          goto <bb 6>;
5370        else
5371          goto <bb 8>;
5372
5373        <bb 6>:
5374        # RANGE [0, 65533] NONZERO 65535
5375        _6 = i_21 + 4294967295;
5376        # RANGE [0, 65533] NONZERO 65535
5377        _7 = (long unsigned int) _6;
5378        # RANGE [0, 524264] NONZERO 524280
5379        _8 = _7 * 8;
5380        # PT = nonlocal escaped
5381        _9 = a_14 + _8;
5382        *_9 = 0;
5383
5384        <bb 8>:
5385        # RANGE [1, 65535] NONZERO 65535
5386        i_18 = i_21 + 1;
5387        if (i_18 >= 65535)
5388          goto <bb 10>;
5389        else
5390          goto <bb 9>;
5391
5392        <bb 9>:
5393        goto <bb 4>;
5394
5395        <bb 10>:
5396        return;
5397      }
5398
5399    VAR _6 doesn't overflow only under the pre-condition (i_21 != 0), so here
5400    we can't use _6 to prove no-overflow for _7.  In fact, var _7 takes the
5401    value sequence (4294967295, 0, 1, ..., 65533) over the loop's lifetime,
5402    rather than (4294967295, 4294967296, ...).  */
5403
5404 static bool
5405 scev_var_range_cant_overflow (tree var, tree step, class loop *loop)
5406 {
5407   tree type;
5408   wide_int minv, maxv, diff, step_wi;
5409
5410   if (TREE_CODE (step) != INTEGER_CST || !INTEGRAL_TYPE_P (TREE_TYPE (var)))
5411     return false;
5412
5413   /* Check if VAR evaluates in every loop iteration.  It's not the case
5414      if VAR is a default definition or does not dominate the loop's latch.  */
5415   basic_block def_bb = gimple_bb (SSA_NAME_DEF_STMT (var));
5416   if (!def_bb || !dominated_by_p (CDI_DOMINATORS, loop->latch, def_bb))
5417     return false;
5418
5419   Value_Range r (TREE_TYPE (var));
5420   get_range_query (cfun)->range_of_expr (r, var);
5421   if (r.kind () != VR_RANGE)
5422     return false;
5423
5424   /* VAR is a scev whose evolution part is STEP and value range info
5425      is [MIN, MAX], we can prove its no-overflowness by the conditions:
5426
5427        type_MAX - MAX >= step   ; if step > 0
5428        MIN - type_MIN >= |step| ; if step < 0.
5429
5430      Or VAR must take a value outside of the value range, which is not true.  */
5431   step_wi = wi::to_wide (step);
5432   type = TREE_TYPE (var);
5433   if (tree_int_cst_sign_bit (step))
5434     {
5435       diff = r.lower_bound () - wi::to_wide (lower_bound_in_type (type, type));
5436       step_wi = - step_wi;
5437     }
5438   else
5439     diff = wi::to_wide (upper_bound_in_type (type, type)) - r.upper_bound ();
5440
5441   return (wi::geu_p (diff, step_wi));
5442 }
5443
5444 /* Return false only when the induction variable BASE + STEP * I is
5445    known to not overflow: i.e. when the number of iterations is small
5446    enough with respect to the step and initial condition in order to
5447    keep the evolution confined in TYPE's bounds.  Return true when the
5448    iv is known to overflow or when the property is not computable.
5449
5450    USE_OVERFLOW_SEMANTICS is true if this function should assume that
5451    the rules for overflow of the given language apply (e.g., that signed
5452    arithmetic in C does not overflow).
5453
5454    If VAR is an SSA variable, this function also returns false if VAR can
5455    be proven not to overflow with value range info.  */
5456
5457 bool
5458 scev_probably_wraps_p (tree var, tree base, tree step,
5459                        gimple *at_stmt, class loop *loop,
5460                        bool use_overflow_semantics)
5461 {
5462   /* FIXME: We really need something like
5463      http://gcc.gnu.org/ml/gcc-patches/2005-06/msg02025.html.
5464
5465      We used to test for the following situation that frequently appears
5466      during address arithmetic:
5467
5468        D.1621_13 = (long unsigned intD.4) D.1620_12;
5469        D.1622_14 = D.1621_13 * 8;
5470        D.1623_15 = (doubleD.29 *) D.1622_14;
5471
5472      And derived that the sequence corresponding to D_14
5473      can be proved to not wrap because it is used for computing a
5474      memory access; however, this is not really the case -- for example,
5475      if D_12 = (unsigned char) [254,+,1], then D_14 has values
5476      2032, 2040, 0, 8, ..., but the code is still legal.  */
5477
5478   if (chrec_contains_undetermined (base)
5479       || chrec_contains_undetermined (step))
5480     return true;
5481
5482   if (integer_zerop (step))
5483     return false;
5484
5485   /* If we can use the fact that signed and pointer arithmetic does not
5486      wrap, we are done.  */
5487   if