Bug Summary

File:build/gcc/machmode.h
Warning:line 422, column 60
Undefined or garbage value returned to caller

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name loop-iv.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. 
-I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-XW2GA9.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc

1/* Rtl-level induction variable analysis.
2 Copyright (C) 2004-2023 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it
7under the terms of the GNU General Public License as published by the
8Free Software Foundation; either version 3, or (at your option) any
9later version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT
12ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20/* This is a simple analysis of induction variables of the loop. The major use
21 is for determining the number of iterations of a loop for loop unrolling,
22 doloop optimization and branch prediction. The iv information is computed
23 on demand.
24
25 Induction variables are analyzed by walking the use-def chains. When
26 a basic induction variable (biv) is found, it is cached in the bivs
27 hash table. When register is proved to be a biv, its description
28 is stored to DF_REF_DATA of the def reference.
29
30 The analysis works always with one loop -- you must call
31 iv_analysis_loop_init (loop) for it. All the other functions then work with
32 this loop. When you need to work with another loop, just call
33 iv_analysis_loop_init for it. When you no longer need iv analysis, call
34 iv_analysis_done () to clean up the memory.
35
36 The available functions are:
37
38 iv_analyze (insn, mode, reg, iv): Stores the description of the induction
39 variable corresponding to the use of register REG in INSN to IV, given
40 that REG has mode MODE. Returns true if REG is an induction variable
41 in INSN. false otherwise. If a use of REG is not found in INSN,
42 the following insns are scanned (so that we may call this function
43 on insns returned by get_condition).
44 iv_analyze_result (insn, def, iv): Stores to IV the description of the iv
45 corresponding to DEF, which is a register defined in INSN.
46 iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of iv
47 corresponding to expression EXPR evaluated at INSN. All registers used by
48 EXPR must also be used in INSN. MODE is the mode of EXPR.
49*/
50
51#include "config.h"
52#include "system.h"
53#include "coretypes.h"
54#include "backend.h"
55#include "rtl.h"
56#include "df.h"
57#include "memmodel.h"
58#include "emit-rtl.h"
59#include "diagnostic-core.h"
60#include "cfgloop.h"
61#include "intl.h"
62#include "dumpfile.h"
63#include "rtl-iter.h"
64#include "tree-ssa-loop-niter.h"
65#include "regs.h"
66#include "function-abi.h"
67
68/* Possible return values of iv_get_reaching_def. */
69
70enum iv_grd_result
71{
72 /* More than one reaching def, or reaching def that does not
73 dominate the use. */
74 GRD_INVALID,
75
76 /* The use is trivial invariant of the loop, i.e. is not changed
77 inside the loop. */
78 GRD_INVARIANT,
79
80 /* The use is reached by initial value and a value from the
81 previous iteration. */
82 GRD_MAYBE_BIV,
83
84 /* The use has single dominating def. */
85 GRD_SINGLE_DOM
86};
87
88/* Information about a biv. */
89
90class biv_entry
91{
92public:
93 unsigned regno; /* The register of the biv. */
94 class rtx_iv iv; /* Value of the biv. */
95};
96
97static bool clean_slate = true;
98
99static unsigned int iv_ref_table_size = 0;
100
101/* Table of rtx_ivs indexed by the df_ref uid field. */
102static class rtx_iv ** iv_ref_table;
103
104/* Induction variable stored at the reference. */
105#define DF_REF_IV(REF)iv_ref_table[((REF)->base.id)] iv_ref_table[DF_REF_ID (REF)((REF)->base.id)]
106#define DF_REF_IV_SET(REF, IV)iv_ref_table[((REF)->base.id)] = (IV) iv_ref_table[DF_REF_ID (REF)((REF)->base.id)] = (IV)
107
108/* The current loop. */
109
110static class loop *current_loop;
111
112/* Hashtable helper. */
113
114struct biv_entry_hasher : free_ptr_hash <biv_entry>
115{
116 typedef rtx_def *compare_type;
117 static inline hashval_t hash (const biv_entry *);
118 static inline bool equal (const biv_entry *, const rtx_def *);
119};
120
121/* Returns hash value for biv B. */
122
123inline hashval_t
124biv_entry_hasher::hash (const biv_entry *b)
125{
126 return b->regno;
127}
128
129/* Compares biv B and register R. */
130
131inline bool
132biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r)
133{
134 return b->regno == REGNO (r)(rhs_regno(r));
135}
136
137/* Bivs of the current loop. */
138
139static hash_table<biv_entry_hasher> *bivs;
140
141static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);
142
143/* Return the RTX code corresponding to the IV extend code EXTEND. */
144static inline enum rtx_code
145iv_extend_to_rtx_code (enum iv_extend_code extend)
146{
147 switch (extend)
148 {
149 case IV_SIGN_EXTEND:
150 return SIGN_EXTEND;
151 case IV_ZERO_EXTEND:
152 return ZERO_EXTEND;
153 case IV_UNKNOWN_EXTEND:
154 return UNKNOWN;
155 }
156 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 156, __FUNCTION__))
;
157}
158
159/* Dumps information about IV to FILE. */
160
161extern void dump_iv_info (FILE *, class rtx_iv *);
162void
163dump_iv_info (FILE *file, class rtx_iv *iv)
164{
165 if (!iv->base)
166 {
167 fprintf (file, "not simple");
168 return;
169 }
170
171 if (iv->step == const0_rtx(const_int_rtx[64])
172 && !iv->first_special)
173 fprintf (file, "invariant ");
174
175 print_rtl (file, iv->base);
176 if (iv->step != const0_rtx(const_int_rtx[64]))
177 {
178 fprintf (file, " + ");
179 print_rtl (file, iv->step);
180 fprintf (file, " * iteration");
181 }
182 fprintf (file, " (in %s)", GET_MODE_NAME (iv->mode)mode_name[iv->mode]);
183
184 if (iv->mode != iv->extend_mode)
185 fprintf (file, " %s to %s",
186 rtx_name[iv_extend_to_rtx_code (iv->extend)],
187 GET_MODE_NAME (iv->extend_mode)mode_name[iv->extend_mode]);
188
189 if (iv->mult != const1_rtx(const_int_rtx[64 +1]))
190 {
191 fprintf (file, " * ");
192 print_rtl (file, iv->mult);
193 }
194 if (iv->delta != const0_rtx(const_int_rtx[64]))
195 {
196 fprintf (file, " + ");
197 print_rtl (file, iv->delta);
198 }
199 if (iv->first_special)
200 fprintf (file, " (first special)");
201}
202
203static void
204check_iv_ref_table_size (void)
205{
206 if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ()(df->def_info.table_size))
207 {
208 unsigned int new_size = DF_DEFS_TABLE_SIZE ()(df->def_info.table_size) + (DF_DEFS_TABLE_SIZE ()(df->def_info.table_size) / 4);
209 iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size)((class rtx_iv * *) xrealloc ((void *) (iv_ref_table), sizeof
(class rtx_iv *) * (new_size)))
;
210 memset (&iv_ref_table[iv_ref_table_size], 0,
211 (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
212 iv_ref_table_size = new_size;
213 }
214}
215
216
217/* Checks whether REG is a well-behaved register. */
218
219static bool
220simple_reg_p (rtx reg)
221{
222 unsigned r;
223
224 if (GET_CODE (reg)((enum rtx_code) (reg)->code) == SUBREG)
225 {
226 if (!subreg_lowpart_p (reg))
227 return false;
228 reg = SUBREG_REG (reg)(((reg)->u.fld[0]).rt_rtx);
229 }
230
231 if (!REG_P (reg)(((enum rtx_code) (reg)->code) == REG))
232 return false;
233
234 r = REGNO (reg)(rhs_regno(reg));
235 if (HARD_REGISTER_NUM_P (r)((r) < 76))
236 return false;
237
238 if (GET_MODE_CLASS (GET_MODE (reg))((enum mode_class) mode_class[((machine_mode) (reg)->mode)
])
!= MODE_INT)
239 return false;
240
241 return true;
242}
243
244/* Clears the information about ivs stored in df. */
245
246static void
247clear_iv_info (void)
248{
249 unsigned i, n_defs = DF_DEFS_TABLE_SIZE ()(df->def_info.table_size);
250 class rtx_iv *iv;
251
252 check_iv_ref_table_size ();
253 for (i = 0; i < n_defs; i++)
254 {
255 iv = iv_ref_table[i];
256 if (iv)
257 {
258 free (iv);
259 iv_ref_table[i] = NULL__null;
260 }
261 }
262
263 bivs->empty ();
264}
265
266
267/* Prepare the data for an induction variable analysis of a LOOP. */
268
269void
270iv_analysis_loop_init (class loop *loop)
271{
272 current_loop = loop;
273
274 /* Clear the information from the analysis of the previous loop. */
275 if (clean_slate)
276 {
277 df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
278 bivs = new hash_table<biv_entry_hasher> (10);
279 clean_slate = false;
280 }
281 else
282 clear_iv_info ();
283
284 /* Get rid of the ud chains before processing the rescans. Then add
285 the problem back. */
286 df_remove_problem (df_chain(df->problems_by_index[DF_CHAIN]));
287 df_process_deferred_rescans ();
288 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
289 df_chain_add_problem (DF_UD_CHAIN);
290 df_note_add_problem ();
291 df_analyze_loop (loop);
292 if (dump_file)
293 df_dump_region (dump_file);
294
295 check_iv_ref_table_size ();
296}
297
298/* Finds the definition of REG that dominates loop latch and stores
299 it to DEF. Returns false if there is not a single definition
300 dominating the latch. If REG has no definition in loop, DEF
301 is set to NULL and true is returned. */
302
303static bool
304latch_dominating_def (rtx reg, df_ref *def)
305{
306 df_ref single_rd = NULL__null, adef;
307 unsigned regno = REGNO (reg)(rhs_regno(reg));
308 class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch)(df_rd_get_bb_info ((current_loop->latch)->index));
309
310 for (adef = DF_REG_DEF_CHAIN (regno)(df->def_regs[(regno)]->reg_chain); adef; adef = DF_REF_NEXT_REG (adef)((adef)->base.next_reg))
311 {
312 if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef)(((((adef)->base.cl) == DF_REF_ARTIFICIAL) ? (adef)->artificial_ref
.bb : BLOCK_FOR_INSN (((adef)->base.insn_info->insn)))->
index)
)
313 || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)((adef)->base.id)))
314 continue;
315
316 /* More than one reaching definition. */
317 if (single_rd)
318 return false;
319
320 if (!just_once_each_iteration_p (current_loop, DF_REF_BB (adef)((((adef)->base.cl) == DF_REF_ARTIFICIAL) ? (adef)->artificial_ref
.bb : BLOCK_FOR_INSN (((adef)->base.insn_info->insn)))
))
321 return false;
322
323 single_rd = adef;
324 }
325
326 *def = single_rd;
327 return true;
328}
329
330/* Gets definition of REG reaching its use in INSN and stores it to DEF. */
331
332static enum iv_grd_result
333iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
334{
335 df_ref use, adef;
336 basic_block def_bb, use_bb;
337 rtx_insn *def_insn;
338 bool dom_p;
339
340 *def = NULL__null;
341 if (!simple_reg_p (reg))
342 return GRD_INVALID;
343 if (GET_CODE (reg)((enum rtx_code) (reg)->code) == SUBREG)
344 reg = SUBREG_REG (reg)(((reg)->u.fld[0]).rt_rtx);
345 gcc_assert (REG_P (reg))((void)(!((((enum rtx_code) (reg)->code) == REG)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 345, __FUNCTION__), 0 : 0))
;
346
347 use = df_find_use (insn, reg);
348 gcc_assert (use != NULL)((void)(!(use != __null) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 348, __FUNCTION__), 0 : 0))
;
349
350 if (!DF_REF_CHAIN (use)((use)->base.chain))
351 return GRD_INVARIANT;
352
353 /* More than one reaching def. */
354 if (DF_REF_CHAIN (use)((use)->base.chain)->next)
355 return GRD_INVALID;
356
357 adef = DF_REF_CHAIN (use)((use)->base.chain)->ref;
358
359 /* We do not handle setting only part of the register. */
360 if (DF_REF_FLAGS (adef)((adef)->base.flags) & DF_REF_READ_WRITE)
361 return GRD_INVALID;
362
363 def_insn = DF_REF_INSN (adef)((adef)->base.insn_info->insn);
364 def_bb = DF_REF_BB (adef)((((adef)->base.cl) == DF_REF_ARTIFICIAL) ? (adef)->artificial_ref
.bb : BLOCK_FOR_INSN (((adef)->base.insn_info->insn)))
;
365 use_bb = BLOCK_FOR_INSN (insn);
366
367 if (use_bb == def_bb)
368 dom_p = (DF_INSN_LUID (def_insn)((((df->insns[(INSN_UID (def_insn))]))->luid)) < DF_INSN_LUID (insn)((((df->insns[(INSN_UID (insn))]))->luid)));
369 else
370 dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);
371
372 if (dom_p)
373 {
374 *def = adef;
375 return GRD_SINGLE_DOM;
376 }
377
378 /* The definition does not dominate the use. This is still OK if
379 this may be a use of a biv, i.e. if the def_bb dominates loop
380 latch. */
381 if (just_once_each_iteration_p (current_loop, def_bb))
382 return GRD_MAYBE_BIV;
383
384 return GRD_INVALID;
385}
386
387/* Sets IV to invariant CST in MODE. Always returns true (just for
388 consistency with other iv manipulation functions that may fail). */
389
390static bool
391iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
392{
393 iv->mode = mode;
394 iv->base = cst;
395 iv->step = const0_rtx(const_int_rtx[64]);
396 iv->first_special = false;
397 iv->extend = IV_UNKNOWN_EXTEND;
398 iv->extend_mode = iv->mode;
399 iv->delta = const0_rtx(const_int_rtx[64]);
400 iv->mult = const1_rtx(const_int_rtx[64 +1]);
401
402 return true;
403}
404
405/* Evaluates application of subreg to MODE on IV. */
406
407static bool
408iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
409{
410 /* If iv is invariant, just calculate the new value. */
411 if (iv->step == const0_rtx(const_int_rtx[64])
412 && !iv->first_special)
413 {
414 rtx val = get_iv_value (iv, const0_rtx(const_int_rtx[64]));
415 val = lowpart_subreg (mode, val,
416 iv->extend == IV_UNKNOWN_EXTEND
417 ? iv->mode : iv->extend_mode);
418
419 iv->base = val;
420 iv->extend = IV_UNKNOWN_EXTEND;
421 iv->mode = iv->extend_mode = mode;
422 iv->delta = const0_rtx(const_int_rtx[64]);
423 iv->mult = const1_rtx(const_int_rtx[64 +1]);
424 return true;
425 }
426
427 if (iv->extend_mode == mode)
428 return true;
429
430 if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode))
431 return false;
432
433 iv->extend = IV_UNKNOWN_EXTEND;
434 iv->mode = mode;
435
436 iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
437 simplify_gen_binary (MULT, iv->extend_mode,
438 iv->base, iv->mult));
439 iv->step = simplify_gen_binary (MULT, iv->extend_mode, iv->step, iv->mult);
440 iv->mult = const1_rtx(const_int_rtx[64 +1]);
441 iv->delta = const0_rtx(const_int_rtx[64]);
442 iv->first_special = false;
443
444 return true;
445}
446
447/* Evaluates application of EXTEND to MODE on IV. */
448
449static bool
450iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
451{
452 /* If iv is invariant, just calculate the new value. */
453 if (iv->step == const0_rtx(const_int_rtx[64])
454 && !iv->first_special)
455 {
456 rtx val = get_iv_value (iv, const0_rtx(const_int_rtx[64]));
457 if (iv->extend_mode != iv->mode
458 && iv->extend != IV_UNKNOWN_EXTEND
459 && iv->extend != extend)
460 val = lowpart_subreg (iv->mode, val, iv->extend_mode);
461 val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
462 val,
463 iv->extend == extend
464 ? iv->extend_mode : iv->mode);
465 iv->base = val;
466 iv->extend = IV_UNKNOWN_EXTEND;
467 iv->mode = iv->extend_mode = mode;
468 iv->delta = const0_rtx(const_int_rtx[64]);
469 iv->mult = const1_rtx(const_int_rtx[64 +1]);
470 return true;
471 }
472
473 if (mode != iv->extend_mode)
474 return false;
475
476 if (iv->extend != IV_UNKNOWN_EXTEND
477 && iv->extend != extend)
478 return false;
479
480 iv->extend = extend;
481
482 return true;
483}
484
485/* Evaluates negation of IV. */
486
487static bool
488iv_neg (class rtx_iv *iv)
489{
490 if (iv->extend == IV_UNKNOWN_EXTEND)
491 {
492 iv->base = simplify_gen_unary (NEG, iv->extend_mode,
493 iv->base, iv->extend_mode);
494 iv->step = simplify_gen_unary (NEG, iv->extend_mode,
495 iv->step, iv->extend_mode);
496 }
497 else
498 {
499 iv->delta = simplify_gen_unary (NEG, iv->extend_mode,
500 iv->delta, iv->extend_mode);
501 iv->mult = simplify_gen_unary (NEG, iv->extend_mode,
502 iv->mult, iv->extend_mode);
503 }
504
505 return true;
506}
507
508/* Evaluates addition or subtraction (according to OP) of IV1 to IV0. */
509
510static bool
511iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
512{
513 scalar_int_mode mode;
514 rtx arg;
515
516 /* Extend the constant to extend_mode of the other operand if necessary. */
517 if (iv0->extend == IV_UNKNOWN_EXTEND
518 && iv0->mode == iv0->extend_mode
519 && iv0->step == const0_rtx(const_int_rtx[64])
520 && GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode))
521 {
522 iv0->extend_mode = iv1->extend_mode;
523 iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode,
524 iv0->base, iv0->mode);
525 }
526 if (iv1->extend == IV_UNKNOWN_EXTEND
527 && iv1->mode == iv1->extend_mode
528 && iv1->step == const0_rtx(const_int_rtx[64])
529 && GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode))
530 {
531 iv1->extend_mode = iv0->extend_mode;
532 iv1->base = simplify_gen_unary (ZERO_EXTEND, iv1->extend_mode,
533 iv1->base, iv1->mode);
534 }
535
536 mode = iv0->extend_mode;
537 if (mode != iv1->extend_mode)
538 return false;
539
540 if (iv0->extend == IV_UNKNOWN_EXTEND
541 && iv1->extend == IV_UNKNOWN_EXTEND)
542 {
543 if (iv0->mode != iv1->mode)
544 return false;
545
546 iv0->base = simplify_gen_binary (op, mode, iv0->base, iv1->base);
547 iv0->step = simplify_gen_binary (op, mode, iv0->step, iv1->step);
548
549 return true;
550 }
551
552 /* Handle addition of constant. */
553 if (iv1->extend == IV_UNKNOWN_EXTEND
554 && iv1->mode == mode
555 && iv1->step == const0_rtx(const_int_rtx[64]))
556 {
557 iv0->delta = simplify_gen_binary (op, mode, iv0->delta, iv1->base);
558 return true;
559 }
560
561 if (iv0->extend == IV_UNKNOWN_EXTEND
562 && iv0->mode == mode
563 && iv0->step == const0_rtx(const_int_rtx[64]))
564 {
565 arg = iv0->base;
566 *iv0 = *iv1;
567 if (op == MINUS
568 && !iv_neg (iv0))
569 return false;
570
571 iv0->delta = simplify_gen_binary (PLUS, mode, iv0->delta, arg);
572 return true;
573 }
574
575 return false;
576}
577
578/* Evaluates multiplication of IV by constant CST. */
579
580static bool
581iv_mult (class rtx_iv *iv, rtx mby)
582{
583 scalar_int_mode mode = iv->extend_mode;
584
585 if (GET_MODE (mby)((machine_mode) (mby)->mode) != VOIDmode((void) 0, E_VOIDmode)
586 && GET_MODE (mby)((machine_mode) (mby)->mode) != mode)
587 return false;
588
589 if (iv->extend == IV_UNKNOWN_EXTEND)
590 {
591 iv->base = simplify_gen_binary (MULT, mode, iv->base, mby);
592 iv->step = simplify_gen_binary (MULT, mode, iv->step, mby);
593 }
594 else
595 {
596 iv->delta = simplify_gen_binary (MULT, mode, iv->delta, mby);
597 iv->mult = simplify_gen_binary (MULT, mode, iv->mult, mby);
598 }
599
600 return true;
601}
602
603/* Evaluates shift of IV by constant CST. */
604
605static bool
606iv_shift (class rtx_iv *iv, rtx mby)
607{
608 scalar_int_mode mode = iv->extend_mode;
609
610 if (GET_MODE (mby)((machine_mode) (mby)->mode) != VOIDmode((void) 0, E_VOIDmode)
30
Assuming the condition is true
611 && GET_MODE (mby)((machine_mode) (mby)->mode) != mode)
31
Calling 'scalar_int_mode::operator machine_mode'
612 return false;
613
614 if (iv->extend == IV_UNKNOWN_EXTEND)
615 {
616 iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby);
617 iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby);
618 }
619 else
620 {
621 iv->delta = simplify_gen_binary (ASHIFT, mode, iv->delta, mby);
622 iv->mult = simplify_gen_binary (ASHIFT, mode, iv->mult, mby);
623 }
624
625 return true;
626}
627
628/* The recursive part of get_biv_step. Gets the value of the single value
629 defined by DEF wrto initial value of REG inside loop, in shape described
630 at get_biv_step. */
631
632static bool
633get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
634 rtx *inner_step, scalar_int_mode *inner_mode,
635 enum iv_extend_code *extend,
636 rtx *outer_step)
637{
638 rtx set, rhs, op0 = NULL_RTX(rtx) 0, op1 = NULL_RTX(rtx) 0;
639 rtx next, nextr;
640 enum rtx_code code;
641 rtx_insn *insn = DF_REF_INSN (def)((def)->base.insn_info->insn);
642 df_ref next_def;
643 enum iv_grd_result res;
644
645 set = single_set (insn);
646 if (!set)
647 return false;
648
649 rhs = find_reg_equal_equiv_note (insn);
650 if (rhs)
651 rhs = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
652 else
653 rhs = SET_SRC (set)(((set)->u.fld[1]).rt_rtx);
654
655 code = GET_CODE (rhs)((enum rtx_code) (rhs)->code);
656 switch (code)
657 {
658 case SUBREG:
659 case REG:
660 next = rhs;
661 break;
662
663 case PLUS:
664 case MINUS:
665 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
666 op1 = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx);
667
668 if (code == PLUS && CONSTANT_P (op0)((rtx_class[(int) (((enum rtx_code) (op0)->code))]) == RTX_CONST_OBJ
)
)
669 std::swap (op0, op1);
670
671 if (!simple_reg_p (op0)
672 || !CONSTANT_P (op1)((rtx_class[(int) (((enum rtx_code) (op1)->code))]) == RTX_CONST_OBJ
)
)
673 return false;
674
675 if (GET_MODE (rhs)((machine_mode) (rhs)->mode) != outer_mode)
676 {
677 /* ppc64 uses expressions like
678
679 (set x:SI (plus:SI (subreg:SI y:DI) 1)).
680
681 this is equivalent to
682
683 (set x':DI (plus:DI y:DI 1))
684 (set x:SI (subreg:SI (x':DI)). */
685 if (GET_CODE (op0)((enum rtx_code) (op0)->code) != SUBREG)
686 return false;
687 if (GET_MODE (SUBREG_REG (op0))((machine_mode) ((((op0)->u.fld[0]).rt_rtx))->mode) != outer_mode)
688 return false;
689 }
690
691 next = op0;
692 break;
693
694 case SIGN_EXTEND:
695 case ZERO_EXTEND:
696 if (GET_MODE (rhs)((machine_mode) (rhs)->mode) != outer_mode)
697 return false;
698
699 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
700 if (!simple_reg_p (op0))
701 return false;
702
703 next = op0;
704 break;
705
706 default:
707 return false;
708 }
709
710 if (GET_CODE (next)((enum rtx_code) (next)->code) == SUBREG)
711 {
712 if (!subreg_lowpart_p (next))
713 return false;
714
715 nextr = SUBREG_REG (next)(((next)->u.fld[0]).rt_rtx);
716 if (GET_MODE (nextr)((machine_mode) (nextr)->mode) != outer_mode)
717 return false;
718 }
719 else
720 nextr = next;
721
722 res = iv_get_reaching_def (insn, nextr, &next_def);
723
724 if (res == GRD_INVALID || res == GRD_INVARIANT)
725 return false;
726
727 if (res == GRD_MAYBE_BIV)
728 {
729 if (!rtx_equal_p (nextr, reg))
730 return false;
731
732 *inner_step = const0_rtx(const_int_rtx[64]);
733 *extend = IV_UNKNOWN_EXTEND;
734 *inner_mode = outer_mode;
735 *outer_step = const0_rtx(const_int_rtx[64]);
736 }
737 else if (!get_biv_step_1 (next_def, outer_mode, reg,
738 inner_step, inner_mode, extend,
739 outer_step))
740 return false;
741
742 if (GET_CODE (next)((enum rtx_code) (next)->code) == SUBREG)
743 {
744 scalar_int_mode amode;
745 if (!is_a <scalar_int_mode> (GET_MODE (next)((machine_mode) (next)->mode), &amode)
746 || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
747 return false;
748
749 *inner_mode = amode;
750 *inner_step = simplify_gen_binary (PLUS, outer_mode,
751 *inner_step, *outer_step);
752 *outer_step = const0_rtx(const_int_rtx[64]);
753 *extend = IV_UNKNOWN_EXTEND;
754 }
755
756 switch (code)
757 {
758 case REG:
759 case SUBREG:
760 break;
761
762 case PLUS:
763 case MINUS:
764 if (*inner_mode == outer_mode
765 /* See comment in previous switch. */
766 || GET_MODE (rhs)((machine_mode) (rhs)->mode) != outer_mode)
767 *inner_step = simplify_gen_binary (code, outer_mode,
768 *inner_step, op1);
769 else
770 *outer_step = simplify_gen_binary (code, outer_mode,
771 *outer_step, op1);
772 break;
773
774 case SIGN_EXTEND:
775 case ZERO_EXTEND:
776 gcc_assert (GET_MODE (op0) == *inner_mode((void)(!(((machine_mode) (op0)->mode) == *inner_mode &&
*extend == IV_UNKNOWN_EXTEND && *outer_step == (const_int_rtx
[64])) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 778, __FUNCTION__), 0 : 0))
777 && *extend == IV_UNKNOWN_EXTEND((void)(!(((machine_mode) (op0)->mode) == *inner_mode &&
*extend == IV_UNKNOWN_EXTEND && *outer_step == (const_int_rtx
[64])) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 778, __FUNCTION__), 0 : 0))
778 && *outer_step == const0_rtx)((void)(!(((machine_mode) (op0)->mode) == *inner_mode &&
*extend == IV_UNKNOWN_EXTEND && *outer_step == (const_int_rtx
[64])) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 778, __FUNCTION__), 0 : 0))
;
779
780 *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
781 break;
782
783 default:
784 return false;
785 }
786
787 return true;
788}
789
790/* Gets the operation on register REG inside loop, in shape
791
792 OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))
793
794 If the operation cannot be described in this shape, return false.
795 LAST_DEF is the definition of REG that dominates loop latch. */
796
797static bool
798get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
799 rtx *inner_step, scalar_int_mode *inner_mode,
800 enum iv_extend_code *extend, rtx *outer_step)
801{
802 if (!get_biv_step_1 (last_def, outer_mode, reg,
803 inner_step, inner_mode, extend,
804 outer_step))
805 return false;
806
807 gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND))((void)(!((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND
)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 807, __FUNCTION__), 0 : 0))
;
808 gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx)((void)(!(*inner_mode != outer_mode || *outer_step == (const_int_rtx
[64])) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 808, __FUNCTION__), 0 : 0))
;
809
810 return true;
811}
812
813/* Records information that DEF is induction variable IV. */
814
815static void
816record_iv (df_ref def, class rtx_iv *iv)
817{
818 class rtx_iv *recorded_iv = XNEW (class rtx_iv)((class rtx_iv *) xmalloc (sizeof (class rtx_iv)));
819
820 *recorded_iv = *iv;
821 check_iv_ref_table_size ();
822 DF_REF_IV_SET (def, recorded_iv)iv_ref_table[((def)->base.id)] = (recorded_iv);
823}
824
825/* If DEF was already analyzed for bivness, store the description of the biv to
826 IV and return true. Otherwise return false. */
827
828static bool
829analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
830{
831 class biv_entry *biv = bivs->find_with_hash (def, REGNO (def)(rhs_regno(def)));
832
833 if (!biv)
834 return false;
835
836 *iv = biv->iv;
837 return true;
838}
839
840static void
841record_biv (rtx def, class rtx_iv *iv)
842{
843 class biv_entry *biv = XNEW (class biv_entry)((class biv_entry *) xmalloc (sizeof (class biv_entry)));
844 biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def)(rhs_regno(def)), INSERT);
845
846 biv->regno = REGNO (def)(rhs_regno(def));
847 biv->iv = *iv;
848 gcc_assert (!*slot)((void)(!(!*slot) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 848, __FUNCTION__), 0 : 0))
;
849 *slot = biv;
850}
851
852/* Determines whether DEF is a biv and if so, stores its description
853 to *IV. OUTER_MODE is the mode of DEF. */
854
/* NOTE(review): this is a static-analyzer listing of GCC loop-iv.cc; the
   leading numbers are original source line numbers and each macro use is
   shown fused with its expansion (e.g. NULL_RTX(rtx) 0).  Comments below
   describe the underlying code; the tokens themselves are untouched.

   Analyzes DEF as a basic induction variable (biv) and stores the result
   to *IV.  OUTER_MODE is the mode of DEF.  Returns true iff a valid iv
   description (non-null iv->base) was produced.  */
 855static bool
 856iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
 857{
 858 rtx inner_step, outer_step;
 859 scalar_int_mode inner_mode;
 860 enum iv_extend_code extend;
 861 df_ref last_def;
 862
 /* Trace the analysis when RTL dumping is enabled.  */
 863 if (dump_file)
 864 {
 865 fprintf (dump_file, "Analyzing ");
 866 print_rtl (dump_file, def);
 867 fprintf (dump_file, " for bivness.\n");
 868 }
 869
 /* A non-register DEF can only be handled when it is a constant, in
    which case it is an invariant (step 0) iv.  */
 870 if (!REG_P (def)(((enum rtx_code) (def)->code) == REG))
 871 {
 872 if (!CONSTANT_P (def)((rtx_class[(int) (((enum rtx_code) (def)->code))]) == RTX_CONST_OBJ
)
)
 873 return false;
 874
 875 return iv_constant (iv, outer_mode, def);
 876 }
 877
 /* Find the single definition of DEF dominating the loop latch; if the
    reaching definitions are not that simple, give up.  */
 878 if (!latch_dominating_def (def, &last_def))
 879 {
 880 if (dump_file)
 881 fprintf (dump_file, " not simple.\n");
 882 return false;
 883 }
 884
 /* No definition inside the loop: DEF is loop-invariant.  */
 885 if (!last_def)
 886 return iv_constant (iv, outer_mode, def);
 887
 /* Reuse a previously cached answer for DEF if one exists; a null base
    in the cached iv means "known not to be a biv".  */
 888 if (analyzed_for_bivness_p (def, iv))
 889 {
 890 if (dump_file)
 891 fprintf (dump_file, " already analysed.\n");
 892 return iv->base != NULL_RTX(rtx) 0;
 893 }
 894
 /* Decompose the latch-dominating definition into inner/outer steps and
    an extension; on failure record the negative result (null base) so it
    is cached by record_biv at END below.  */
 895 if (!get_biv_step (last_def, outer_mode, def, &inner_step, &inner_mode,
 896 &extend, &outer_step))
 897 {
 898 iv->base = NULL_RTX(rtx) 0;
 899 goto end;
 900 }
 901
 902 /* Loop transforms base to es (base + inner_step) + outer_step,
 903 where es means extend of subreg between inner_mode and outer_mode.
 904 The corresponding induction variable is
 905
 906 es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step */
 907
 908 iv->base = simplify_gen_binary (MINUS, outer_mode, def, outer_step);
 909 iv->step = simplify_gen_binary (PLUS, outer_mode, inner_step, outer_step);
 910 iv->mode = inner_mode;
 911 iv->extend_mode = outer_mode;
 912 iv->extend = extend;
 913 iv->mult = const1_rtx(const_int_rtx[64 +1]);
 914 iv->delta = outer_step;
 /* The first iteration is special only when the value is narrowed,
    i.e. when the inner and outer modes differ.  */
 915 iv->first_special = inner_mode != outer_mode;
 916
 917 end:
 918 if (dump_file)
 919 {
 920 fprintf (dump_file, " ");
 921 dump_iv_info (dump_file, iv);
 922 fprintf (dump_file, "\n");
 923 }
 924
 /* Memoize the (positive or negative) result for DEF so later queries
    hit the analyzed_for_bivness_p cache above.  */
 925 record_biv (def, iv);
 926 return iv->base != NULL_RTX(rtx) 0;
 927}
928
929/* Analyzes expression RHS used at INSN and stores the result to *IV.
930 The mode of the induction variable is MODE. */
931
932bool
933iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
934 class rtx_iv *iv)
935{
936 rtx mby = NULL_RTX(rtx) 0;
937 rtx op0 = NULL_RTX(rtx) 0, op1 = NULL_RTX(rtx) 0;
938 class rtx_iv iv0, iv1;
939 enum rtx_code code = GET_CODE (rhs)((enum rtx_code) (rhs)->code);
940 scalar_int_mode omode = mode;
941
942 iv->base = NULL_RTX(rtx) 0;
943 iv->step = NULL_RTX(rtx) 0;
944
945 gcc_assert (GET_MODE (rhs) == mode || GET_MODE (rhs) == VOIDmode)((void)(!(((machine_mode) (rhs)->mode) == mode || ((machine_mode
) (rhs)->mode) == ((void) 0, E_VOIDmode)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 945, __FUNCTION__), 0 : 0))
;
18
Assuming the condition is true
19
'?' condition is false
946
947 if (CONSTANT_P (rhs)((rtx_class[(int) (((enum rtx_code) (rhs)->code))]) == RTX_CONST_OBJ
)
20
Assuming the condition is false
23
Taking false branch
948 || REG_P (rhs)(((enum rtx_code) (rhs)->code) == REG)
21
Assuming field 'code' is not equal to REG
949 || code == SUBREG)
22
Assuming 'code' is not equal to SUBREG
950 return iv_analyze_op (insn, mode, rhs, iv);
951
952 switch (code)
24
Control jumps to 'case ASHIFT:' at line 982
953 {
954 case REG:
955 op0 = rhs;
956 break;
957
958 case SIGN_EXTEND:
959 case ZERO_EXTEND:
960 case NEG:
961 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
962 /* We don't know how many bits there are in a sign-extended constant. */
963 if (!is_a <scalar_int_mode> (GET_MODE (op0)((machine_mode) (op0)->mode), &omode))
964 return false;
965 break;
966
967 case PLUS:
968 case MINUS:
969 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
970 op1 = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx);
971 break;
972
973 case MULT:
974 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
975 mby = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx);
976 if (!CONSTANT_P (mby)((rtx_class[(int) (((enum rtx_code) (mby)->code))]) == RTX_CONST_OBJ
)
)
977 std::swap (op0, mby);
978 if (!CONSTANT_P (mby)((rtx_class[(int) (((enum rtx_code) (mby)->code))]) == RTX_CONST_OBJ
)
)
979 return false;
980 break;
981
982 case ASHIFT:
983 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx);
984 mby = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx);
985 if (!CONSTANT_P (mby)((rtx_class[(int) (((enum rtx_code) (mby)->code))]) == RTX_CONST_OBJ
)
)
25
Assuming the condition is false
26
Taking false branch
986 return false;
987 break;
988
989 default:
990 return false;
991 }
992
993 if (op0
27
Assuming 'op0' is null
994 && !iv_analyze_expr (insn, omode, op0, &iv0))
995 return false;
996
997 if (op1
27.1
'op1' is null
27.1
'op1' is null
998 && !iv_analyze_expr (insn, omode, op1, &iv1)) 999 return false; 1000 1001 switch (code)
28
Control jumps to 'case ASHIFT:' at line 1029
1002 { 1003 case SIGN_EXTEND: 1004 if (!iv_extend (&iv0, IV_SIGN_EXTEND, mode)) 1005 return false; 1006 break; 1007 1008 case ZERO_EXTEND: 1009 if (!iv_extend (&iv0, IV_ZERO_EXTEND, mode)) 1010 return false; 1011 break; 1012 1013 case NEG: 1014 if (!iv_neg (&iv0)) 1015 return false; 1016 break; 1017 1018 case PLUS: 1019 case MINUS: 1020 if (!iv_add (&iv0, &iv1, code)) 1021 return false; 1022 break; 1023 1024 case MULT: 1025 if (!iv_mult (&iv0, mby)) 1026 return false; 1027 break; 1028 1029 case ASHIFT: 1030 if (!iv_shift (&iv0, mby))
29
Calling 'iv_shift'
1031 return false; 1032 break; 1033 1034 default: 1035 break; 1036 } 1037 1038 *iv = iv0; 1039 return iv->base != NULL_RTX(rtx) 0; 1040} 1041 1042/* Analyzes iv DEF and stores the result to *IV. */ 1043 1044static bool 1045iv_analyze_def (df_ref def, class rtx_iv *iv) 1046{ 1047 rtx_insn *insn = DF_REF_INSN (def)((def)->base.insn_info->insn); 1048 rtx reg = DF_REF_REG (def)((def)->base.reg); 1049 rtx set, rhs; 1050 1051 if (dump_file)
4
Assuming 'dump_file' is null
5
Taking false branch
1052 { 1053 fprintf (dump_file, "Analyzing def of "); 1054 print_rtl (dump_file, reg); 1055 fprintf (dump_file, " in insn "); 1056 print_rtl_single (dump_file, insn); 1057 } 1058 1059 check_iv_ref_table_size (); 1060 if (DF_REF_IV (def)iv_ref_table[((def)->base.id)])
6
Assuming the condition is false
7
Taking false branch
1061 { 1062 if (dump_file) 1063 fprintf (dump_file, " already analysed.\n"); 1064 *iv = *DF_REF_IV (def)iv_ref_table[((def)->base.id)]; 1065 return iv->base != NULL_RTX(rtx) 0; 1066 } 1067 1068 iv->base = NULL_RTX(rtx) 0; 1069 iv->step = NULL_RTX(rtx) 0; 1070 1071 scalar_int_mode mode; 1072 if (!REG_P (reg)(((enum rtx_code) (reg)->code) == REG) || !is_a <scalar_int_mode> (GET_MODE (reg)((machine_mode) (reg)->mode), &mode))
8
Assuming field 'code' is equal to REG
9
Taking false branch
1073 return false; 1074 1075 set = single_set (insn); 1076 if (!set
9.1
'set' is non-null
9.1
'set' is non-null
)
10
Taking false branch
1077 return false; 1078 1079 if (!REG_P (SET_DEST (set))(((enum rtx_code) ((((set)->u.fld[0]).rt_rtx))->code) ==
REG)
)
11
Assuming field 'code' is equal to REG
12
Taking false branch
1080 return false; 1081 1082 gcc_assert (SET_DEST (set) == reg)((void)(!((((set)->u.fld[0]).rt_rtx) == reg) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1082, __FUNCTION__), 0 : 0))
;
13
Assuming 'reg' is equal to field 'rt_rtx'
14
'?' condition is false
1083 rhs = find_reg_equal_equiv_note (insn); 1084 if (rhs)
15
Assuming 'rhs' is non-null
16
Taking true branch
1085 rhs = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx); 1086 else 1087 rhs = SET_SRC (set)(((set)->u.fld[1]).rt_rtx); 1088 1089 iv_analyze_expr (insn, mode, rhs, iv);
17
Calling 'iv_analyze_expr'
1090 record_iv (def, iv); 1091 1092 if (dump_file) 1093 { 1094 print_rtl (dump_file, reg); 1095 fprintf (dump_file, " in insn "); 1096 print_rtl_single (dump_file, insn); 1097 fprintf (dump_file, " is "); 1098 dump_iv_info (dump_file, iv); 1099 fprintf (dump_file, "\n"); 1100 } 1101 1102 return iv->base != NULL_RTX(rtx) 0; 1103} 1104 1105/* Analyzes operand OP of INSN and stores the result to *IV. MODE is the 1106 mode of OP. */ 1107 1108static bool 1109iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv) 1110{ 1111 df_ref def = NULL__null; 1112 enum iv_grd_result res; 1113 1114 if (dump_file) 1115 { 1116 fprintf (dump_file, "Analyzing operand "); 1117 print_rtl (dump_file, op); 1118 fprintf (dump_file, " of insn "); 1119 print_rtl_single (dump_file, insn); 1120 } 1121 1122 if (function_invariant_p (op)) 1123 res = GRD_INVARIANT; 1124 else if (GET_CODE (op)((enum rtx_code) (op)->code) == SUBREG) 1125 { 1126 scalar_int_mode inner_mode; 1127 if (!subreg_lowpart_p (op) 1128 || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op))((machine_mode) ((((op)->u.fld[0]).rt_rtx))->mode), &inner_mode)) 1129 return false; 1130 1131 if (!iv_analyze_op (insn, inner_mode, SUBREG_REG (op)(((op)->u.fld[0]).rt_rtx), iv)) 1132 return false; 1133 1134 return iv_subreg (iv, mode); 1135 } 1136 else 1137 { 1138 res = iv_get_reaching_def (insn, op, &def); 1139 if (res == GRD_INVALID) 1140 { 1141 if (dump_file) 1142 fprintf (dump_file, " not simple.\n"); 1143 return false; 1144 } 1145 } 1146 1147 if (res == GRD_INVARIANT) 1148 { 1149 iv_constant (iv, mode, op); 1150 1151 if (dump_file) 1152 { 1153 fprintf (dump_file, " "); 1154 dump_iv_info (dump_file, iv); 1155 fprintf (dump_file, "\n"); 1156 } 1157 return true; 1158 } 1159 1160 if (res == GRD_MAYBE_BIV) 1161 return iv_analyze_biv (mode, op, iv); 1162 1163 return iv_analyze_def (def, iv); 1164} 1165 1166/* Analyzes value VAL at INSN and stores the result to *IV. MODE is the 1167 mode of VAL. 
*/ 1168 1169bool 1170iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv) 1171{ 1172 rtx reg; 1173 1174 /* We must find the insn in that val is used, so that we get to UD chains. 1175 Since the function is sometimes called on result of get_condition, 1176 this does not necessarily have to be directly INSN; scan also the 1177 following insns. */ 1178 if (simple_reg_p (val)) 1179 { 1180 if (GET_CODE (val)((enum rtx_code) (val)->code) == SUBREG) 1181 reg = SUBREG_REG (val)(((val)->u.fld[0]).rt_rtx); 1182 else 1183 reg = val; 1184 1185 while (!df_find_use (insn, reg)) 1186 insn = NEXT_INSN (insn); 1187 } 1188 1189 return iv_analyze_op (insn, mode, val, iv); 1190} 1191 1192/* Analyzes definition of DEF in INSN and stores the result to IV. */ 1193 1194bool 1195iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv) 1196{ 1197 df_ref adef; 1198 1199 adef = df_find_def (insn, def); 1200 if (!adef)
1
Assuming 'adef' is non-null
2
Taking false branch
1201 return false; 1202 1203 return iv_analyze_def (adef, iv);
3
Calling 'iv_analyze_def'
1204} 1205 1206/* Checks whether definition of register REG in INSN is a basic induction 1207 variable. MODE is the mode of REG. 1208 1209 IV analysis must have been initialized (via a call to 1210 iv_analysis_loop_init) for this function to produce a result. */ 1211 1212bool 1213biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg) 1214{ 1215 class rtx_iv iv; 1216 df_ref def, last_def; 1217 1218 if (!simple_reg_p (reg)) 1219 return false; 1220 1221 def = df_find_def (insn, reg); 1222 gcc_assert (def != NULL)((void)(!(def != __null) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1222, __FUNCTION__), 0 : 0))
; 1223 if (!latch_dominating_def (reg, &last_def)) 1224 return false; 1225 if (last_def != def) 1226 return false; 1227 1228 if (!iv_analyze_biv (mode, reg, &iv)) 1229 return false; 1230 1231 return iv.step != const0_rtx(const_int_rtx[64]); 1232} 1233 1234/* Calculates value of IV at ITERATION-th iteration. */ 1235 1236rtx 1237get_iv_value (class rtx_iv *iv, rtx iteration) 1238{ 1239 rtx val; 1240 1241 /* We would need to generate some if_then_else patterns, and so far 1242 it is not needed anywhere. */ 1243 gcc_assert (!iv->first_special)((void)(!(!iv->first_special) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1243, __FUNCTION__), 0 : 0))
; 1244 1245 if (iv->step != const0_rtx(const_int_rtx[64]) && iteration != const0_rtx(const_int_rtx[64])) 1246 val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base, 1247 simplify_gen_binary (MULT, iv->extend_mode, 1248 iv->step, iteration)); 1249 else 1250 val = iv->base; 1251 1252 if (iv->extend_mode == iv->mode) 1253 return val; 1254 1255 val = lowpart_subreg (iv->mode, val, iv->extend_mode); 1256 1257 if (iv->extend == IV_UNKNOWN_EXTEND) 1258 return val; 1259 1260 val = simplify_gen_unary (iv_extend_to_rtx_code (iv->extend), 1261 iv->extend_mode, val, iv->mode); 1262 val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta, 1263 simplify_gen_binary (MULT, iv->extend_mode, 1264 iv->mult, val)); 1265 1266 return val; 1267} 1268 1269/* Free the data for an induction variable analysis. */ 1270 1271void 1272iv_analysis_done (void) 1273{ 1274 if (!clean_slate) 1275 { 1276 clear_iv_info (); 1277 clean_slate = true; 1278 df_finish_pass (true); 1279 delete bivs; 1280 bivs = NULL__null; 1281 free (iv_ref_table); 1282 iv_ref_table = NULL__null; 1283 iv_ref_table_size = 0; 1284 } 1285} 1286 1287/* Computes inverse to X modulo (1 << MOD). */ 1288 1289static uint64_t 1290inverse (uint64_t x, int mod) 1291{ 1292 uint64_t mask = 1293 ((uint64_t) 1 << (mod - 1) << 1) - 1; 1294 uint64_t rslt = 1; 1295 int i; 1296 1297 for (i = 0; i < mod - 1; i++) 1298 { 1299 rslt = (rslt * x) & mask; 1300 x = (x * x) & mask; 1301 } 1302 1303 return rslt; 1304} 1305 1306/* Checks whether any register in X is in set ALT. */ 1307 1308static bool 1309altered_reg_used (const_rtx x, bitmap alt) 1310{ 1311 subrtx_iterator::array_type array; 1312 FOR_EACH_SUBRTX (iter, array, x, NONCONST)for (subrtx_iterator iter (array, x, rtx_nonconst_subrtx_bounds
); !iter.at_end (); iter.next ())
1313 { 1314 const_rtx x = *iter; 1315 if (REG_P (x)(((enum rtx_code) (x)->code) == REG) && REGNO_REG_SET_P (alt, REGNO (x))bitmap_bit_p (alt, (rhs_regno(x)))) 1316 return true; 1317 } 1318 return false; 1319} 1320 1321/* Marks registers altered by EXPR in set ALT. */ 1322 1323static void 1324mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED__attribute__ ((__unused__)), void *alt) 1325{ 1326 if (GET_CODE (expr)((enum rtx_code) (expr)->code) == SUBREG) 1327 expr = SUBREG_REG (expr)(((expr)->u.fld[0]).rt_rtx); 1328 if (!REG_P (expr)(((enum rtx_code) (expr)->code) == REG)) 1329 return; 1330 1331 SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr))bitmap_set_bit ((bitmap) alt, (rhs_regno(expr))); 1332} 1333 1334/* Checks whether RHS is simple enough to process. */ 1335 1336static bool 1337simple_rhs_p (rtx rhs) 1338{ 1339 rtx op0, op1; 1340 1341 if (function_invariant_p (rhs) 1342 || (REG_P (rhs)(((enum rtx_code) (rhs)->code) == REG) && !HARD_REGISTER_P (rhs)((((rhs_regno(rhs))) < 76)))) 1343 return true; 1344 1345 switch (GET_CODE (rhs)((enum rtx_code) (rhs)->code)) 1346 { 1347 case PLUS: 1348 case MINUS: 1349 case AND: 1350 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx); 1351 op1 = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx); 1352 /* Allow reg OP const and reg OP reg. */ 1353 if (!(REG_P (op0)(((enum rtx_code) (op0)->code) == REG) && !HARD_REGISTER_P (op0)((((rhs_regno(op0))) < 76))) 1354 && !function_invariant_p (op0)) 1355 return false; 1356 if (!(REG_P (op1)(((enum rtx_code) (op1)->code) == REG) && !HARD_REGISTER_P (op1)((((rhs_regno(op1))) < 76))) 1357 && !function_invariant_p (op1)) 1358 return false; 1359 1360 return true; 1361 1362 case ASHIFT: 1363 case ASHIFTRT: 1364 case LSHIFTRT: 1365 case MULT: 1366 op0 = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx); 1367 op1 = XEXP (rhs, 1)(((rhs)->u.fld[1]).rt_rtx); 1368 /* Allow reg OP const. 
*/ 1369 if (!(REG_P (op0)(((enum rtx_code) (op0)->code) == REG) && !HARD_REGISTER_P (op0)((((rhs_regno(op0))) < 76)))) 1370 return false; 1371 if (!function_invariant_p (op1)) 1372 return false; 1373 1374 return true; 1375 1376 default: 1377 return false; 1378 } 1379} 1380 1381/* If any registers in *EXPR that have a single definition, try to replace 1382 them with the known-equivalent values. */ 1383 1384static void 1385replace_single_def_regs (rtx *expr) 1386{ 1387 subrtx_var_iterator::array_type array; 1388 repeat: 1389 FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)for (subrtx_var_iterator iter (array, *expr, rtx_nonconst_subrtx_bounds
); !iter.at_end (); iter.next ())
1390 { 1391 rtx x = *iter; 1392 if (REG_P (x)(((enum rtx_code) (x)->code) == REG)) 1393 if (rtx new_x = df_find_single_def_src (x)) 1394 { 1395 *expr = simplify_replace_rtx (*expr, x, new_x); 1396 goto repeat; 1397 } 1398 } 1399} 1400 1401/* A subroutine of simplify_using_initial_values, this function examines INSN 1402 to see if it contains a suitable set that we can use to make a replacement. 1403 If it is suitable, return true and set DEST and SRC to the lhs and rhs of 1404 the set; return false otherwise. */ 1405 1406static bool 1407suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src) 1408{ 1409 rtx set = single_set (insn); 1410 rtx lhs = NULL_RTX(rtx) 0, rhs; 1411 1412 if (!set) 1413 return false; 1414 1415 lhs = SET_DEST (set)(((set)->u.fld[0]).rt_rtx); 1416 if (!REG_P (lhs)(((enum rtx_code) (lhs)->code) == REG)) 1417 return false; 1418 1419 rhs = find_reg_equal_equiv_note (insn); 1420 if (rhs) 1421 rhs = XEXP (rhs, 0)(((rhs)->u.fld[0]).rt_rtx); 1422 else 1423 rhs = SET_SRC (set)(((set)->u.fld[1]).rt_rtx); 1424 1425 if (!simple_rhs_p (rhs)) 1426 return false; 1427 1428 *dest = lhs; 1429 *src = rhs; 1430 return true; 1431} 1432 1433/* Using the data returned by suitable_set_for_replacement, replace DEST 1434 with SRC in *EXPR and return the new expression. Also call 1435 replace_single_def_regs if the replacement changed something. */ 1436static void 1437replace_in_expr (rtx *expr, rtx dest, rtx src) 1438{ 1439 rtx old = *expr; 1440 *expr = simplify_replace_rtx (*expr, dest, src); 1441 if (old == *expr) 1442 return; 1443 replace_single_def_regs (expr); 1444} 1445 1446/* Checks whether A implies B. 
*/ 1447 1448static bool 1449implies_p (rtx a, rtx b) 1450{ 1451 rtx op0, op1, opb0, opb1; 1452 machine_mode mode; 1453 1454 if (rtx_equal_p (a, b)) 1455 return true; 1456 1457 if (GET_CODE (a)((enum rtx_code) (a)->code) == EQ) 1458 { 1459 op0 = XEXP (a, 0)(((a)->u.fld[0]).rt_rtx); 1460 op1 = XEXP (a, 1)(((a)->u.fld[1]).rt_rtx); 1461 1462 if (REG_P (op0)(((enum rtx_code) (op0)->code) == REG) 1463 || (GET_CODE (op0)((enum rtx_code) (op0)->code) == SUBREG 1464 && REG_P (SUBREG_REG (op0))(((enum rtx_code) ((((op0)->u.fld[0]).rt_rtx))->code) ==
REG)
)) 1465 { 1466 rtx r = simplify_replace_rtx (b, op0, op1); 1467 if (r == const_true_rtx) 1468 return true; 1469 } 1470 1471 if (REG_P (op1)(((enum rtx_code) (op1)->code) == REG) 1472 || (GET_CODE (op1)((enum rtx_code) (op1)->code) == SUBREG 1473 && REG_P (SUBREG_REG (op1))(((enum rtx_code) ((((op1)->u.fld[0]).rt_rtx))->code) ==
REG)
)) 1474 { 1475 rtx r = simplify_replace_rtx (b, op1, op0); 1476 if (r == const_true_rtx) 1477 return true; 1478 } 1479 } 1480 1481 if (b == const_true_rtx) 1482 return true; 1483 1484 if ((GET_RTX_CLASS (GET_CODE (a))(rtx_class[(int) (((enum rtx_code) (a)->code))]) != RTX_COMM_COMPARE 1485 && GET_RTX_CLASS (GET_CODE (a))(rtx_class[(int) (((enum rtx_code) (a)->code))]) != RTX_COMPARE) 1486 || (GET_RTX_CLASS (GET_CODE (b))(rtx_class[(int) (((enum rtx_code) (b)->code))]) != RTX_COMM_COMPARE 1487 && GET_RTX_CLASS (GET_CODE (b))(rtx_class[(int) (((enum rtx_code) (b)->code))]) != RTX_COMPARE)) 1488 return false; 1489 1490 op0 = XEXP (a, 0)(((a)->u.fld[0]).rt_rtx); 1491 op1 = XEXP (a, 1)(((a)->u.fld[1]).rt_rtx); 1492 opb0 = XEXP (b, 0)(((b)->u.fld[0]).rt_rtx); 1493 opb1 = XEXP (b, 1)(((b)->u.fld[1]).rt_rtx); 1494 1495 mode = GET_MODE (op0)((machine_mode) (op0)->mode); 1496 if (mode != GET_MODE (opb0)((machine_mode) (opb0)->mode)) 1497 mode = VOIDmode((void) 0, E_VOIDmode); 1498 else if (mode == VOIDmode((void) 0, E_VOIDmode)) 1499 { 1500 mode = GET_MODE (op1)((machine_mode) (op1)->mode); 1501 if (mode != GET_MODE (opb1)((machine_mode) (opb1)->mode)) 1502 mode = VOIDmode((void) 0, E_VOIDmode); 1503 } 1504 1505 /* A < B implies A + 1 <= B. */ 1506 if ((GET_CODE (a)((enum rtx_code) (a)->code) == GT || GET_CODE (a)((enum rtx_code) (a)->code) == LT) 1507 && (GET_CODE (b)((enum rtx_code) (b)->code) == GE || GET_CODE (b)((enum rtx_code) (b)->code) == LE)) 1508 { 1509 1510 if (GET_CODE (a)((enum rtx_code) (a)->code) == GT) 1511 std::swap (op0, op1); 1512 1513 if (GET_CODE (b)((enum rtx_code) (b)->code) == GE) 1514 std::swap (opb0, opb1); 1515 1516 if (SCALAR_INT_MODE_P (mode)(((enum mode_class) mode_class[mode]) == MODE_INT || ((enum mode_class
) mode_class[mode]) == MODE_PARTIAL_INT)
1517 && rtx_equal_p (op1, opb1) 1518 && simplify_gen_binary (MINUS, mode, opb0, op0) == const1_rtx(const_int_rtx[64 +1])) 1519 return true; 1520 return false; 1521 } 1522 1523 /* A < B or A > B imply A != B. TODO: Likewise 1524 A + n < B implies A != B + n if neither wraps. */ 1525 if (GET_CODE (b)((enum rtx_code) (b)->code) == NE 1526 && (GET_CODE (a)((enum rtx_code) (a)->code) == GT || GET_CODE (a)((enum rtx_code) (a)->code) == GTU 1527 || GET_CODE (a)((enum rtx_code) (a)->code) == LT || GET_CODE (a)((enum rtx_code) (a)->code) == LTU)) 1528 { 1529 if (rtx_equal_p (op0, opb0) 1530 && rtx_equal_p (op1, opb1)) 1531 return true; 1532 } 1533 1534 /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1. */ 1535 if (GET_CODE (a)((enum rtx_code) (a)->code) == NE 1536 && op1 == const0_rtx(const_int_rtx[64])) 1537 { 1538 if ((GET_CODE (b)((enum rtx_code) (b)->code) == GTU 1539 && opb1 == const0_rtx(const_int_rtx[64])) 1540 || (GET_CODE (b)((enum rtx_code) (b)->code) == GEU 1541 && opb1 == const1_rtx(const_int_rtx[64 +1]))) 1542 return rtx_equal_p (op0, opb0); 1543 } 1544 1545 /* A != N is equivalent to A - (N + 1) <u -1. */ 1546 if (GET_CODE (a)((enum rtx_code) (a)->code) == NE 1547 && CONST_INT_P (op1)(((enum rtx_code) (op1)->code) == CONST_INT) 1548 && GET_CODE (b)((enum rtx_code) (b)->code) == LTU 1549 && opb1 == constm1_rtx(const_int_rtx[64 -1]) 1550 && GET_CODE (opb0)((enum rtx_code) (opb0)->code) == PLUS 1551 && CONST_INT_P (XEXP (opb0, 1))(((enum rtx_code) ((((opb0)->u.fld[1]).rt_rtx))->code) ==
CONST_INT)
1552 /* Avoid overflows. */ 1553 && ((unsigned HOST_WIDE_INTlong) INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]) 1554 != ((unsigned HOST_WIDE_INTlong)1 1555 << (HOST_BITS_PER_WIDE_INT64 - 1)) - 1) 1556 && INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]) + 1 == -INTVAL (op1)((op1)->u.hwint[0])) 1557 return rtx_equal_p (op0, XEXP (opb0, 0)(((opb0)->u.fld[0]).rt_rtx)); 1558 1559 /* Likewise, A != N implies A - N > 0. */ 1560 if (GET_CODE (a)((enum rtx_code) (a)->code) == NE 1561 && CONST_INT_P (op1)(((enum rtx_code) (op1)->code) == CONST_INT)) 1562 { 1563 if (GET_CODE (b)((enum rtx_code) (b)->code) == GTU 1564 && GET_CODE (opb0)((enum rtx_code) (opb0)->code) == PLUS 1565 && opb1 == const0_rtx(const_int_rtx[64]) 1566 && CONST_INT_P (XEXP (opb0, 1))(((enum rtx_code) ((((opb0)->u.fld[1]).rt_rtx))->code) ==
CONST_INT)
1567 /* Avoid overflows. */ 1568 && ((unsigned HOST_WIDE_INTlong) INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]) 1569 != (HOST_WIDE_INT_1U1UL << (HOST_BITS_PER_WIDE_INT64 - 1))) 1570 && rtx_equal_p (XEXP (opb0, 0)(((opb0)->u.fld[0]).rt_rtx), op0)) 1571 return INTVAL (op1)((op1)->u.hwint[0]) == -INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]); 1572 if (GET_CODE (b)((enum rtx_code) (b)->code) == GEU 1573 && GET_CODE (opb0)((enum rtx_code) (opb0)->code) == PLUS 1574 && opb1 == const1_rtx(const_int_rtx[64 +1]) 1575 && CONST_INT_P (XEXP (opb0, 1))(((enum rtx_code) ((((opb0)->u.fld[1]).rt_rtx))->code) ==
CONST_INT)
1576 /* Avoid overflows. */ 1577 && ((unsigned HOST_WIDE_INTlong) INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]) 1578 != (HOST_WIDE_INT_1U1UL << (HOST_BITS_PER_WIDE_INT64 - 1))) 1579 && rtx_equal_p (XEXP (opb0, 0)(((opb0)->u.fld[0]).rt_rtx), op0)) 1580 return INTVAL (op1)((op1)->u.hwint[0]) == -INTVAL (XEXP (opb0, 1))(((((opb0)->u.fld[1]).rt_rtx))->u.hwint[0]); 1581 } 1582 1583 /* A >s X, where X is positive, implies A <u Y, if Y is negative. */ 1584 if ((GET_CODE (a)((enum rtx_code) (a)->code) == GT || GET_CODE (a)((enum rtx_code) (a)->code) == GE) 1585 && CONST_INT_P (op1)(((enum rtx_code) (op1)->code) == CONST_INT) 1586 && ((GET_CODE (a)((enum rtx_code) (a)->code) == GT && op1 == constm1_rtx(const_int_rtx[64 -1])) 1587 || INTVAL (op1)((op1)->u.hwint[0]) >= 0) 1588 && GET_CODE (b)((enum rtx_code) (b)->code) == LTU 1589 && CONST_INT_P (opb1)(((enum rtx_code) (opb1)->code) == CONST_INT) 1590 && rtx_equal_p (op0, opb0)) 1591 return INTVAL (opb1)((opb1)->u.hwint[0]) < 0; 1592 1593 return false; 1594} 1595 1596/* Canonicalizes COND so that 1597 1598 (1) Ensure that operands are ordered according to 1599 swap_commutative_operands_p. 1600 (2) (LE x const) will be replaced with (LT x <const+1>) and similarly 1601 for GE, GEU, and LEU. */ 1602 1603rtx 1604canon_condition (rtx cond) 1605{ 1606 rtx op0, op1; 1607 enum rtx_code code; 1608 machine_mode mode; 1609 1610 code = GET_CODE (cond)((enum rtx_code) (cond)->code); 1611 op0 = XEXP (cond, 0)(((cond)->u.fld[0]).rt_rtx); 1612 op1 = XEXP (cond, 1)(((cond)->u.fld[1]).rt_rtx); 1613 1614 if (swap_commutative_operands_p (op0, op1)) 1615 { 1616 code = swap_condition (code); 1617 std::swap (op0, op1); 1618 } 1619 1620 mode = GET_MODE (op0)((machine_mode) (op0)->mode); 1621 if (mode == VOIDmode((void) 0, E_VOIDmode)) 1622 mode = GET_MODE (op1)((machine_mode) (op1)->mode); 1623 gcc_assert (mode != VOIDmode)((void)(!(mode != ((void) 0, E_VOIDmode)) ? 
fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1623, __FUNCTION__), 0 : 0))
; 1624 1625 if (CONST_SCALAR_INT_P (op1)((((enum rtx_code) (op1)->code) == CONST_INT) || (((enum rtx_code
) (op1)->code) == CONST_WIDE_INT))
&& GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode]) != MODE_CC) 1626 { 1627 rtx_mode_t const_val (op1, mode); 1628 1629 switch (code) 1630 { 1631 case LE: 1632 if (wi::ne_p (const_val, wi::max_value (mode, SIGNED))) 1633 { 1634 code = LT; 1635 op1 = immed_wide_int_const (wi::add (const_val, 1), mode); 1636 } 1637 break; 1638 1639 case GE: 1640 if (wi::ne_p (const_val, wi::min_value (mode, SIGNED))) 1641 { 1642 code = GT; 1643 op1 = immed_wide_int_const (wi::sub (const_val, 1), mode); 1644 } 1645 break; 1646 1647 case LEU: 1648 if (wi::ne_p (const_val, -1)) 1649 { 1650 code = LTU; 1651 op1 = immed_wide_int_const (wi::add (const_val, 1), mode); 1652 } 1653 break; 1654 1655 case GEU: 1656 if (wi::ne_p (const_val, 0)) 1657 { 1658 code = GTU; 1659 op1 = immed_wide_int_const (wi::sub (const_val, 1), mode); 1660 } 1661 break; 1662 1663 default: 1664 break; 1665 } 1666 } 1667 1668 if (op0 != XEXP (cond, 0)(((cond)->u.fld[0]).rt_rtx) 1669 || op1 != XEXP (cond, 1)(((cond)->u.fld[1]).rt_rtx) 1670 || code != GET_CODE (cond)((enum rtx_code) (cond)->code) 1671 || GET_MODE (cond)((machine_mode) (cond)->mode) != SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode))) 1672 cond = gen_rtx_fmt_ee (code, SImode, op0, op1)gen_rtx_fmt_ee_stat ((code), ((scalar_int_mode ((scalar_int_mode
::from_int) E_SImode))), (op0), (op1) )
; 1673 1674 return cond; 1675} 1676 1677/* Reverses CONDition; returns NULL if we cannot. */ 1678 1679static rtx 1680reversed_condition (rtx cond) 1681{ 1682 enum rtx_code reversed; 1683 reversed = reversed_comparison_code (cond, NULL__null); 1684 if (reversed == UNKNOWN) 1685 return NULL_RTX(rtx) 0; 1686 else 1687 return gen_rtx_fmt_ee (reversed,gen_rtx_fmt_ee_stat ((reversed), (((machine_mode) (cond)->
mode)), ((((cond)->u.fld[0]).rt_rtx)), ((((cond)->u.fld
[1]).rt_rtx)) )
1688 GET_MODE (cond), XEXP (cond, 0),gen_rtx_fmt_ee_stat ((reversed), (((machine_mode) (cond)->
mode)), ((((cond)->u.fld[0]).rt_rtx)), ((((cond)->u.fld
[1]).rt_rtx)) )
1689 XEXP (cond, 1))gen_rtx_fmt_ee_stat ((reversed), (((machine_mode) (cond)->
mode)), ((((cond)->u.fld[0]).rt_rtx)), ((((cond)->u.fld
[1]).rt_rtx)) )
; 1690} 1691 1692/* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the 1693 set of altered regs. */ 1694 1695void 1696simplify_using_condition (rtx cond, rtx *expr, regset altered) 1697{ 1698 rtx rev, reve, exp = *expr; 1699 1700 /* If some register gets altered later, we do not really speak about its 1701 value at the time of comparison. */ 1702 if (altered && altered_reg_used (cond, altered)) 1703 return; 1704 1705 if (GET_CODE (cond)((enum rtx_code) (cond)->code) == EQ 1706 && REG_P (XEXP (cond, 0))(((enum rtx_code) ((((cond)->u.fld[0]).rt_rtx))->code) ==
REG)
&& CONSTANT_P (XEXP (cond, 1))((rtx_class[(int) (((enum rtx_code) ((((cond)->u.fld[1]).rt_rtx
))->code))]) == RTX_CONST_OBJ)
) 1707 { 1708 *expr = simplify_replace_rtx (*expr, XEXP (cond, 0)(((cond)->u.fld[0]).rt_rtx), XEXP (cond, 1)(((cond)->u.fld[1]).rt_rtx)); 1709 return; 1710 } 1711 1712 if (!COMPARISON_P (exp)(((rtx_class[(int) (((enum rtx_code) (exp)->code))]) &
(~1)) == (RTX_COMPARE & (~1)))
) 1713 return; 1714 1715 rev = reversed_condition (cond); 1716 reve = reversed_condition (exp); 1717 1718 cond = canon_condition (cond); 1719 exp = canon_condition (exp); 1720 if (rev) 1721 rev = canon_condition (rev); 1722 if (reve) 1723 reve = canon_condition (reve); 1724 1725 if (rtx_equal_p (exp, cond)) 1726 { 1727 *expr = const_true_rtx; 1728 return; 1729 } 1730 1731 if (rev && rtx_equal_p (exp, rev)) 1732 { 1733 *expr = const0_rtx(const_int_rtx[64]); 1734 return; 1735 } 1736 1737 if (implies_p (cond, exp)) 1738 { 1739 *expr = const_true_rtx; 1740 return; 1741 } 1742 1743 if (reve && implies_p (cond, reve)) 1744 { 1745 *expr = const0_rtx(const_int_rtx[64]); 1746 return; 1747 } 1748 1749 /* A proof by contradiction. If *EXPR implies (not cond), *EXPR must 1750 be false. */ 1751 if (rev && implies_p (exp, rev)) 1752 { 1753 *expr = const0_rtx(const_int_rtx[64]); 1754 return; 1755 } 1756 1757 /* Similarly, If (not *EXPR) implies (not cond), *EXPR must be true. */ 1758 if (rev && reve && implies_p (reve, rev)) 1759 { 1760 *expr = const_true_rtx; 1761 return; 1762 } 1763 1764 /* We would like to have some other tests here. TODO. */ 1765 1766 return; 1767} 1768 1769/* Use relationship between A and *B to eventually eliminate *B. 1770 OP is the operation we consider. */ 1771 1772static void 1773eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b) 1774{ 1775 switch (op) 1776 { 1777 case AND: 1778 /* If A implies *B, we may replace *B by true. */ 1779 if (implies_p (a, *b)) 1780 *b = const_true_rtx; 1781 break; 1782 1783 case IOR: 1784 /* If *B implies A, we may replace *B by false. */ 1785 if (implies_p (*b, a)) 1786 *b = const0_rtx(const_int_rtx[64]); 1787 break; 1788 1789 default: 1790 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1790, __FUNCTION__))
; 1791 } 1792} 1793 1794/* Eliminates the conditions in TAIL that are implied by HEAD. OP is the 1795 operation we consider. */ 1796 1797static void 1798eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail) 1799{ 1800 rtx elt; 1801 1802 for (elt = tail; elt; elt = XEXP (elt, 1)(((elt)->u.fld[1]).rt_rtx)) 1803 eliminate_implied_condition (op, *head, &XEXP (elt, 0)(((elt)->u.fld[0]).rt_rtx)); 1804 for (elt = tail; elt; elt = XEXP (elt, 1)(((elt)->u.fld[1]).rt_rtx)) 1805 eliminate_implied_condition (op, XEXP (elt, 0)(((elt)->u.fld[0]).rt_rtx), head); 1806} 1807 1808/* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR 1809 is a list, its elements are assumed to be combined using OP. */ 1810 1811static void 1812simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr) 1813{ 1814 bool expression_valid; 1815 rtx head, tail, last_valid_expr; 1816 rtx_expr_list *cond_list; 1817 rtx_insn *insn; 1818 rtx neutral, aggr; 1819 regset altered, this_altered; 1820 edge e; 1821 1822 if (!*expr) 1823 return; 1824 1825 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 1826 return; 1827 1828 if (GET_CODE (*expr)((enum rtx_code) (*expr)->code) == EXPR_LIST) 1829 { 1830 head = XEXP (*expr, 0)(((*expr)->u.fld[0]).rt_rtx); 1831 tail = XEXP (*expr, 1)(((*expr)->u.fld[1]).rt_rtx); 1832 1833 eliminate_implied_conditions (op, &head, tail); 1834 1835 switch (op) 1836 { 1837 case AND: 1838 neutral = const_true_rtx; 1839 aggr = const0_rtx(const_int_rtx[64]); 1840 break; 1841 1842 case IOR: 1843 neutral = const0_rtx(const_int_rtx[64]); 1844 aggr = const_true_rtx; 1845 break; 1846 1847 default: 1848 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1848, __FUNCTION__))
; 1849 } 1850 1851 simplify_using_initial_values (loop, UNKNOWN, &head); 1852 if (head == aggr) 1853 { 1854 XEXP (*expr, 0)(((*expr)->u.fld[0]).rt_rtx) = aggr; 1855 XEXP (*expr, 1)(((*expr)->u.fld[1]).rt_rtx) = NULL_RTX(rtx) 0; 1856 return; 1857 } 1858 else if (head == neutral) 1859 { 1860 *expr = tail; 1861 simplify_using_initial_values (loop, op, expr); 1862 return; 1863 } 1864 simplify_using_initial_values (loop, op, &tail); 1865 1866 if (tail && XEXP (tail, 0)(((tail)->u.fld[0]).rt_rtx) == aggr) 1867 { 1868 *expr = tail; 1869 return; 1870 } 1871 1872 XEXP (*expr, 0)(((*expr)->u.fld[0]).rt_rtx) = head; 1873 XEXP (*expr, 1)(((*expr)->u.fld[1]).rt_rtx) = tail; 1874 return; 1875 } 1876 1877 gcc_assert (op == UNKNOWN)((void)(!(op == UNKNOWN) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 1877, __FUNCTION__), 0 : 0))
; 1878 1879 replace_single_def_regs (expr); 1880 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 1881 return; 1882 1883 e = loop_preheader_edge (loop); 1884 if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)) 1885 return; 1886 1887 altered = ALLOC_REG_SET (&reg_obstack)bitmap_alloc (&reg_obstack); 1888 this_altered = ALLOC_REG_SET (&reg_obstack)bitmap_alloc (&reg_obstack); 1889 1890 expression_valid = true; 1891 last_valid_expr = *expr; 1892 cond_list = NULL__null; 1893 while (1) 1894 { 1895 insn = BB_END (e->src)(e->src)->il.x.rtl->end_; 1896 if (any_condjump_p (insn) && onlyjump_p (insn)) 1897 { 1898 rtx cond = get_condition (BB_END (e->src)(e->src)->il.x.rtl->end_, NULL__null, false, true); 1899 1900 if (cond && (e->flags & EDGE_FALLTHRU)) 1901 cond = reversed_condition (cond); 1902 if (cond) 1903 { 1904 rtx old = *expr; 1905 simplify_using_condition (cond, expr, altered); 1906 if (old != *expr) 1907 { 1908 rtx note; 1909 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 1910 goto out; 1911 for (note = cond_list; note; note = XEXP (note, 1)(((note)->u.fld[1]).rt_rtx)) 1912 { 1913 simplify_using_condition (XEXP (note, 0)(((note)->u.fld[0]).rt_rtx), expr, altered); 1914 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 1915 goto out; 1916 } 1917 } 1918 cond_list = alloc_EXPR_LIST (0, cond, cond_list); 1919 } 1920 } 1921 1922 FOR_BB_INSNS_REVERSE (e->src, insn)for ((insn) = (e->src)->il.x.rtl->end_; (insn) &&
(insn) != PREV_INSN ((e->src)->il.x.head_); (insn) = PREV_INSN
(insn))
1923 { 1924 rtx src, dest; 1925 rtx old = *expr; 1926 1927 if (!INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
) 1928 continue; 1929 1930 CLEAR_REG_SET (this_altered)bitmap_clear (this_altered); 1931 note_stores (insn, mark_altered, this_altered); 1932 if (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN)) 1933 { 1934 /* Kill all registers that might be clobbered by the call. 1935 We don't track modes of hard registers, so we need to be 1936 conservative and assume that partial kills are full kills. */ 1937 function_abi callee_abi = insn_callee_abi (insn); 1938 IOR_REG_SET_HRS (this_altered,bitmap_ior_into (this_altered, bitmap_view<HARD_REG_SET>
(callee_abi.full_and_partial_reg_clobbers ()))
1939 callee_abi.full_and_partial_reg_clobbers ())bitmap_ior_into (this_altered, bitmap_view<HARD_REG_SET>
(callee_abi.full_and_partial_reg_clobbers ()))
; 1940 } 1941 1942 if (suitable_set_for_replacement (insn, &dest, &src)) 1943 { 1944 rtx_expr_list **pnote, **pnote_next; 1945 1946 replace_in_expr (expr, dest, src); 1947 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 1948 goto out; 1949 1950 for (pnote = &cond_list; *pnote; pnote = pnote_next) 1951 { 1952 rtx_expr_list *note = *pnote; 1953 rtx old_cond = XEXP (note, 0)(((note)->u.fld[0]).rt_rtx); 1954 1955 pnote_next = (rtx_expr_list **)&XEXP (note, 1)(((note)->u.fld[1]).rt_rtx); 1956 replace_in_expr (&XEXP (note, 0)(((note)->u.fld[0]).rt_rtx), dest, src); 1957 1958 /* We can no longer use a condition that has been simplified 1959 to a constant, and simplify_using_condition will abort if 1960 we try. */ 1961 if (CONSTANT_P (XEXP (note, 0))((rtx_class[(int) (((enum rtx_code) ((((note)->u.fld[0]).rt_rtx
))->code))]) == RTX_CONST_OBJ)
) 1962 { 1963 *pnote = *pnote_next; 1964 pnote_next = pnote; 1965 free_EXPR_LIST_node (note); 1966 } 1967 /* Retry simplifications with this condition if either the 1968 expression or the condition changed. */ 1969 else if (old_cond != XEXP (note, 0)(((note)->u.fld[0]).rt_rtx) || old != *expr) 1970 simplify_using_condition (XEXP (note, 0)(((note)->u.fld[0]).rt_rtx), expr, altered); 1971 } 1972 } 1973 else 1974 { 1975 rtx_expr_list **pnote, **pnote_next; 1976 1977 /* If we did not use this insn to make a replacement, any overlap 1978 between stores in this insn and our expression will cause the 1979 expression to become invalid. */ 1980 if (altered_reg_used (*expr, this_altered)) 1981 goto out; 1982 1983 /* Likewise for the conditions. */ 1984 for (pnote = &cond_list; *pnote; pnote = pnote_next) 1985 { 1986 rtx_expr_list *note = *pnote; 1987 rtx old_cond = XEXP (note, 0)(((note)->u.fld[0]).rt_rtx); 1988 1989 pnote_next = (rtx_expr_list **)&XEXP (note, 1)(((note)->u.fld[1]).rt_rtx); 1990 if (altered_reg_used (old_cond, this_altered)) 1991 { 1992 *pnote = *pnote_next; 1993 pnote_next = pnote; 1994 free_EXPR_LIST_node (note); 1995 } 1996 } 1997 } 1998 1999 if (CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 2000 goto out; 2001 2002 IOR_REG_SET (altered, this_altered)bitmap_ior_into (altered, this_altered); 2003 2004 /* If the expression now contains regs that have been altered, we 2005 can't return it to the caller. However, it is still valid for 2006 further simplification, so keep searching to see if we can 2007 eventually turn it into a constant. */ 2008 if (altered_reg_used (*expr, altered)) 2009 expression_valid = false; 2010 if (expression_valid) 2011 last_valid_expr = *expr; 2012 } 2013 2014 if (!single_pred_p (e->src) 2015 || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun)(((cfun + 0))->cfg->x_entry_block_ptr)) 2016 break; 2017 e = single_pred_edge (e->src); 2018 } 2019 2020 out: 2021 free_EXPR_LIST_list (&cond_list); 2022 if (!CONSTANT_P (*expr)((rtx_class[(int) (((enum rtx_code) (*expr)->code))]) == RTX_CONST_OBJ
)
) 2023 *expr = last_valid_expr; 2024 FREE_REG_SET (altered)((void) (bitmap_obstack_free ((bitmap) altered), (altered) = (
bitmap) __null))
; 2025 FREE_REG_SET (this_altered)((void) (bitmap_obstack_free ((bitmap) this_altered), (this_altered
) = (bitmap) __null))
; 2026} 2027 2028/* Transforms invariant IV into MODE. Adds assumptions based on the fact 2029 that IV occurs as left operands of comparison COND and its signedness 2030 is SIGNED_P to DESC. */ 2031 2032static void 2033shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode, 2034 enum rtx_code cond, bool signed_p, class niter_desc *desc) 2035{ 2036 rtx mmin, mmax, cond_over, cond_under; 2037 2038 get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax); 2039 cond_under = simplify_gen_relational (LT, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), iv->extend_mode, 2040 iv->base, mmin); 2041 cond_over = simplify_gen_relational (GT, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), iv->extend_mode, 2042 iv->base, mmax); 2043 2044 switch (cond) 2045 { 2046 case LE: 2047 case LT: 2048 case LEU: 2049 case LTU: 2050 if (cond_under != const0_rtx(const_int_rtx[64])) 2051 desc->infinite = 2052 alloc_EXPR_LIST (0, cond_under, desc->infinite); 2053 if (cond_over != const0_rtx(const_int_rtx[64])) 2054 desc->noloop_assumptions = 2055 alloc_EXPR_LIST (0, cond_over, desc->noloop_assumptions); 2056 break; 2057 2058 case GE: 2059 case GT: 2060 case GEU: 2061 case GTU: 2062 if (cond_over != const0_rtx(const_int_rtx[64])) 2063 desc->infinite = 2064 alloc_EXPR_LIST (0, cond_over, desc->infinite); 2065 if (cond_under != const0_rtx(const_int_rtx[64])) 2066 desc->noloop_assumptions = 2067 alloc_EXPR_LIST (0, cond_under, desc->noloop_assumptions); 2068 break; 2069 2070 case NE: 2071 if (cond_over != const0_rtx(const_int_rtx[64])) 2072 desc->infinite = 2073 alloc_EXPR_LIST (0, cond_over, desc->infinite); 2074 if (cond_under != const0_rtx(const_int_rtx[64])) 2075 desc->infinite = 2076 alloc_EXPR_LIST (0, cond_under, desc->infinite); 2077 break; 2078 2079 default: 2080 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2080, __FUNCTION__))
; 2081 } 2082 2083 iv->mode = mode; 2084 iv->extend = signed_p ? IV_SIGN_EXTEND : IV_ZERO_EXTEND; 2085} 2086 2087/* Transforms IV0 and IV1 compared by COND so that they are both compared as 2088 subregs of the same mode if possible (sometimes it is necessary to add 2089 some assumptions to DESC). */ 2090 2091static bool 2092canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1, 2093 enum rtx_code cond, class niter_desc *desc) 2094{ 2095 scalar_int_mode comp_mode; 2096 bool signed_p; 2097 2098 /* If the ivs behave specially in the first iteration, or are 2099 added/multiplied after extending, we ignore them. */ 2100 if (iv0->first_special || iv0->mult != const1_rtx(const_int_rtx[64 +1]) || iv0->delta != const0_rtx(const_int_rtx[64])) 2101 return false; 2102 if (iv1->first_special || iv1->mult != const1_rtx(const_int_rtx[64 +1]) || iv1->delta != const0_rtx(const_int_rtx[64])) 2103 return false; 2104 2105 /* If there is some extend, it must match signedness of the comparison. */ 2106 switch (cond) 2107 { 2108 case LE: 2109 case LT: 2110 if (iv0->extend == IV_ZERO_EXTEND 2111 || iv1->extend == IV_ZERO_EXTEND) 2112 return false; 2113 signed_p = true; 2114 break; 2115 2116 case LEU: 2117 case LTU: 2118 if (iv0->extend == IV_SIGN_EXTEND 2119 || iv1->extend == IV_SIGN_EXTEND) 2120 return false; 2121 signed_p = false; 2122 break; 2123 2124 case NE: 2125 if (iv0->extend != IV_UNKNOWN_EXTEND 2126 && iv1->extend != IV_UNKNOWN_EXTEND 2127 && iv0->extend != iv1->extend) 2128 return false; 2129 2130 signed_p = false; 2131 if (iv0->extend != IV_UNKNOWN_EXTEND) 2132 signed_p = iv0->extend == IV_SIGN_EXTEND; 2133 if (iv1->extend != IV_UNKNOWN_EXTEND) 2134 signed_p = iv1->extend == IV_SIGN_EXTEND; 2135 break; 2136 2137 default: 2138 gcc_unreachable ()(fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2138, __FUNCTION__))
; 2139 } 2140 2141 /* Values of both variables should be computed in the same mode. These 2142 might indeed be different, if we have comparison like 2143 2144 (compare (subreg:SI (iv0)) (subreg:SI (iv1))) 2145 2146 and iv0 and iv1 are both ivs iterating in SI mode, but calculated 2147 in different modes. This does not seem impossible to handle, but 2148 it hardly ever occurs in practice. 2149 2150 The only exception is the case when one of operands is invariant. 2151 For example pentium 3 generates comparisons like 2152 (lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we 2153 definitely do not want this prevent the optimization. */ 2154 comp_mode = iv0->extend_mode; 2155 if (GET_MODE_BITSIZE (comp_mode) < GET_MODE_BITSIZE (iv1->extend_mode)) 2156 comp_mode = iv1->extend_mode; 2157 2158 if (iv0->extend_mode != comp_mode) 2159 { 2160 if (iv0->mode != iv0->extend_mode 2161 || iv0->step != const0_rtx(const_int_rtx[64])) 2162 return false; 2163 2164 iv0->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, 2165 comp_mode, iv0->base, iv0->mode); 2166 iv0->extend_mode = comp_mode; 2167 } 2168 2169 if (iv1->extend_mode != comp_mode) 2170 { 2171 if (iv1->mode != iv1->extend_mode 2172 || iv1->step != const0_rtx(const_int_rtx[64])) 2173 return false; 2174 2175 iv1->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, 2176 comp_mode, iv1->base, iv1->mode); 2177 iv1->extend_mode = comp_mode; 2178 } 2179 2180 /* Check that both ivs belong to a range of a single mode. If one of the 2181 operands is an invariant, we may need to shorten it into the common 2182 mode. 
*/ 2183 if (iv0->mode == iv0->extend_mode 2184 && iv0->step == const0_rtx(const_int_rtx[64]) 2185 && iv0->mode != iv1->mode) 2186 shorten_into_mode (iv0, iv1->mode, cond, signed_p, desc); 2187 2188 if (iv1->mode == iv1->extend_mode 2189 && iv1->step == const0_rtx(const_int_rtx[64]) 2190 && iv0->mode != iv1->mode) 2191 shorten_into_mode (iv1, iv0->mode, swap_condition (cond), signed_p, desc); 2192 2193 if (iv0->mode != iv1->mode) 2194 return false; 2195 2196 desc->mode = iv0->mode; 2197 desc->signed_p = signed_p; 2198 2199 return true; 2200} 2201 2202/* Tries to estimate the maximum number of iterations in LOOP, and return the 2203 result. This function is called from iv_number_of_iterations with 2204 a number of fields in DESC already filled in. OLD_NITER is the original 2205 expression for the number of iterations, before we tried to simplify it. */ 2206 2207static uint64_t 2208determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter) 2209{ 2210 rtx niter = desc->niter_expr; 2211 rtx mmin, mmax, cmp; 2212 uint64_t nmax, inc; 2213 uint64_t andmax = 0; 2214 2215 /* We used to look for constant operand 0 of AND, 2216 but canonicalization should always make this impossible. */ 2217 gcc_checking_assert (GET_CODE (niter) != AND((void)(!(((enum rtx_code) (niter)->code) != AND || !(((enum
rtx_code) ((((niter)->u.fld[0]).rt_rtx))->code) == CONST_INT
)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2218, __FUNCTION__), 0 : 0))
2218 || !CONST_INT_P (XEXP (niter, 0)))((void)(!(((enum rtx_code) (niter)->code) != AND || !(((enum
rtx_code) ((((niter)->u.fld[0]).rt_rtx))->code) == CONST_INT
)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2218, __FUNCTION__), 0 : 0))
; 2219 2220 if (GET_CODE (niter)((enum rtx_code) (niter)->code) == AND 2221 && CONST_INT_P (XEXP (niter, 1))(((enum rtx_code) ((((niter)->u.fld[1]).rt_rtx))->code)
== CONST_INT)
) 2222 { 2223 andmax = UINTVAL (XEXP (niter, 1))((unsigned long) (((((niter)->u.fld[1]).rt_rtx))->u.hwint
[0]))
; 2224 niter = XEXP (niter, 0)(((niter)->u.fld[0]).rt_rtx); 2225 } 2226 2227 get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax); 2228 nmax = UINTVAL (mmax)((unsigned long) ((mmax)->u.hwint[0])) - UINTVAL (mmin)((unsigned long) ((mmin)->u.hwint[0])); 2229 2230 if (GET_CODE (niter)((enum rtx_code) (niter)->code) == UDIV) 2231 { 2232 if (!CONST_INT_P (XEXP (niter, 1))(((enum rtx_code) ((((niter)->u.fld[1]).rt_rtx))->code)
== CONST_INT)
) 2233 return nmax; 2234 inc = INTVAL (XEXP (niter, 1))(((((niter)->u.fld[1]).rt_rtx))->u.hwint[0]); 2235 niter = XEXP (niter, 0)(((niter)->u.fld[0]).rt_rtx); 2236 } 2237 else 2238 inc = 1; 2239 2240 /* We could use a binary search here, but for now improving the upper 2241 bound by just one eliminates one important corner case. */ 2242 cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode((void) 0, E_VOIDmode), 2243 desc->mode, old_niter, mmax); 2244 simplify_using_initial_values (loop, UNKNOWN, &cmp); 2245 if (cmp == const_true_rtx) 2246 { 2247 nmax--; 2248 2249 if (dump_file) 2250 fprintf (dump_file, ";; improved upper bound by one.\n"); 2251 } 2252 nmax /= inc; 2253 if (andmax) 2254 nmax = MIN (nmax, andmax)((nmax) < (andmax) ? (nmax) : (andmax)); 2255 if (dump_file) 2256 fprintf (dump_file, ";; Determined upper bound %" PRId64"l" "d"".\n", 2257 nmax); 2258 return nmax; 2259} 2260 2261/* Computes number of iterations of the CONDITION in INSN in LOOP and stores 2262 the result into DESC. Very similar to determine_number_of_iterations 2263 (basically its rtl version), complicated by things like subregs. 
*/ 2264 2265static void 2266iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition, 2267 class niter_desc *desc) 2268{ 2269 rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1; 2270 class rtx_iv iv0, iv1; 2271 rtx assumption, may_not_xform; 2272 enum rtx_code cond; 2273 machine_mode nonvoid_mode; 2274 scalar_int_mode comp_mode; 2275 rtx mmin, mmax, mode_mmin, mode_mmax; 2276 uint64_t s, size, d, inv, max, up, down; 2277 int64_t inc, step_val; 2278 int was_sharp = false; 2279 rtx old_niter; 2280 bool step_is_pow2; 2281 2282 /* The meaning of these assumptions is this: 2283 if !assumptions 2284 then the rest of information does not have to be valid 2285 if noloop_assumptions then the loop does not roll 2286 if infinite then this exit is never used */ 2287 2288 desc->assumptions = NULL_RTX(rtx) 0; 2289 desc->noloop_assumptions = NULL_RTX(rtx) 0; 2290 desc->infinite = NULL_RTX(rtx) 0; 2291 desc->simple_p = true; 2292 2293 desc->const_iter = false; 2294 desc->niter_expr = NULL_RTX(rtx) 0; 2295 2296 cond = GET_CODE (condition)((enum rtx_code) (condition)->code); 2297 gcc_assert (COMPARISON_P (condition))((void)(!((((rtx_class[(int) (((enum rtx_code) (condition)->
code))]) & (~1)) == (RTX_COMPARE & (~1)))) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2297, __FUNCTION__), 0 : 0))
; 2298 2299 nonvoid_mode = GET_MODE (XEXP (condition, 0))((machine_mode) ((((condition)->u.fld[0]).rt_rtx))->mode
)
; 2300 if (nonvoid_mode == VOIDmode((void) 0, E_VOIDmode)) 2301 nonvoid_mode = GET_MODE (XEXP (condition, 1))((machine_mode) ((((condition)->u.fld[1]).rt_rtx))->mode
)
; 2302 /* The constant comparisons should be folded. */ 2303 gcc_assert (nonvoid_mode != VOIDmode)((void)(!(nonvoid_mode != ((void) 0, E_VOIDmode)) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/loop-iv.cc"
, 2303, __FUNCTION__), 0 : 0))
; 2304 2305 /* We only handle integers or pointers. */ 2306 scalar_int_mode mode; 2307 if (!is_a <scalar_int_mode> (nonvoid_mode, &mode)) 2308 goto fail; 2309 2310 op0 = XEXP (condition, 0)(((condition)->u.fld[0]).rt_rtx); 2311 if (!iv_analyze (insn, mode, op0, &iv0)) 2312 goto fail; 2313 2314 op1 = XEXP (condition, 1)(((condition)->u.fld[1]).rt_rtx); 2315 if (!iv_analyze (insn, mode, op1, &iv1)) 2316 goto fail; 2317 2318 if (GET_MODE_BITSIZE (iv0.extend_mode) > HOST_BITS_PER_WIDE_INT64 2319 || GET_MODE_BITSIZE (iv1.extend_mode) > HOST_BITS_PER_WIDE_INT64) 2320 goto fail; 2321 2322 /* Check condition and normalize it. */ 2323 2324 switch (cond) 2325 { 2326 case GE: 2327 case GT: 2328 case GEU: 2329 case GTU: 2330 std::swap (iv0, iv1); 2331 cond = swap_condition (cond); 2332 break; 2333 case NE: 2334 case LE: 2335 case LEU: 2336 case LT: 2337 case LTU: 2338 break; 2339 default: 2340 goto fail; 2341 } 2342 2343 /* Handle extends. This is relatively nontrivial, so we only try in some 2344 easy cases, when we can canonicalize the ivs (possibly by adding some 2345 assumptions) to shape subreg (base + i * step). This function also fills 2346 in desc->mode and desc->signed_p. */ 2347 2348 if (!canonicalize_iv_subregs (&iv0, &iv1, cond, desc)) 2349 goto fail; 2350 2351 comp_mode = iv0.extend_mode; 2352 mode = iv0.mode; 2353 size = GET_MODE_PRECISION (mode); 2354 get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax); 2355 mode_mmin = lowpart_subreg (mode, mmin, comp_mode); 2356 mode_mmax = lowpart_subreg (mode, mmax, comp_mode); 2357 2358 if (!CONST_INT_P (iv0.step)(((enum rtx_code) (iv0.step)->code) == CONST_INT) || !CONST_INT_P (iv1.step)(((enum rtx_code) (iv1.step)->code) == CONST_INT)) 2359 goto fail; 2360 2361 /* We can take care of the case of two induction variables chasing each other 2362 if the test is NE. I have never seen a loop using it, but still it is 2363 cool. 
*/ 2364 if (iv0.step != const0_rtx(const_int_rtx[64]) && iv1.step != const0_rtx(const_int_rtx[64])) 2365 { 2366 if (cond != NE) 2367 goto fail; 2368 2369 iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step); 2370 iv1.step = const0_rtx(const_int_rtx[64]); 2371 } 2372 2373 iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); 2374 iv1.step = lowpart_subreg (mode, iv1.step, comp_mode); 2375 2376 /* This is either infinite loop or the one that ends immediately, depending 2377 on initial values. Unswitching should remove this kind of conditions. */ 2378 if (iv0.step == const0_rtx(const_int_rtx[64]) && iv1.step == const0_rtx(const_int_rtx[64])) 2379 goto fail; 2380 2381 if (cond != NE) 2382 { 2383 if (iv0.step == const0_rtx(const_int_rtx[64])) 2384 step_val = -INTVAL (iv1.step)((iv1.step)->u.hwint[0]); 2385 else 2386 step_val = INTVAL (iv0.step)((iv0.step)->u.hwint[0]); 2387 2388 /* Ignore loops of while (i-- < 10) type. */ 2389 if (step_val < 0) 2390 goto fail; 2391 2392 step_is_pow2 = !(step_val & (step_val - 1)); 2393 } 2394 else 2395 { 2396 /* We do not care about whether the step is power of two in this 2397 case. */ 2398 step_is_pow2 = false; 2399 step_val = 0; 2400 } 2401 2402 /* Some more condition normalization. We must record some assumptions 2403 due to overflows. */ 2404 switch (cond) 2405 { 2406 case LT: 2407 case LTU: 2408 /* We want to take care only of non-sharp relationals; this is easy, 2409 as in cases the overflow would make the transformation unsafe 2410 the loop does not roll. Seemingly it would make more sense to want 2411 to take care of sharp relationals instead, as NE is more similar to 2412 them, but the problem is that here the transformation would be more 2413 difficult due to possibly infinite loops. 
*/ 2414 if (iv0.step == const0_rtx(const_int_rtx[64])) 2415 { 2416 tmp = lowpart_subreg (mode, iv0.base, comp_mode); 2417 assumption = simplify_gen_relational (EQ, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, tmp, 2418 mode_mmax); 2419 if (assumption == const_true_rtx) 2420 goto zero_iter_simplify; 2421 iv0.base = simplify_gen_binary (PLUS, comp_mode, 2422 iv0.base, const1_rtx(const_int_rtx[64 +1])); 2423 } 2424 else 2425 { 2426 tmp = lowpart_subreg (mode, iv1.base, comp_mode); 2427 assumption = simplify_gen_relational (EQ, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, tmp, 2428 mode_mmin); 2429 if (assumption == const_true_rtx) 2430 goto zero_iter_simplify; 2431 iv1.base = simplify_gen_binary (PLUS, comp_mode, 2432 iv1.base, constm1_rtx(const_int_rtx[64 -1])); 2433 } 2434 2435 if (assumption != const0_rtx(const_int_rtx[64])) 2436 desc->noloop_assumptions = 2437 alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); 2438 cond = (cond == LT) ? LE : LEU; 2439 2440 /* It will be useful to be able to tell the difference once more in 2441 LE -> NE reduction. */ 2442 was_sharp = true; 2443 break; 2444 default: ; 2445 } 2446 2447 /* Take care of trivially infinite loops. */ 2448 if (cond != NE) 2449 { 2450 if (iv0.step == const0_rtx(const_int_rtx[64])) 2451 { 2452 tmp = lowpart_subreg (mode, iv0.base, comp_mode); 2453 if (rtx_equal_p (tmp, mode_mmin)) 2454 { 2455 desc->infinite = 2456 alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX(rtx) 0); 2457 /* Fill in the remaining fields somehow. */ 2458 goto zero_iter_simplify; 2459 } 2460 } 2461 else 2462 { 2463 tmp = lowpart_subreg (mode, iv1.base, comp_mode); 2464 if (rtx_equal_p (tmp, mode_mmax)) 2465 { 2466 desc->infinite = 2467 alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX(rtx) 0); 2468 /* Fill in the remaining fields somehow. 
*/ 2469 goto zero_iter_simplify; 2470 } 2471 } 2472 } 2473 2474 /* If we can we want to take care of NE conditions instead of size 2475 comparisons, as they are much more friendly (most importantly 2476 this takes care of special handling of loops with step 1). We can 2477 do it if we first check that upper bound is greater or equal to 2478 lower bound, their difference is constant c modulo step and that 2479 there is not an overflow. */ 2480 if (cond != NE) 2481 { 2482 if (iv0.step == const0_rtx(const_int_rtx[64])) 2483 step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode); 2484 else 2485 step = iv0.step; 2486 step = lowpart_subreg (mode, step, comp_mode); 2487 delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base); 2488 delta = lowpart_subreg (mode, delta, comp_mode); 2489 delta = simplify_gen_binary (UMOD, mode, delta, step); 2490 may_xform = const0_rtx(const_int_rtx[64]); 2491 may_not_xform = const_true_rtx; 2492 2493 if (CONST_INT_P (delta)(((enum rtx_code) (delta)->code) == CONST_INT)) 2494 { 2495 if (was_sharp && INTVAL (delta)((delta)->u.hwint[0]) == INTVAL (step)((step)->u.hwint[0]) - 1) 2496 { 2497 /* A special case. We have transformed condition of type 2498 for (i = 0; i < 4; i += 4) 2499 into 2500 for (i = 0; i <= 3; i += 4) 2501 obviously if the test for overflow during that transformation 2502 passed, we cannot overflow here. Most importantly any 2503 loop with sharp end condition and step 1 falls into this 2504 category, so handling this case specially is definitely 2505 worth the troubles. 
*/ 2506 may_xform = const_true_rtx; 2507 } 2508 else if (iv0.step == const0_rtx(const_int_rtx[64])) 2509 { 2510 bound = simplify_gen_binary (PLUS, comp_mode, mmin, step); 2511 bound = simplify_gen_binary (MINUS, comp_mode, bound, delta); 2512 bound = lowpart_subreg (mode, bound, comp_mode); 2513 tmp = lowpart_subreg (mode, iv0.base, comp_mode); 2514 may_xform = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2515 bound, tmp); 2516 may_not_xform = simplify_gen_relational (reverse_condition (cond), 2517 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2518 bound, tmp); 2519 } 2520 else 2521 { 2522 bound = simplify_gen_binary (MINUS, comp_mode, mmax, step); 2523 bound = simplify_gen_binary (PLUS, comp_mode, bound, delta); 2524 bound = lowpart_subreg (mode, bound, comp_mode); 2525 tmp = lowpart_subreg (mode, iv1.base, comp_mode); 2526 may_xform = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2527 tmp, bound); 2528 may_not_xform = simplify_gen_relational (reverse_condition (cond), 2529 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2530 tmp, bound); 2531 } 2532 } 2533 2534 if (may_xform != const0_rtx(const_int_rtx[64])) 2535 { 2536 /* We perform the transformation always provided that it is not 2537 completely senseless. This is OK, as we would need this assumption 2538 to determine the number of iterations anyway. */ 2539 if (may_xform != const_true_rtx) 2540 { 2541 /* If the step is a power of two and the final value we have 2542 computed overflows, the cycle is infinite. Otherwise it 2543 is nontrivial to compute the number of iterations. 
*/ 2544 if (step_is_pow2) 2545 desc->infinite = alloc_EXPR_LIST (0, may_not_xform, 2546 desc->infinite); 2547 else 2548 desc->assumptions = alloc_EXPR_LIST (0, may_xform, 2549 desc->assumptions); 2550 } 2551 2552 /* We are going to lose some information about upper bound on 2553 number of iterations in this step, so record the information 2554 here. */ 2555 inc = INTVAL (iv0.step)((iv0.step)->u.hwint[0]) - INTVAL (iv1.step)((iv1.step)->u.hwint[0]); 2556 if (CONST_INT_P (iv1.base)(((enum rtx_code) (iv1.base)->code) == CONST_INT)) 2557 up = INTVAL (iv1.base)((iv1.base)->u.hwint[0]); 2558 else 2559 up = INTVAL (mode_mmax)((mode_mmax)->u.hwint[0]) - inc; 2560 down = INTVAL (CONST_INT_P (iv0.base)(((((enum rtx_code) (iv0.base)->code) == CONST_INT) ? iv0.
base : mode_mmin)->u.hwint[0])
2561 ? iv0.base(((((enum rtx_code) (iv0.base)->code) == CONST_INT) ? iv0.
base : mode_mmin)->u.hwint[0])
2562 : mode_mmin)(((((enum rtx_code) (iv0.base)->code) == CONST_INT) ? iv0.
base : mode_mmin)->u.hwint[0])
; 2563 max = (up - down) / inc + 1; 2564 if (!desc->infinite 2565 && !desc->assumptions) 2566 record_niter_bound (loop, max, false, true); 2567 2568 if (iv0.step == const0_rtx(const_int_rtx[64])) 2569 { 2570 iv0.base = simplify_gen_binary (PLUS, comp_mode, iv0.base, delta); 2571 iv0.base = simplify_gen_binary (MINUS, comp_mode, iv0.base, step); 2572 } 2573 else 2574 { 2575 iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, delta); 2576 iv1.base = simplify_gen_binary (PLUS, comp_mode, iv1.base, step); 2577 } 2578 2579 tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); 2580 tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); 2581 assumption = simplify_gen_relational (reverse_condition (cond), 2582 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, tmp0, tmp1); 2583 if (assumption == const_true_rtx) 2584 goto zero_iter_simplify; 2585 else if (assumption != const0_rtx(const_int_rtx[64])) 2586 desc->noloop_assumptions = 2587 alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); 2588 cond = NE; 2589 } 2590 } 2591 2592 /* Count the number of iterations. */ 2593 if (cond == NE) 2594 { 2595 /* Everything we do here is just arithmetics modulo size of mode. This 2596 makes us able to do more involved computations of number of iterations 2597 than in other cases. First transform the condition into shape 2598 s * i <> c, with s positive. */ 2599 iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base); 2600 iv0.base = const0_rtx(const_int_rtx[64]); 2601 iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step); 2602 iv1.step = const0_rtx(const_int_rtx[64]); 2603 if (INTVAL (iv0.step)((iv0.step)->u.hwint[0]) < 0) 2604 { 2605 iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode); 2606 iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode); 2607 } 2608 iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); 2609 2610 /* Let nsd (s, size of mode) = d. 
If d does not divide c, the loop 2611 is infinite. Otherwise, the number of iterations is 2612 (inverse(s/d) * (c/d)) mod (size of mode/d). */ 2613 s = INTVAL (iv0.step)((iv0.step)->u.hwint[0]); d = 1; 2614 while (s % 2 != 1) 2615 { 2616 s /= 2; 2617 d *= 2; 2618 size--; 2619 } 2620 bound = GEN_INT (((uint64_t) 1 << (size - 1 ) << 1) - 1)gen_rtx_CONST_INT (((void) 0, E_VOIDmode), (((uint64_t) 1 <<
(size - 1 ) << 1) - 1))
; 2621 2622 tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); 2623 tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode)); 2624 assumption = simplify_gen_relational (NE, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, tmp, const0_rtx(const_int_rtx[64])); 2625 desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite); 2626 2627 tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode)); 2628 inv = inverse (s, size); 2629 tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode)); 2630 desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound); 2631 } 2632 else 2633 { 2634 if (iv1.step == const0_rtx(const_int_rtx[64])) 2635 /* Condition in shape a + s * i <= b 2636 We must know that b + s does not overflow and a <= b + s and then we 2637 can compute number of iterations as (b + s - a) / s. (It might 2638 seem that we in fact could be more clever about testing the b + s 2639 overflow condition using some information about b - a mod s, 2640 but it was already taken into account during LE -> NE transform). */ 2641 { 2642 step = iv0.step; 2643 tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); 2644 tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); 2645 2646 bound = simplify_gen_binary (MINUS, mode, mode_mmax, 2647 lowpart_subreg (mode, step, 2648 comp_mode)); 2649 if (step_is_pow2) 2650 { 2651 rtx t0, t1; 2652 2653 /* If s is power of 2, we know that the loop is infinite if 2654 a % s <= b % s and b + s overflows. 
*/ 2655 assumption = simplify_gen_relational (reverse_condition (cond), 2656 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2657 tmp1, bound); 2658 2659 t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step); 2660 t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step); 2661 tmp = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, t0, t1); 2662 assumption = simplify_gen_binary (AND, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), assumption, tmp); 2663 desc->infinite = 2664 alloc_EXPR_LIST (0, assumption, desc->infinite); 2665 } 2666 else 2667 { 2668 assumption = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2669 tmp1, bound); 2670 desc->assumptions = 2671 alloc_EXPR_LIST (0, assumption, desc->assumptions); 2672 } 2673 2674 tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step); 2675 tmp = lowpart_subreg (mode, tmp, comp_mode); 2676 assumption = simplify_gen_relational (reverse_condition (cond), 2677 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, tmp0, tmp); 2678 2679 delta = simplify_gen_binary (PLUS, mode, tmp1, step); 2680 delta = simplify_gen_binary (MINUS, mode, delta, tmp0); 2681 } 2682 else 2683 { 2684 /* Condition in shape a <= b - s * i 2685 We must know that a - s does not overflow and a - s <= b and then 2686 we can again compute number of iterations as (b - (a - s)) / s. */ 2687 step = simplify_gen_unary (NEG, mode, iv1.step, mode); 2688 tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); 2689 tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); 2690 2691 bound = simplify_gen_binary (PLUS, mode, mode_mmin, 2692 lowpart_subreg (mode, step, comp_mode)); 2693 if (step_is_pow2) 2694 { 2695 rtx t0, t1; 2696 2697 /* If s is power of 2, we know that the loop is infinite if 2698 a % s <= b % s and a - s overflows. 
*/ 2699 assumption = simplify_gen_relational (reverse_condition (cond), 2700 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2701 bound, tmp0); 2702 2703 t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step); 2704 t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step); 2705 tmp = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, t0, t1); 2706 assumption = simplify_gen_binary (AND, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), assumption, tmp); 2707 desc->infinite = 2708 alloc_EXPR_LIST (0, assumption, desc->infinite); 2709 } 2710 else 2711 { 2712 assumption = simplify_gen_relational (cond, SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2713 bound, tmp0); 2714 desc->assumptions = 2715 alloc_EXPR_LIST (0, assumption, desc->assumptions); 2716 } 2717 2718 tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step); 2719 tmp = lowpart_subreg (mode, tmp, comp_mode); 2720 assumption = simplify_gen_relational (reverse_condition (cond), 2721 SImode(scalar_int_mode ((scalar_int_mode::from_int) E_SImode)), mode, 2722 tmp, tmp1); 2723 delta = simplify_gen_binary (MINUS, mode, tmp0, step); 2724 delta = simplify_gen_binary (MINUS, mode, tmp1, delta); 2725 } 2726 if (assumption == const_true_rtx) 2727 goto zero_iter_simplify; 2728 else if (assumption != const0_rtx(const_int_rtx[64])) 2729 desc->noloop_assumptions = 2730 alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); 2731 delta = simplify_gen_binary (UDIV, mode, delta, step); 2732 desc->niter_expr = delta; 2733 } 2734 2735 old_niter = desc->niter_expr; 2736 2737 simplify_using_initial_values (loop, AND, &desc->assumptions); 2738 if (desc->assumptions 2739 && XEXP (desc->assumptions, 0)(((desc->assumptions)->u.fld[0]).rt_rtx) == const0_rtx(const_int_rtx[64])) 2740 goto fail; 2741 simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); 2742 simplify_using_initial_values (loop, 
IOR, &desc->infinite); 2743 simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr); 2744 2745 /* Rerun the simplification. Consider code (created by copying loop headers) 2746 2747 i = 0; 2748 2749 if (0 < n) 2750 { 2751 do 2752 { 2753 i++; 2754 } while (i < n); 2755 } 2756 2757 The first pass determines that i = 0, the second pass uses it to eliminate 2758 noloop assumption. */ 2759 2760 simplify_using_initial_values (loop, AND, &desc->assumptions); 2761 if (desc->assumptions 2762 && XEXP (desc->assumptions, 0)(((desc->assumptions)->u.fld[0]).rt_rtx) == const0_rtx(const_int_rtx[64])) 2763 goto fail; 2764 simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); 2765 simplify_using_initial_values (loop, IOR, &desc->infinite); 2766 simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr); 2767 2768 if (desc->noloop_assumptions 2769 && XEXP (desc->noloop_assumptions, 0)(((desc->noloop_assumptions)->u.fld[0]).rt_rtx) == const_true_rtx) 2770 goto zero_iter; 2771 2772 if (CONST_INT_P (desc->niter_expr)(((enum rtx_code) (desc->niter_expr)->code) == CONST_INT
)
) 2773 { 2774 uint64_t val = INTVAL (desc->niter_expr)((desc->niter_expr)->u.hwint[0]); 2775 2776 desc->const_iter = true; 2777 desc->niter = val & GET_MODE_MASK (desc->mode)mode_mask_array[desc->mode]; 2778 if (!desc->infinite 2779 && !desc->assumptions) 2780 record_niter_bound (loop, desc->niter, false, true); 2781 } 2782 else 2783 { 2784 max = determine_max_iter (loop, desc, old_niter); 2785 if (!max) 2786 goto zero_iter_simplify; 2787 if (!desc->infinite 2788 && !desc->assumptions) 2789 record_niter_bound (loop, max, false, true); 2790 2791 /* simplify_using_initial_values does a copy propagation on the registers 2792 in the expression for the number of iterations. This prolongs life 2793 ranges of registers and increases register pressure, and usually 2794 brings no gain (and if it happens to do, the cse pass will take care 2795 of it anyway). So prevent this behavior, unless it enabled us to 2796 derive that the number of iterations is a constant. */ 2797 desc->niter_expr = old_niter; 2798 } 2799 2800 return; 2801 2802zero_iter_simplify: 2803 /* Simplify the assumptions. */ 2804 simplify_using_initial_values (loop, AND, &desc->assumptions); 2805 if (desc->assumptions 2806 && XEXP (desc->assumptions, 0)(((desc->assumptions)->u.fld[0]).rt_rtx) == const0_rtx(const_int_rtx[64])) 2807 goto fail; 2808 simplify_using_initial_values (loop, IOR, &desc->infinite); 2809 2810 /* Fallthru. */ 2811zero_iter: 2812 desc->const_iter = true; 2813 desc->niter = 0; 2814 record_niter_bound (loop, 0, true, true); 2815 desc->noloop_assumptions = NULL_RTX(rtx) 0; 2816 desc->niter_expr = const0_rtx(const_int_rtx[64]); 2817 return; 2818 2819fail: 2820 desc->simple_p = false; 2821 return; 2822} 2823 2824/* Checks whether E is a simple exit from LOOP and stores its description 2825 into DESC. 
*/ 2826 2827static void 2828check_simple_exit (class loop *loop, edge e, class niter_desc *desc) 2829{ 2830 basic_block exit_bb; 2831 rtx condition; 2832 rtx_insn *at; 2833 edge ein; 2834 2835 exit_bb = e->src; 2836 desc->simple_p = false; 2837 2838 /* It must belong directly to the loop. */ 2839 if (exit_bb->loop_father != loop) 2840 return; 2841 2842 /* It must be tested (at least) once during any iteration. */ 2843 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb)) 2844 return; 2845 2846 /* It must end in a simple conditional jump. */ 2847 if (!any_condjump_p (BB_END (exit_bb)(exit_bb)->il.x.rtl->end_) || !onlyjump_p (BB_END (exit_bb)(exit_bb)->il.x.rtl->end_)) 2848 return; 2849 2850 ein = EDGE_SUCC (exit_bb, 0)(*(exit_bb)->succs)[(0)]; 2851 if (ein == e) 2852 ein = EDGE_SUCC (exit_bb, 1)(*(exit_bb)->succs)[(1)]; 2853 2854 desc->out_edge = e; 2855 desc->in_edge = ein; 2856 2857 /* Test whether the condition is suitable. */ 2858 if (!(condition = get_condition (BB_END (ein->src)(ein->src)->il.x.rtl->end_, &at, false, false))) 2859 return; 2860 2861 if (ein->flags & EDGE_FALLTHRU) 2862 { 2863 condition = reversed_condition (condition); 2864 if (!condition) 2865 return; 2866 } 2867 2868 /* Check that we are able to determine number of iterations and fill 2869 in information about it. */ 2870 iv_number_of_iterations (loop, at, condition, desc); 2871} 2872 2873/* Finds a simple exit of LOOP and stores its description into DESC. */ 2874 2875static void 2876find_simple_exit (class loop *loop, class niter_desc *desc) 2877{ 2878 unsigned i; 2879 basic_block *body; 2880 edge e; 2881 class niter_desc act; 2882 bool any = false; 2883 edge_iterator ei; 2884 2885 desc->simple_p = false; 2886 body = get_loop_body (loop); 2887 2888 for (i = 0; i < loop->num_nodes; i++) 2889 { 2890 FOR_EACH_EDGE (e, ei, body[i]->succs)for ((ei) = ei_start_1 (&((body[i]->succs))); ei_cond (
(ei), &(e)); ei_next (&(ei)))
2891 { 2892 if (flow_bb_inside_loop_p (loop, e->dest)) 2893 continue; 2894 2895 check_simple_exit (loop, e, &act); 2896 if (!act.simple_p) 2897 continue; 2898 2899 if (!any) 2900 any = true; 2901 else 2902 { 2903 /* Prefer constant iterations; the less the better. */ 2904 if (!act.const_iter 2905 || (desc->const_iter && act.niter >= desc->niter)) 2906 continue; 2907 2908 /* Also if the actual exit may be infinite, while the old one 2909 not, prefer the old one. */ 2910 if (act.infinite && !desc->infinite) 2911 continue; 2912 } 2913 2914 *desc = act; 2915 } 2916 } 2917 2918 if (dump_file) 2919 { 2920 if (desc->simple_p) 2921 { 2922 fprintf (dump_file, "Loop %d is simple:\n", loop->num); 2923 fprintf (dump_file, " simple exit %d -> %d\n", 2924 desc->out_edge->src->index, 2925 desc->out_edge->dest->index); 2926 if (desc->assumptions) 2927 { 2928 fprintf (dump_file, " assumptions: "); 2929 print_rtl (dump_file, desc->assumptions); 2930 fprintf (dump_file, "\n"); 2931 } 2932 if (desc->noloop_assumptions) 2933 { 2934 fprintf (dump_file, " does not roll if: "); 2935 print_rtl (dump_file, desc->noloop_assumptions); 2936 fprintf (dump_file, "\n"); 2937 } 2938 if (desc->infinite) 2939 { 2940 fprintf (dump_file, " infinite if: "); 2941 print_rtl (dump_file, desc->infinite); 2942 fprintf (dump_file, "\n"); 2943 } 2944 2945 fprintf (dump_file, " number of iterations: "); 2946 print_rtl (dump_file, desc->niter_expr); 2947 fprintf (dump_file, "\n"); 2948 2949 fprintf (dump_file, " upper bound: %li\n", 2950 (long)get_max_loop_iterations_int (loop)); 2951 fprintf (dump_file, " likely upper bound: %li\n", 2952 (long)get_likely_max_loop_iterations_int (loop)); 2953 fprintf (dump_file, " realistic bound: %li\n", 2954 (long)get_estimated_loop_iterations_int (loop)); 2955 } 2956 else 2957 fprintf (dump_file, "Loop %d is not simple.\n", loop->num); 2958 } 2959 2960 /* Fix up the finiteness if possible. 
We can only do it for single exit, 2961 since the loop is finite, but it's possible that we predicate one loop 2962 exit to be finite which can not be determined as finite in middle-end as 2963 well. It results in incorrect predicate information on the exit condition 2964 expression. For example, if says [(int) _1 + -8, + , -8] != 0 finite, 2965 it means _1 can exactly divide -8. */ 2966 if (desc->infinite && single_exit (loop) && finite_loop_p (loop)) 2967 { 2968 desc->infinite = NULL_RTX(rtx) 0; 2969 if (dump_file) 2970 fprintf (dump_file, " infinite updated to finite.\n"); 2971 } 2972 2973 free (body); 2974} 2975 2976/* Creates a simple loop description of LOOP if it was not computed 2977 already. */ 2978 2979class niter_desc * 2980get_simple_loop_desc (class loop *loop) 2981{ 2982 class niter_desc *desc = simple_loop_desc (loop); 2983 2984 if (desc) 2985 return desc; 2986 2987 /* At least desc->infinite is not always initialized by 2988 find_simple_loop_exit. */ 2989 desc = ggc_cleared_alloc<niter_desc> (); 2990 iv_analysis_loop_init (loop); 2991 find_simple_exit (loop, desc); 2992 loop->simple_loop_desc = desc; 2993 return desc; 2994} 2995 2996/* Releases simple loop description for LOOP. */ 2997 2998void 2999free_simple_loop_desc (class loop *loop) 3000{ 3001 class niter_desc *desc = simple_loop_desc (loop); 3002 3003 if (!desc) 3004 return; 3005 3006 ggc_free (desc); 3007 loop->simple_loop_desc = NULL__null; 3008}

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/machmode.h

1/* Machine mode definitions for GCC; included by rtl.h and tree.h.
2 Copyright (C) 1991-2023 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20#ifndef HAVE_MACHINE_MODES
21#define HAVE_MACHINE_MODES
22
23typedef opt_mode<machine_mode> opt_machine_mode;
24
25extern CONST_MODE_SIZE poly_uint16_pod mode_size[NUM_MACHINE_MODES];
26extern CONST_MODE_PRECISIONconst poly_uint16_pod mode_precision[NUM_MACHINE_MODES];
27extern const unsigned char mode_inner[NUM_MACHINE_MODES];
28extern CONST_MODE_NUNITSconst poly_uint16_pod mode_nunits[NUM_MACHINE_MODES];
29extern CONST_MODE_UNIT_SIZE unsigned char mode_unit_size[NUM_MACHINE_MODES];
30extern const unsigned short mode_unit_precision[NUM_MACHINE_MODES];
31extern const unsigned char mode_next[NUM_MACHINE_MODES];
32extern const unsigned char mode_wider[NUM_MACHINE_MODES];
33extern const unsigned char mode_2xwider[NUM_MACHINE_MODES];
34
35template<typename T>
36struct mode_traits
37{
38 /* For use by the machmode support code only.
39
40 There are cases in which the machmode support code needs to forcibly
41 convert a machine_mode to a specific mode class T, and in which the
42 context guarantees that this is valid without the need for an assert.
43 This can be done using:
44
45 return typename mode_traits<T>::from_int (mode);
46
47 when returning a T and:
48
49 res = T (typename mode_traits<T>::from_int (mode));
50
51 when assigning to a value RES that must be assignment-compatible
52 with (but possibly not the same as) T. */
53#ifdef USE_ENUM_MODES
54 /* Allow direct conversion of enums to specific mode classes only
55 when USE_ENUM_MODES is defined. This is only intended for use
56 by gencondmd, so that it can tell more easily when .md conditions
57 are always false. */
58 typedef machine_mode from_int;
59#else
60 /* Here we use an enum type distinct from machine_mode but with the
61 same range as machine_mode. T should have a constructor that
62 accepts this enum type; it should not have a constructor that
63 accepts machine_mode.
64
65 We use this somewhat indirect approach to avoid too many constructor
66 calls when the compiler is built with -O0. For example, even in
67 unoptimized code, the return statement above would construct the
68 returned T directly from the numerical value of MODE. */
69 enum from_int { dummy = MAX_MACHINE_MODE };
70#endif
71};
72
73template<>
74struct mode_traits<machine_mode>
75{
76 /* machine_mode itself needs no conversion. */
77 typedef machine_mode from_int;
78};
79
80/* Always treat machine modes as fixed-size while compiling code specific
81 to targets that have no variable-size modes. */
82#if defined (IN_TARGET_CODE) && NUM_POLY_INT_COEFFS1 == 1
83#define ONLY_FIXED_SIZE_MODES0 1
84#else
85#define ONLY_FIXED_SIZE_MODES0 0
86#endif
87
88/* Get the name of mode MODE as a string. */
89
90extern const char * const mode_name[NUM_MACHINE_MODES];
91#define GET_MODE_NAME(MODE)mode_name[MODE] mode_name[MODE]
92
93/* Mode classes. */
94
95#include "mode-classes.def"
96#define DEF_MODE_CLASS(M) M
97enum mode_class { MODE_CLASSES, MAX_MODE_CLASS };
98#undef DEF_MODE_CLASS
99#undef MODE_CLASSES
100
101/* Get the general kind of object that mode MODE represents
102 (integer, floating, complex, etc.) */
103
104extern const unsigned char mode_class[NUM_MACHINE_MODES];
105#define GET_MODE_CLASS(MODE)((enum mode_class) mode_class[MODE]) ((enum mode_class) mode_class[MODE])
106
107/* Nonzero if MODE is an integral mode. */
108#define INTEGRAL_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_INT || ((enum mode_class
) mode_class[MODE]) == MODE_PARTIAL_INT || ((enum mode_class)
mode_class[MODE]) == MODE_COMPLEX_INT || ((enum mode_class) mode_class
[MODE]) == MODE_VECTOR_BOOL || ((enum mode_class) mode_class[
MODE]) == MODE_VECTOR_INT)
\
109 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_INT \
110 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_PARTIAL_INT \
111 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_COMPLEX_INT \
112 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_BOOL \
113 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_INT)
114
115/* Nonzero if MODE is a floating-point mode. */
116#define FLOAT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_FLOAT || ((enum
mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT || ((enum
mode_class) mode_class[MODE]) == MODE_COMPLEX_FLOAT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FLOAT)
\
117 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_FLOAT \
118 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT \
119 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_COMPLEX_FLOAT \
120 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_FLOAT)
121
122/* Nonzero if MODE is a complex mode. */
123#define COMPLEX_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_COMPLEX_INT || (
(enum mode_class) mode_class[MODE]) == MODE_COMPLEX_FLOAT)
\
124 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_COMPLEX_INT \
125 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_COMPLEX_FLOAT)
126
127/* Nonzero if MODE is a vector mode. */
128#define VECTOR_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_VECTOR_BOOL || (
(enum mode_class) mode_class[MODE]) == MODE_VECTOR_INT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FLOAT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
\
129 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_BOOL \
130 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_INT \
131 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_FLOAT \
132 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT \
133 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT \
134 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM \
135 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
136
137/* Nonzero if MODE is a scalar integral mode. */
138#define SCALAR_INT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_INT || ((enum mode_class
) mode_class[MODE]) == MODE_PARTIAL_INT)
\
139 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_INT \
140 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_PARTIAL_INT)
141
142/* Nonzero if MODE is a scalar floating point mode. */
143#define SCALAR_FLOAT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_FLOAT || ((enum
mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT)
\
144 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_FLOAT \
145 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT)
146
147/* Nonzero if MODE is a decimal floating point mode. */
148#define DECIMAL_FLOAT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT) \
149 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_DECIMAL_FLOAT)
150
151/* Nonzero if MODE is a scalar fract mode. */
152#define SCALAR_FRACT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT) \
153 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_FRACT)
154
155/* Nonzero if MODE is a scalar ufract mode. */
156#define SCALAR_UFRACT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT) \
157 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_UFRACT)
158
159/* Nonzero if MODE is a scalar fract or ufract mode. */
160#define ALL_SCALAR_FRACT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_UFRACT))
\
161 (SCALAR_FRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT) || SCALAR_UFRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT))
162
163/* Nonzero if MODE is a scalar accum mode. */
164#define SCALAR_ACCUM_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM) \
165 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_ACCUM)
166
167/* Nonzero if MODE is a scalar uaccum mode. */
168#define SCALAR_UACCUM_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM) \
169 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_UACCUM)
170
171/* Nonzero if MODE is a scalar accum or uaccum mode. */
172#define ALL_SCALAR_ACCUM_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_ACCUM) || (((enum
mode_class) mode_class[MODE]) == MODE_UACCUM))
\
173 (SCALAR_ACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM) || SCALAR_UACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM))
174
175/* Nonzero if MODE is a scalar fract or accum mode. */
176#define SIGNED_SCALAR_FIXED_POINT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_ACCUM))
\
177 (SCALAR_FRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT) || SCALAR_ACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM))
178
179/* Nonzero if MODE is a scalar ufract or uaccum mode. */
180#define UNSIGNED_SCALAR_FIXED_POINT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_UFRACT) || (((
enum mode_class) mode_class[MODE]) == MODE_UACCUM))
\
181 (SCALAR_UFRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT) || SCALAR_UACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM))
182
183/* Nonzero if MODE is a scalar fract, ufract, accum or uaccum mode. */
184#define ALL_SCALAR_FIXED_POINT_MODE_P(MODE)(((((enum mode_class) mode_class[MODE]) == MODE_FRACT) || (((
enum mode_class) mode_class[MODE]) == MODE_ACCUM)) || ((((enum
mode_class) mode_class[MODE]) == MODE_UFRACT) || (((enum mode_class
) mode_class[MODE]) == MODE_UACCUM)))
\
185 (SIGNED_SCALAR_FIXED_POINT_MODE_P (MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_ACCUM))
\
186 || UNSIGNED_SCALAR_FIXED_POINT_MODE_P (MODE)((((enum mode_class) mode_class[MODE]) == MODE_UFRACT) || (((
enum mode_class) mode_class[MODE]) == MODE_UACCUM))
)
187
188/* Nonzero if MODE is a scalar/vector fract mode. */
189#define FRACT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT)
\
190 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_FRACT \
191 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT)
192
193/* Nonzero if MODE is a scalar/vector ufract mode. */
194#define UFRACT_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT)
\
195 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_UFRACT \
196 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT)
197
198/* Nonzero if MODE is a scalar/vector fract or ufract mode. */
199#define ALL_FRACT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_UFRACT))
\
200 (FRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT)
|| UFRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT)
)
201
202/* Nonzero if MODE is a scalar/vector accum mode. */
203#define ACCUM_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM)
\
204 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_ACCUM \
205 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM)
206
207/* Nonzero if MODE is a scalar/vector uaccum mode. */
208#define UACCUM_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
\
209 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_UACCUM \
210 || GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
211
212/* Nonzero if MODE is a scalar/vector accum or uaccum mode. */
213#define ALL_ACCUM_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM) || (((enum
mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_UACCUM))
\
214 (ACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM)
|| UACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
)
215
216/* Nonzero if MODE is a scalar/vector fract or accum mode. */
217#define SIGNED_FIXED_POINT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_ACCUM))
\
218 (FRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT)
|| ACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_ACCUM)
)
219
220/* Nonzero if MODE is a scalar/vector ufract or uaccum mode. */
221#define UNSIGNED_FIXED_POINT_MODE_P(MODE)((((enum mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_UACCUM))
\
222 (UFRACT_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT)
|| UACCUM_MODE_P (MODE)(((enum mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UACCUM)
)
223
224/* Nonzero if MODE is a scalar/vector fract, ufract, accum or uaccum mode. */
225#define ALL_FIXED_POINT_MODE_P(MODE)(((((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_ACCUM)) || ((((enum mode_class
) mode_class[MODE]) == MODE_UFRACT || ((enum mode_class) mode_class
[MODE]) == MODE_VECTOR_UFRACT) || (((enum mode_class) mode_class
[MODE]) == MODE_UACCUM || ((enum mode_class) mode_class[MODE]
) == MODE_VECTOR_UACCUM)))
\
226 (SIGNED_FIXED_POINT_MODE_P (MODE)((((enum mode_class) mode_class[MODE]) == MODE_FRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_FRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_ACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_ACCUM))
\
227 || UNSIGNED_FIXED_POINT_MODE_P (MODE)((((enum mode_class) mode_class[MODE]) == MODE_UFRACT || ((enum
mode_class) mode_class[MODE]) == MODE_VECTOR_UFRACT) || (((enum
mode_class) mode_class[MODE]) == MODE_UACCUM || ((enum mode_class
) mode_class[MODE]) == MODE_VECTOR_UACCUM))
)
228
229/* Nonzero if MODE is opaque. */
230#define OPAQUE_MODE_P(MODE)(((enum mode_class) mode_class[MODE]) == MODE_OPAQUE) \
231 (GET_MODE_CLASS (MODE)((enum mode_class) mode_class[MODE]) == MODE_OPAQUE)
232
233/* Nonzero if CLASS modes can be widened. */
234#define CLASS_HAS_WIDER_MODES_P(CLASS)(CLASS == MODE_INT || CLASS == MODE_PARTIAL_INT || CLASS == MODE_FLOAT
|| CLASS == MODE_DECIMAL_FLOAT || CLASS == MODE_COMPLEX_FLOAT
|| CLASS == MODE_FRACT || CLASS == MODE_UFRACT || CLASS == MODE_ACCUM
|| CLASS == MODE_UACCUM)
\
235 (CLASS == MODE_INT \
236 || CLASS == MODE_PARTIAL_INT \
237 || CLASS == MODE_FLOAT \
238 || CLASS == MODE_DECIMAL_FLOAT \
239 || CLASS == MODE_COMPLEX_FLOAT \
240 || CLASS == MODE_FRACT \
241 || CLASS == MODE_UFRACT \
242 || CLASS == MODE_ACCUM \
243 || CLASS == MODE_UACCUM)
244
245/* An optional T (i.e. a T or nothing), where T is some form of mode class. */
246template<typename T>
247class opt_mode
248{
249public:
250 enum from_int { dummy = MAX_MACHINE_MODE };
251
252 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr opt_mode () : m_mode (E_VOIDmode) {}
253 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr opt_mode (const T &m) : m_mode (m) {}
254 template<typename U>
255 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr opt_mode (const U &m) : m_mode (T (m)) {}
256 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr opt_mode (from_int m) : m_mode (machine_mode (m)) {}
257
258 machine_mode else_void () const;
259 machine_mode else_blk () const { return else_mode (BLKmode((void) 0, E_BLKmode)); }
260 machine_mode else_mode (machine_mode) const;
261 T require () const;
262
263 bool exists () const;
264 template<typename U> bool exists (U *) const;
265
266 bool operator== (const T &m) const { return m_mode == m; }
267 bool operator!= (const T &m) const { return m_mode != m; }
268
269private:
270 machine_mode m_mode;
271};
272
273/* If the object contains a T, return its enum value, otherwise return
274 E_VOIDmode. */
275
276template<typename T>
277ALWAYS_INLINEinline __attribute__ ((always_inline)) machine_mode
278opt_mode<T>::else_void () const
279{
280 return m_mode;
281}
282
283/* If the T exists, return its enum value, otherwise return FALLBACK. */
284
285template<typename T>
286inline machine_mode
287opt_mode<T>::else_mode (machine_mode fallback) const
288{
289 return m_mode == E_VOIDmode ? fallback : m_mode;
290}
291
292/* Assert that the object contains a T and return it. */
293
294template<typename T>
295inline T
296opt_mode<T>::require () const
297{
298 gcc_checking_assert (m_mode != E_VOIDmode)((void)(!(m_mode != E_VOIDmode) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/machmode.h"
, 298, __FUNCTION__), 0 : 0))
;
299 return typename mode_traits<T>::from_int (m_mode);
300}
301
302/* Return true if the object contains a T rather than nothing. */
303
304template<typename T>
305ALWAYS_INLINEinline __attribute__ ((always_inline)) bool
306opt_mode<T>::exists () const
307{
308 return m_mode != E_VOIDmode;
309}
310
311/* Return true if the object contains a T, storing it in *MODE if so. */
312
313template<typename T>
314template<typename U>
315inline bool
316opt_mode<T>::exists (U *mode) const
317{
318 if (m_mode != E_VOIDmode)
319 {
320 *mode = T (typename mode_traits<T>::from_int (m_mode));
321 return true;
322 }
323 return false;
324}
325
326/* A POD version of mode class T. */
327
328template<typename T>
329struct pod_mode
330{
331 typedef typename mode_traits<T>::from_int from_int;
332 typedef typename T::measurement_type measurement_type;
333
334 machine_mode m_mode;
335 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
336 operator machine_mode () const { return m_mode; }
337
338 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
339 operator T () const { return from_int (m_mode); }
340
341 ALWAYS_INLINEinline __attribute__ ((always_inline)) pod_mode &operator = (const T &m) { m_mode = m; return *this; }
342};
343
344/* Return true if mode M has type T. */
345
346template<typename T>
347inline bool
348is_a (machine_mode m)
349{
350 return T::includes_p (m);
351}
352
353template<typename T, typename U>
354inline bool
355is_a (const opt_mode<U> &m)
356{
357 return T::includes_p (m.else_void ());
358}
359
360/* Assert that mode M has type T, and return it in that form. */
361
362template<typename T>
363inline T
364as_a (machine_mode m)
365{
366 gcc_checking_assert (T::includes_p (m))((void)(!(T::includes_p (m)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/machmode.h"
, 366, __FUNCTION__), 0 : 0))
;
367 return typename mode_traits<T>::from_int (m);
368}
369
370template<typename T, typename U>
371inline T
372as_a (const opt_mode<U> &m)
373{
374 return as_a <T> (m.else_void ());
375}
376
377/* Convert M to an opt_mode<T>. */
378
379template<typename T>
380inline opt_mode<T>
381dyn_cast (machine_mode m)
382{
383 if (T::includes_p (m))
384 return T (typename mode_traits<T>::from_int (m));
385 return opt_mode<T> ();
386}
387
388template<typename T, typename U>
389inline opt_mode<T>
390dyn_cast (const opt_mode<U> &m)
391{
392 return dyn_cast <T> (m.else_void ());
393}
394
395/* Return true if mode M has type T, storing it as a T in *RESULT
396 if so. */
397
398template<typename T, typename U>
399inline bool
400is_a (machine_mode m, U *result)
401{
402 if (T::includes_p (m))
403 {
404 *result = T (typename mode_traits<T>::from_int (m));
405 return true;
406 }
407 return false;
408}
409
410/* Represents a machine mode that is known to be a SCALAR_INT_MODE_P. */
411class scalar_int_mode
412{
413public:
414 typedef mode_traits<scalar_int_mode>::from_int from_int;
415 typedef unsigned short measurement_type;
416
417 ALWAYS_INLINEinline __attribute__ ((always_inline)) scalar_int_mode () {}
418
419 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
420 scalar_int_mode (from_int m) : m_mode (machine_mode (m)) {}
421
422 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr operator machine_mode () const { return m_mode; }
32
Undefined or garbage value returned to caller
423
424 static bool includes_p (machine_mode);
425
426protected:
427 machine_mode m_mode;
428};
429
430/* Return true if M is a scalar_int_mode. */
431
432inline bool
433scalar_int_mode::includes_p (machine_mode m)
434{
435 return SCALAR_INT_MODE_P (m)(((enum mode_class) mode_class[m]) == MODE_INT || ((enum mode_class
) mode_class[m]) == MODE_PARTIAL_INT)
;
436}
437
438/* Represents a machine mode that is known to be a SCALAR_FLOAT_MODE_P. */
439class scalar_float_mode
440{
441public:
442 typedef mode_traits<scalar_float_mode>::from_int from_int;
443 typedef unsigned short measurement_type;
444
445 ALWAYS_INLINEinline __attribute__ ((always_inline)) scalar_float_mode () {}
446
447 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
448 scalar_float_mode (from_int m) : m_mode (machine_mode (m)) {}
449
450 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr operator machine_mode () const { return m_mode; }
451
452 static bool includes_p (machine_mode);
453
454protected:
455 machine_mode m_mode;
456};
457
458/* Return true if M is a scalar_float_mode. */
459
460inline bool
461scalar_float_mode::includes_p (machine_mode m)
462{
463 return SCALAR_FLOAT_MODE_P (m)(((enum mode_class) mode_class[m]) == MODE_FLOAT || ((enum mode_class
) mode_class[m]) == MODE_DECIMAL_FLOAT)
;
464}
465
466/* Represents a machine mode that is known to be scalar. */
467class scalar_mode
468{
469public:
470 typedef mode_traits<scalar_mode>::from_int from_int;
471 typedef unsigned short measurement_type;
472
473 ALWAYS_INLINEinline __attribute__ ((always_inline)) scalar_mode () {}
474
475 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
476 scalar_mode (from_int m) : m_mode (machine_mode (m)) {}
477
478 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
479 scalar_mode (const scalar_int_mode &m) : m_mode (m) {}
480
481 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
482 scalar_mode (const scalar_float_mode &m) : m_mode (m) {}
483
484 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
485 scalar_mode (const scalar_int_mode_pod &m) : m_mode (m) {}
486
487 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr operator machine_mode () const { return m_mode; }
488
489 static bool includes_p (machine_mode);
490
491protected:
492 machine_mode m_mode;
493};
494
495/* Return true if M represents some kind of scalar value. */
496
497inline bool
498scalar_mode::includes_p (machine_mode m)
499{
500 switch (GET_MODE_CLASS (m)((enum mode_class) mode_class[m]))
501 {
502 case MODE_INT:
503 case MODE_PARTIAL_INT:
504 case MODE_FRACT:
505 case MODE_UFRACT:
506 case MODE_ACCUM:
507 case MODE_UACCUM:
508 case MODE_FLOAT:
509 case MODE_DECIMAL_FLOAT:
510 return true;
511 default:
512 return false;
513 }
514}
515
516/* Represents a machine mode that is known to be a COMPLEX_MODE_P. */
517class complex_mode
518{
519public:
520 typedef mode_traits<complex_mode>::from_int from_int;
521 typedef unsigned short measurement_type;
522
523 ALWAYS_INLINEinline __attribute__ ((always_inline)) complex_mode () {}
524
525 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
526 complex_mode (from_int m) : m_mode (machine_mode (m)) {}
527
528 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr operator machine_mode () const { return m_mode; }
529
530 static bool includes_p (machine_mode);
531
532protected:
533 machine_mode m_mode;
534};
535
536/* Return true if M is a complex_mode. */
537
538inline bool
539complex_mode::includes_p (machine_mode m)
540{
541 return COMPLEX_MODE_P (m)(((enum mode_class) mode_class[m]) == MODE_COMPLEX_INT || ((enum
mode_class) mode_class[m]) == MODE_COMPLEX_FLOAT)
;
542}
543
544/* Return the base GET_MODE_SIZE value for MODE. */
545
546ALWAYS_INLINEinline __attribute__ ((always_inline)) poly_uint16
547mode_to_bytes (machine_mode mode)
548{
549#if GCC_VERSION(4 * 1000 + 2) >= 4001
550 return (__builtin_constant_p (mode)
551 ? mode_size_inline (mode) : mode_size[mode]);
552#else
553 return mode_size[mode];
554#endif
555}
556
557/* Return the base GET_MODE_BITSIZE value for MODE. */
558
559ALWAYS_INLINEinline __attribute__ ((always_inline)) poly_uint16
560mode_to_bits (machine_mode mode)
561{
562 return mode_to_bytes (mode) * BITS_PER_UNIT(8);
563}
564
/* Return the base GET_MODE_PRECISION value for MODE.  */

ALWAYS_INLINE poly_uint16
mode_to_precision (machine_mode mode)
{
  /* Unlike the neighboring mode_to_* accessors there is no
     __builtin_constant_p fast path here; the table lookup is
     always used.  */
  return mode_precision[mode];
}
572
573/* Return the base GET_MODE_INNER value for MODE. */
574
575ALWAYS_INLINEinline __attribute__ ((always_inline)) scalar_mode
576mode_to_inner (machine_mode mode)
577{
578#if GCC_VERSION(4 * 1000 + 2) >= 4001
579 return scalar_mode::from_int (__builtin_constant_p (mode)
580 ? mode_inner_inline (mode)
581 : mode_inner[mode]);
582#else
583 return scalar_mode::from_int (mode_inner[mode]);
584#endif
585}
586
587/* Return the base GET_MODE_UNIT_SIZE value for MODE. */
588
589ALWAYS_INLINEinline __attribute__ ((always_inline)) unsigned char
590mode_to_unit_size (machine_mode mode)
591{
592#if GCC_VERSION(4 * 1000 + 2) >= 4001
593 return (__builtin_constant_p (mode)
594 ? mode_unit_size_inline (mode) : mode_unit_size[mode]);
595#else
596 return mode_unit_size[mode];
597#endif
598}
599
600/* Return the base GET_MODE_UNIT_PRECISION value for MODE. */
601
602ALWAYS_INLINEinline __attribute__ ((always_inline)) unsigned short
603mode_to_unit_precision (machine_mode mode)
604{
605#if GCC_VERSION(4 * 1000 + 2) >= 4001
606 return (__builtin_constant_p (mode)
607 ? mode_unit_precision_inline (mode) : mode_unit_precision[mode]);
608#else
609 return mode_unit_precision[mode];
610#endif
611}
612
613/* Return the base GET_MODE_NUNITS value for MODE. */
614
615ALWAYS_INLINEinline __attribute__ ((always_inline)) poly_uint16
616mode_to_nunits (machine_mode mode)
617{
618#if GCC_VERSION(4 * 1000 + 2) >= 4001
619 return (__builtin_constant_p (mode)
620 ? mode_nunits_inline (mode) : mode_nunits[mode]);
621#else
622 return mode_nunits[mode];
623#endif
624}
625
/* Get the size in bytes of an object of mode MODE.  */

#if ONLY_FIXED_SIZE_MODES
/* Every mode has a constant size, so the poly_uint16 degenerates to
   its single coefficient.  */
#define GET_MODE_SIZE(MODE) ((unsigned short) mode_to_bytes (MODE).coeffs[0])
#else
/* Overload for a plain machine_mode, whose size may be non-constant.  */
ALWAYS_INLINE poly_uint16
GET_MODE_SIZE (machine_mode mode)
{
  return mode_to_bytes (mode);
}

/* Overload selected when T's measurement_type is polynomial: return
   the full poly_uint16.  */
template<typename T>
ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_SIZE (const T &mode)
{
  return mode_to_bytes (mode);
}

/* Overload selected when T's measurement_type is a plain scalar:
   the size is known constant, so return the single coefficient.  */
template<typename T>
ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_SIZE (const T &mode)
{
  return mode_to_bytes (mode).coeffs[0];
}
#endif
651
/* Get the size in bits of an object of mode MODE.  */

#if ONLY_FIXED_SIZE_MODES
#define GET_MODE_BITSIZE(MODE) ((unsigned short) mode_to_bits (MODE).coeffs[0])
#else
/* Overload for a plain machine_mode, whose bitsize may be non-constant.  */
ALWAYS_INLINE poly_uint16
GET_MODE_BITSIZE (machine_mode mode)
{
  return mode_to_bits (mode);
}

/* Overload selected when T's measurement_type is polynomial.  */
template<typename T>
ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
  return mode_to_bits (mode);
}

/* Overload selected when T's measurement_type is a plain scalar.  */
template<typename T>
ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_BITSIZE (const T &mode)
{
  return mode_to_bits (mode).coeffs[0];
}
#endif
677
/* Get the number of value bits of an object of mode MODE.  */

#if ONLY_FIXED_SIZE_MODES
#define GET_MODE_PRECISION(MODE) \
  ((unsigned short) mode_to_precision (MODE).coeffs[0])
#else
/* Overload for a plain machine_mode, whose precision may be
   non-constant.  */
ALWAYS_INLINE poly_uint16
GET_MODE_PRECISION (machine_mode mode)
{
  return mode_to_precision (mode);
}

/* Overload selected when T's measurement_type is polynomial.  */
template<typename T>
ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_PRECISION (const T &mode)
{
  return mode_to_precision (mode);
}

/* Overload selected when T's measurement_type is a plain scalar.  */
template<typename T>
ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_PRECISION (const T &mode)
{
  return mode_to_precision (mode).coeffs[0];
}
#endif
704
/* Get the number of integral bits of an object of mode MODE.  */
extern CONST_MODE_IBIT unsigned char mode_ibit[NUM_MACHINE_MODES];
#define GET_MODE_IBIT(MODE) mode_ibit[MODE]

/* Get the number of fractional bits of an object of mode MODE.  */
extern CONST_MODE_FBIT unsigned char mode_fbit[NUM_MACHINE_MODES];
#define GET_MODE_FBIT(MODE) mode_fbit[MODE]

/* Get a bitmask containing 1 for all bits in a word
   that fit within mode MODE.  */

extern CONST_MODE_MASK unsigned HOST_WIDE_INT
  mode_mask_array[NUM_MACHINE_MODES];

#define GET_MODE_MASK(MODE) mode_mask_array[MODE]

/* Return the mode of the basic parts of MODE.  For vector modes this is the
   mode of the vector elements.  For complex modes it is the mode of the real
   and imaginary parts.  For other modes it is MODE itself.  */

#define GET_MODE_INNER(MODE) (mode_to_inner (MODE))

/* Get the size in bytes or bits of the basic parts of an
   object of mode MODE.  */

#define GET_MODE_UNIT_SIZE(MODE) mode_to_unit_size (MODE)

#define GET_MODE_UNIT_BITSIZE(MODE) \
  ((unsigned short) (GET_MODE_UNIT_SIZE (MODE) * BITS_PER_UNIT))

#define GET_MODE_UNIT_PRECISION(MODE) (mode_to_unit_precision (MODE))
736
/* Get the number of units in an object of mode MODE.  This is 2 for
   complex modes and the number of elements for vector modes.  */

#if ONLY_FIXED_SIZE_MODES
#define GET_MODE_NUNITS(MODE) (mode_to_nunits (MODE).coeffs[0])
#else
/* Overload for a plain machine_mode, whose element count may be
   non-constant.  */
ALWAYS_INLINE poly_uint16
GET_MODE_NUNITS (machine_mode mode)
{
  return mode_to_nunits (mode);
}

/* Overload selected when T's measurement_type is polynomial.  */
template<typename T>
ALWAYS_INLINE typename if_poly<typename T::measurement_type>::type
GET_MODE_NUNITS (const T &mode)
{
  return mode_to_nunits (mode);
}

/* Overload selected when T's measurement_type is a plain scalar.  */
template<typename T>
ALWAYS_INLINE typename if_nonpoly<typename T::measurement_type>::type
GET_MODE_NUNITS (const T &mode)
{
  return mode_to_nunits (mode).coeffs[0];
}
#endif
763
764/* Get the next natural mode (not narrower, eg, QI -> HI -> SI -> DI -> TI
765 or HF -> BF -> SF -> DF -> XF -> TF). */
766
767template<typename T>
768ALWAYS_INLINEinline __attribute__ ((always_inline)) opt_mode<T>
769GET_MODE_NEXT_MODE (const T &m)
770{
771 return typename opt_mode<T>::from_int (mode_next[m]);
772}
773
774/* Get the next wider mode (eg, QI -> HI -> SI -> DI -> TI
775 or { HF, BF } -> SF -> DF -> XF -> TF).
776 This is similar to GET_MODE_NEXT_MODE, but while GET_MODE_NEXT_MODE
777 can include mode that have the same precision (e.g.
778 GET_MODE_NEXT_MODE (HFmode) can be BFmode even when both have the same
779 precision), this one will skip those. And always VOIDmode for
780 modes whose class is !CLASS_HAS_WIDER_MODES_P. */
781
782template<typename T>
783ALWAYS_INLINEinline __attribute__ ((always_inline)) opt_mode<T>
784GET_MODE_WIDER_MODE (const T &m)
785{
786 return typename opt_mode<T>::from_int (mode_wider[m]);
787}
788
789/* For scalars, this is a mode with twice the precision. For vectors,
790 this is a mode with the same inner mode but with twice the elements. */
791
792template<typename T>
793ALWAYS_INLINEinline __attribute__ ((always_inline)) opt_mode<T>
794GET_MODE_2XWIDER_MODE (const T &m)
795{
796 return typename opt_mode<T>::from_int (mode_2xwider[m]);
797}
798
/* Get the complex mode from the component mode.  */
extern const unsigned char mode_complex[NUM_MACHINE_MODES];
#define GET_MODE_COMPLEX_MODE(MODE) ((machine_mode) mode_complex[MODE])
802
803/* Represents a machine mode that must have a fixed size. The main
804 use of this class is to represent the modes of objects that always
805 have static storage duration, such as constant pool entries.
806 (No current target supports the concept of variable-size static data.) */
807class fixed_size_mode
808{
809public:
810 typedef mode_traits<fixed_size_mode>::from_int from_int;
811 typedef unsigned short measurement_type;
812
813 ALWAYS_INLINEinline __attribute__ ((always_inline)) fixed_size_mode () {}
814
815 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
816 fixed_size_mode (from_int m) : m_mode (machine_mode (m)) {}
817
818 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
819 fixed_size_mode (const scalar_mode &m) : m_mode (m) {}
820
821 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
822 fixed_size_mode (const scalar_int_mode &m) : m_mode (m) {}
823
824 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
825 fixed_size_mode (const scalar_float_mode &m) : m_mode (m) {}
826
827 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
828 fixed_size_mode (const scalar_mode_pod &m) : m_mode (m) {}
829
830 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
831 fixed_size_mode (const scalar_int_mode_pod &m) : m_mode (m) {}
832
833 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr
834 fixed_size_mode (const complex_mode &m) : m_mode (m) {}
835
836 ALWAYS_INLINEinline __attribute__ ((always_inline)) CONSTEXPRconstexpr operator machine_mode () const { return m_mode; }
837
838 static bool includes_p (machine_mode);
839
840protected:
841 machine_mode m_mode;
842};
843
844/* Return true if MODE has a fixed size. */
845
846inline bool
847fixed_size_mode::includes_p (machine_mode mode)
848{
849 return mode_to_bytes (mode).is_constant ();
850}
851
/* Wrapper for mode arguments to target macros, so that if a target
   doesn't need polynomial-sized modes, its header file can continue
   to treat everything as fixed_size_mode.  This should go away once
   macros are moved to target hooks.  It shouldn't be used in other
   contexts.  */
#if NUM_POLY_INT_COEFFS == 1
/* One coefficient means all modes have a fixed size, so the checked
   cast is always valid.  */
#define MACRO_MODE(MODE) (as_a <fixed_size_mode> (MODE))
#else
#define MACRO_MODE(MODE) (MODE)
#endif
862
863extern opt_machine_mode mode_for_size (poly_uint64, enum mode_class, int);
864
865/* Return the machine mode to use for a MODE_INT of SIZE bits, if one
866 exists. If LIMIT is nonzero, modes wider than MAX_FIXED_MODE_SIZE
867 will not be used. */
868
869inline opt_scalar_int_mode
870int_mode_for_size (poly_uint64 size, int limit)
871{
872 return dyn_cast <scalar_int_mode> (mode_for_size (size, MODE_INT, limit));
873}
874
875/* Return the machine mode to use for a MODE_FLOAT of SIZE bits, if one
876 exists. */
877
878inline opt_scalar_float_mode
879float_mode_for_size (poly_uint64 size)
880{
881 return dyn_cast <scalar_float_mode> (mode_for_size (size, MODE_FLOAT, 0));
882}
883
884/* Likewise for MODE_DECIMAL_FLOAT. */
885
886inline opt_scalar_float_mode
887decimal_float_mode_for_size (unsigned int size)
888{
889 return dyn_cast <scalar_float_mode>
890 (mode_for_size (size, MODE_DECIMAL_FLOAT, 0));
891}
892
893extern machine_mode smallest_mode_for_size (poly_uint64, enum mode_class);
894
895/* Find the narrowest integer mode that contains at least SIZE bits.
896 Such a mode must exist. */
897
898inline scalar_int_mode
899smallest_int_mode_for_size (poly_uint64 size)
900{
901 return as_a <scalar_int_mode> (smallest_mode_for_size (size, MODE_INT));
902}
903
/* Mode-query helpers defined in stor-layout.cc and related files.  */
extern opt_scalar_int_mode int_mode_for_mode (machine_mode);
extern opt_machine_mode bitwise_mode_for_mode (machine_mode);
extern opt_machine_mode mode_for_vector (scalar_mode, poly_uint64);
extern opt_machine_mode related_vector_mode (machine_mode, scalar_mode,
					     poly_uint64 = 0);
extern opt_machine_mode related_int_vector_mode (machine_mode);
910
/* A class for iterating through possible bitfield modes.  */
class bit_field_mode_iterator
{
public:
  bit_field_mode_iterator (HOST_WIDE_INT, HOST_WIDE_INT,
			   poly_int64, poly_int64,
			   unsigned int, bool);
  bool next_mode (scalar_int_mode *);
  bool prefer_smaller_modes ();

private:
  /* The mode reached by the most recent step of the iteration,
     if any.  */
  opt_scalar_int_mode m_mode;
  /* We use signed values here because the bit position can be negative
     for invalid input such as gcc.dg/pr48335-8.c.  */
  HOST_WIDE_INT m_bitsize;
  HOST_WIDE_INT m_bitpos;
  poly_int64 m_bitregion_start;
  poly_int64 m_bitregion_end;
  unsigned int m_align;
  bool m_volatilep;
  /* NOTE(review): presumably the number of modes returned so far —
     confirm against the constructor in stor-layout.cc.  */
  int m_count;
};
933
/* Find the best mode to use to access a bit field.  */

extern bool get_best_mode (int, int, poly_uint64, poly_uint64, unsigned int,
			   unsigned HOST_WIDE_INT, bool, scalar_int_mode *);

/* Determine alignment, 1<=result<=BIGGEST_ALIGNMENT.  */

extern CONST_MODE_BASE_ALIGN unsigned short mode_base_align[NUM_MACHINE_MODES];

extern unsigned get_mode_alignment (machine_mode);

#define GET_MODE_ALIGNMENT(MODE) get_mode_alignment (MODE)

/* For each class, get the narrowest mode in that class.  */

extern const unsigned char class_narrowest_mode[MAX_MODE_CLASS];
#define GET_CLASS_NARROWEST_MODE(CLASS) \
  ((machine_mode) class_narrowest_mode[CLASS])

/* The narrowest full integer mode available on the target.  */

#define NARROWEST_INT_MODE \
  (scalar_int_mode \
   (scalar_int_mode::from_int (class_narrowest_mode[MODE_INT])))
958
959/* Return the narrowest mode in T's class. */
960
961template<typename T>
962inline T
963get_narrowest_mode (T mode)
964{
965 return typename mode_traits<T>::from_int
966 (class_narrowest_mode[GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode])]);
967}
968
/* Define the integer modes whose sizes are BITS_PER_UNIT and BITS_PER_WORD
   and the mode whose class is Pmode and whose size is POINTER_SIZE.  */

extern scalar_int_mode byte_mode;
extern scalar_int_mode word_mode;
extern scalar_int_mode ptr_mode;

/* Target-dependent machine mode initialization - in insn-modes.cc.  */
extern void init_adjust_machine_modes (void);

/* True if truncating from MODE1 to MODE2 needs no instruction,
   according to the target hook.  */
#define TRULY_NOOP_TRUNCATION_MODES_P(MODE1, MODE2) \
  (targetm.truly_noop_truncation (GET_MODE_PRECISION (MODE1), \
				  GET_MODE_PRECISION (MODE2)))
982
983/* Return true if MODE is a scalar integer mode that fits in a
984 HOST_WIDE_INT. */
985
986inline bool
987HWI_COMPUTABLE_MODE_P (machine_mode mode)
988{
989 machine_mode mme = mode;
990 return (SCALAR_INT_MODE_P (mme)(((enum mode_class) mode_class[mme]) == MODE_INT || ((enum mode_class
) mode_class[mme]) == MODE_PARTIAL_INT)
991 && mode_to_precision (mme).coeffs[0] <= HOST_BITS_PER_WIDE_INT64);
992}
993
994inline bool
995HWI_COMPUTABLE_MODE_P (scalar_int_mode mode)
996{
997 return GET_MODE_PRECISION (mode) <= HOST_BITS_PER_WIDE_INT64;
998}
999
/* Description of one __intN type supported by the target.  */
struct int_n_data_t {
  /* These parts are initailized by genmodes output */
  unsigned int bitsize;
  scalar_int_mode_pod m;
  /* RID_* is RID_INTN_BASE + index into this array */
};

/* This is also in tree.h.  genmodes.cc guarantees the're sorted from
   smallest bitsize to largest bitsize.  */
extern bool int_n_enabled_p[NUM_INT_N_ENTS];
extern const int_n_data_t int_n_data[NUM_INT_N_ENTS];
1011
1012/* Return true if MODE has class MODE_INT, storing it as a scalar_int_mode
1013 in *INT_MODE if so. */
1014
1015template<typename T>
1016inline bool
1017is_int_mode (machine_mode mode, T *int_mode)
1018{
1019 if (GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode]) == MODE_INT)
1020 {
1021 *int_mode = scalar_int_mode (scalar_int_mode::from_int (mode));
1022 return true;
1023 }
1024 return false;
1025}
1026
1027/* Return true if MODE has class MODE_FLOAT, storing it as a
1028 scalar_float_mode in *FLOAT_MODE if so. */
1029
1030template<typename T>
1031inline bool
1032is_float_mode (machine_mode mode, T *float_mode)
1033{
1034 if (GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode]) == MODE_FLOAT)
1035 {
1036 *float_mode = scalar_float_mode (scalar_float_mode::from_int (mode));
1037 return true;
1038 }
1039 return false;
1040}
1041
1042/* Return true if MODE has class MODE_COMPLEX_INT, storing it as
1043 a complex_mode in *CMODE if so. */
1044
1045template<typename T>
1046inline bool
1047is_complex_int_mode (machine_mode mode, T *cmode)
1048{
1049 if (GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode]) == MODE_COMPLEX_INT)
1050 {
1051 *cmode = complex_mode (complex_mode::from_int (mode));
1052 return true;
1053 }
1054 return false;
1055}
1056
1057/* Return true if MODE has class MODE_COMPLEX_FLOAT, storing it as
1058 a complex_mode in *CMODE if so. */
1059
1060template<typename T>
1061inline bool
1062is_complex_float_mode (machine_mode mode, T *cmode)
1063{
1064 if (GET_MODE_CLASS (mode)((enum mode_class) mode_class[mode]) == MODE_COMPLEX_FLOAT)
1065 {
1066 *cmode = complex_mode (complex_mode::from_int (mode));
1067 return true;
1068 }
1069 return false;
1070}
1071
1072/* Return true if MODE is a scalar integer mode with a precision
1073 smaller than LIMIT's precision. */
1074
1075inline bool
1076is_narrower_int_mode (machine_mode mode, scalar_int_mode limit)
1077{
1078 scalar_int_mode int_mode;
1079 return (is_a <scalar_int_mode> (mode, &int_mode)
1080 && GET_MODE_PRECISION (int_mode) < GET_MODE_PRECISION (limit));
1081}
1082
/* Helpers used by the FOR_EACH_* mode-iteration macros below.  Each
   overload pair supports both opt_mode<T> iterators (which can report
   "no more modes") and plain machine_mode iterators (which use
   E_VOIDmode as the end marker).  */
namespace mode_iterator
{
  /* Start mode iterator *ITER at the first mode in class MCLASS, if any.  */

  template<typename T>
  inline void
  start (opt_mode<T> *iter, enum mode_class mclass)
  {
    if (GET_CLASS_NARROWEST_MODE (mclass) == E_VOIDmode)
      *iter = opt_mode<T> ();
    else
      *iter = as_a<T> (GET_CLASS_NARROWEST_MODE (mclass));
  }

  inline void
  start (machine_mode *iter, enum mode_class mclass)
  {
    *iter = GET_CLASS_NARROWEST_MODE (mclass);
  }

  /* Return true if mode iterator *ITER has not reached the end.  */

  template<typename T>
  inline bool
  iterate_p (opt_mode<T> *iter)
  {
    return iter->exists ();
  }

  inline bool
  iterate_p (machine_mode *iter)
  {
    return *iter != E_VOIDmode;
  }

  /* Set mode iterator *ITER to the next mode in the same class,
     if any.  */

  template<typename T>
  inline void
  get_next (opt_mode<T> *iter)
  {
    *iter = GET_MODE_NEXT_MODE (iter->require ());
  }

  inline void
  get_next (machine_mode *iter)
  {
    *iter = GET_MODE_NEXT_MODE (*iter).else_void ();
  }

  /* Set mode iterator *ITER to the next mode in the same class.
     Such a mode is known to exist.  */

  template<typename T>
  inline void
  get_known_next (T *iter)
  {
    *iter = GET_MODE_NEXT_MODE (*iter).require ();
  }

  /* Set mode iterator *ITER to the next wider mode in the same class,
     if any.  */

  template<typename T>
  inline void
  get_wider (opt_mode<T> *iter)
  {
    *iter = GET_MODE_WIDER_MODE (iter->require ());
  }

  inline void
  get_wider (machine_mode *iter)
  {
    *iter = GET_MODE_WIDER_MODE (*iter).else_void ();
  }

  /* Set mode iterator *ITER to the next wider mode in the same class.
     Such a mode is known to exist.  */

  template<typename T>
  inline void
  get_known_wider (T *iter)
  {
    *iter = GET_MODE_WIDER_MODE (*iter).require ();
  }

  /* Set mode iterator *ITER to the mode that is two times wider than the
     current one, if such a mode exists.  */

  template<typename T>
  inline void
  get_2xwider (opt_mode<T> *iter)
  {
    *iter = GET_MODE_2XWIDER_MODE (iter->require ());
  }

  inline void
  get_2xwider (machine_mode *iter)
  {
    *iter = GET_MODE_2XWIDER_MODE (*iter).else_void ();
  }
}
1186
/* Make ITERATOR iterate over all the modes in mode class CLASS,
   from narrowest to widest.  */
#define FOR_EACH_MODE_IN_CLASS(ITERATOR, CLASS)  \
  for (mode_iterator::start (&(ITERATOR), CLASS); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_next (&(ITERATOR)))

/* Make ITERATOR iterate over all the modes in the range [START, END),
   in order of increasing width.  */
#define FOR_EACH_MODE(ITERATOR, START, END) \
  for ((ITERATOR) = (START); \
       (ITERATOR) != (END); \
       mode_iterator::get_known_next (&(ITERATOR)))

/* Make ITERATOR iterate over START and all non-narrower modes in the same
   class, in order of increasing width.  */
#define FOR_EACH_MODE_FROM(ITERATOR, START) \
  for ((ITERATOR) = (START); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_next (&(ITERATOR)))

/* Make ITERATOR iterate over START and all wider modes in the same
   class, in order of strictly increasing width.  */
#define FOR_EACH_WIDER_MODE_FROM(ITERATOR, START) \
  for ((ITERATOR) = (START); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_wider (&(ITERATOR)))

/* Make ITERATOR iterate over modes in the range [NARROWEST, END)
   in order of increasing width, where NARROWEST is the narrowest mode
   in END's class.  */
#define FOR_EACH_MODE_UNTIL(ITERATOR, END) \
  FOR_EACH_MODE (ITERATOR, get_narrowest_mode (END), END)

/* Make ITERATOR iterate over modes in the same class as MODE, in order
   of non-decreasing width.  Start at next such mode after START,
   or don't iterate at all if there is no such mode.  */
#define FOR_EACH_NEXT_MODE(ITERATOR, START) \
  for ((ITERATOR) = (START), mode_iterator::get_next (&(ITERATOR)); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_next (&(ITERATOR)))

/* Make ITERATOR iterate over modes in the same class as MODE, in order
   of increasing width.  Start at the first mode wider than START,
   or don't iterate at all if there is no wider mode.  */
#define FOR_EACH_WIDER_MODE(ITERATOR, START) \
  for ((ITERATOR) = (START), mode_iterator::get_wider (&(ITERATOR)); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_wider (&(ITERATOR)))

/* Make ITERATOR iterate over modes in the same class as MODE, in order
   of increasing width, and with each mode being twice the width of the
   previous mode.  Start at the mode that is two times wider than START,
   or don't iterate at all if there is no such mode.  */
#define FOR_EACH_2XWIDER_MODE(ITERATOR, START) \
  for ((ITERATOR) = (START), mode_iterator::get_2xwider (&(ITERATOR)); \
       mode_iterator::iterate_p (&(ITERATOR)); \
       mode_iterator::get_2xwider (&(ITERATOR)))
1245
/* GC marking and PCH serialization hooks for pod_mode.  They are
   deliberately empty — presumably because pod_mode wraps only a plain
   enum value with no GC-managed pointers; confirm against pod_mode's
   definition earlier in this header.  */

template<typename T>
void
gt_ggc_mx (pod_mode<T> *)
{
}

template<typename T>
void
gt_pch_nx (pod_mode<T> *)
{
}

template<typename T>
void
gt_pch_nx (pod_mode<T> *, gt_pointer_operator, void *)
{
}
1263
1264#endif /* not HAVE_MACHINE_MODES */