File: build/gcc/loop-iv.cc
Warning: line 2399, column 7: Value stored to 'step_val' is never read
/* Rtl-level induction variable analysis.
   Copyright (C) 2004-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This is a simple analysis of induction variables of the loop.  The major use
   is for determining the number of iterations of a loop for loop unrolling,
   doloop optimization and branch prediction.  The iv information is computed
   on demand.

   Induction variables are analyzed by walking the use-def chains.  When
   a basic induction variable (biv) is found, it is cached in the bivs
   hash table.  When a register is proved to be a biv, its description
   is stored to DF_REF_DATA of the def reference.

   The analysis always works with one loop -- you must call
   iv_analysis_loop_init (loop) for it.  All the other functions then work with
   this loop.  When you need to work with another loop, just call
   iv_analysis_loop_init for it.  When you no longer need iv analysis, call
   iv_analysis_done () to clean up the memory.

   The available functions are:

   iv_analyze (insn, mode, reg, iv): Stores the description of the induction
     variable corresponding to the use of register REG in INSN to IV, given
     that REG has mode MODE.  Returns true if REG is an induction variable
     in INSN, false otherwise.  If a use of REG is not found in INSN,
     the following insns are scanned (so that we may call this function
     on insns returned by get_condition).
   iv_analyze_result (insn, def, iv): Stores to IV the description of the iv
     corresponding to DEF, which is a register defined in INSN.
   iv_analyze_expr (insn, mode, expr, iv): Stores to IV the description of the
     iv corresponding to expression EXPR evaluated at INSN.  All registers
     used by EXPR must also be used in INSN.  MODE is the mode of EXPR.
*/
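
/* A minimal usage sketch of the API above (illustrative only; "loop",
   "insn" and "reg" stand for values the caller already has, they are
   not defined in this file):

     class rtx_iv iv;
     scalar_int_mode mode;

     iv_analysis_loop_init (loop);
     if (is_a <scalar_int_mode> (GET_MODE (reg), &mode)
         && iv_analyze (insn, mode, reg, &iv))
       {
         ... here REG is described as iv.base + iteration * iv.step,
         evaluated in iv.mode ...
       }
     iv_analysis_done ();
*/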

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "df.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "cfgloop.h"
#include "intl.h"
#include "dumpfile.h"
#include "rtl-iter.h"
#include "tree-ssa-loop-niter.h"
#include "regs.h"
#include "function-abi.h"

/* Possible return values of iv_get_reaching_def.  */

enum iv_grd_result
{
  /* More than one reaching def, or reaching def that does not
     dominate the use.  */
  GRD_INVALID,

  /* The use is a trivial invariant of the loop, i.e. it is not changed
     inside the loop.  */
  GRD_INVARIANT,

  /* The use is reached by the initial value and a value from the
     previous iteration.  */
  GRD_MAYBE_BIV,

  /* The use has a single dominating def.  */
  GRD_SINGLE_DOM
};

/* Information about a biv.  */

class biv_entry
{
public:
  unsigned regno;   /* The register of the biv.  */
  class rtx_iv iv;  /* Value of the biv.  */
};

static bool clean_slate = true;

static unsigned int iv_ref_table_size = 0;

/* Table of rtx_ivs indexed by the df_ref uid field.  */
static class rtx_iv ** iv_ref_table;

/* Induction variable stored at the reference.  */
#define DF_REF_IV(REF) iv_ref_table[DF_REF_ID (REF)]
#define DF_REF_IV_SET(REF, IV) iv_ref_table[DF_REF_ID (REF)] = (IV)

/* The current loop.  */

static class loop *current_loop;

/* Hashtable helper.  */

struct biv_entry_hasher : free_ptr_hash <biv_entry>
{
  typedef rtx_def *compare_type;
  static inline hashval_t hash (const biv_entry *);
  static inline bool equal (const biv_entry *, const rtx_def *);
};

/* Returns hash value for biv B.  */

inline hashval_t
biv_entry_hasher::hash (const biv_entry *b)
{
  return b->regno;
}

/* Compares biv B and register R.  */

inline bool
biv_entry_hasher::equal (const biv_entry *b, const rtx_def *r)
{
  return b->regno == REGNO (r);
}

/* Bivs of the current loop.  */

static hash_table<biv_entry_hasher> *bivs;

static bool iv_analyze_op (rtx_insn *, scalar_int_mode, rtx, class rtx_iv *);

/* Return the RTX code corresponding to the IV extend code EXTEND.  */
static inline enum rtx_code
iv_extend_to_rtx_code (enum iv_extend_code extend)
{
  switch (extend)
    {
    case IV_SIGN_EXTEND:
      return SIGN_EXTEND;
    case IV_ZERO_EXTEND:
      return ZERO_EXTEND;
    case IV_UNKNOWN_EXTEND:
      return UNKNOWN;
    }
  gcc_unreachable ();
}

/* Dumps information about IV to FILE.  */

extern void dump_iv_info (FILE *, class rtx_iv *);
void
dump_iv_info (FILE *file, class rtx_iv *iv)
{
  if (!iv->base)
    {
      fprintf (file, "not simple");
      return;
    }

  if (iv->step == const0_rtx
      && !iv->first_special)
    fprintf (file, "invariant ");

  print_rtl (file, iv->base);
  if (iv->step != const0_rtx)
    {
      fprintf (file, " + ");
      print_rtl (file, iv->step);
      fprintf (file, " * iteration");
    }
  fprintf (file, " (in %s)", GET_MODE_NAME (iv->mode));

  if (iv->mode != iv->extend_mode)
    fprintf (file, " %s to %s",
             rtx_name[iv_extend_to_rtx_code (iv->extend)],
             GET_MODE_NAME (iv->extend_mode));

  if (iv->mult != const1_rtx)
    {
      fprintf (file, " * ");
      print_rtl (file, iv->mult);
    }
  if (iv->delta != const0_rtx)
    {
      fprintf (file, " + ");
      print_rtl (file, iv->delta);
    }
  if (iv->first_special)
    fprintf (file, " (first special)");
}

static void
check_iv_ref_table_size (void)
{
  if (iv_ref_table_size < DF_DEFS_TABLE_SIZE ())
    {
      unsigned int new_size = DF_DEFS_TABLE_SIZE () + (DF_DEFS_TABLE_SIZE () / 4);
      iv_ref_table = XRESIZEVEC (class rtx_iv *, iv_ref_table, new_size);
      memset (&iv_ref_table[iv_ref_table_size], 0,
              (new_size - iv_ref_table_size) * sizeof (class rtx_iv *));
      iv_ref_table_size = new_size;
    }
}


/* Checks whether REG is a well-behaved register.  */

static bool
simple_reg_p (rtx reg)
{
  unsigned r;

  if (GET_CODE (reg) == SUBREG)
    {
      if (!subreg_lowpart_p (reg))
        return false;
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    return false;

  r = REGNO (reg);
  if (HARD_REGISTER_NUM_P (r))
    return false;

  if (GET_MODE_CLASS (GET_MODE (reg)) != MODE_INT)
    return false;

  return true;
}

/* Clears the information about ivs stored in df.  */

static void
clear_iv_info (void)
{
  unsigned i, n_defs = DF_DEFS_TABLE_SIZE ();
  class rtx_iv *iv;

  check_iv_ref_table_size ();
  for (i = 0; i < n_defs; i++)
    {
      iv = iv_ref_table[i];
      if (iv)
        {
          free (iv);
          iv_ref_table[i] = NULL;
        }
    }

  bivs->empty ();
}


/* Prepare the data for an induction variable analysis of a LOOP.  */

void
iv_analysis_loop_init (class loop *loop)
{
  current_loop = loop;

  /* Clear the information from the analysis of the previous loop.  */
  if (clean_slate)
    {
      df_set_flags (DF_EQ_NOTES + DF_DEFER_INSN_RESCAN);
      bivs = new hash_table<biv_entry_hasher> (10);
      clean_slate = false;
    }
  else
    clear_iv_info ();

  /* Get rid of the ud chains before processing the rescans.  Then add
     the problem back.  */
  df_remove_problem (df_chain);
  df_process_deferred_rescans ();
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_UD_CHAIN);
  df_note_add_problem ();
  df_analyze_loop (loop);
  if (dump_file)
    df_dump_region (dump_file);

  check_iv_ref_table_size ();
}

/* Finds the definition of REG that dominates the loop latch and stores
   it to DEF.  Returns false if there is not a single definition
   dominating the latch.  If REG has no definition in the loop, DEF
   is set to NULL and true is returned.  */

static bool
latch_dominating_def (rtx reg, df_ref *def)
{
  df_ref single_rd = NULL, adef;
  unsigned regno = REGNO (reg);
  class df_rd_bb_info *bb_info = DF_RD_BB_INFO (current_loop->latch);

  for (adef = DF_REG_DEF_CHAIN (regno); adef; adef = DF_REF_NEXT_REG (adef))
    {
      if (!bitmap_bit_p (df->blocks_to_analyze, DF_REF_BBNO (adef))
          || !bitmap_bit_p (&bb_info->out, DF_REF_ID (adef)))
        continue;

      /* More than one reaching definition.  */
      if (single_rd)
        return false;

      if (!just_once_each_iteration_p (current_loop, DF_REF_BB (adef)))
        return false;

      single_rd = adef;
    }

  *def = single_rd;
  return true;
}

/* Gets definition of REG reaching its use in INSN and stores it to DEF.  */

static enum iv_grd_result
iv_get_reaching_def (rtx_insn *insn, rtx reg, df_ref *def)
{
  df_ref use, adef;
  basic_block def_bb, use_bb;
  rtx_insn *def_insn;
  bool dom_p;

  *def = NULL;
  if (!simple_reg_p (reg))
    return GRD_INVALID;
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  gcc_assert (REG_P (reg));

  use = df_find_use (insn, reg);
  gcc_assert (use != NULL);

  if (!DF_REF_CHAIN (use))
    return GRD_INVARIANT;

  /* More than one reaching def.  */
  if (DF_REF_CHAIN (use)->next)
    return GRD_INVALID;

  adef = DF_REF_CHAIN (use)->ref;

  /* We do not handle setting only part of the register.  */
  if (DF_REF_FLAGS (adef) & DF_REF_READ_WRITE)
    return GRD_INVALID;

  def_insn = DF_REF_INSN (adef);
  def_bb = DF_REF_BB (adef);
  use_bb = BLOCK_FOR_INSN (insn);

  if (use_bb == def_bb)
    dom_p = (DF_INSN_LUID (def_insn) < DF_INSN_LUID (insn));
  else
    dom_p = dominated_by_p (CDI_DOMINATORS, use_bb, def_bb);

  if (dom_p)
    {
      *def = adef;
      return GRD_SINGLE_DOM;
    }

  /* The definition does not dominate the use.  This is still OK if
     this may be a use of a biv, i.e. if the def_bb dominates loop
     latch.  */
  if (just_once_each_iteration_p (current_loop, def_bb))
    return GRD_MAYBE_BIV;

  return GRD_INVALID;
}

/* Sets IV to invariant CST in MODE.  Always returns true (just for
   consistency with other iv manipulation functions that may fail).  */

static bool
iv_constant (class rtx_iv *iv, scalar_int_mode mode, rtx cst)
{
  iv->mode = mode;
  iv->base = cst;
  iv->step = const0_rtx;
  iv->first_special = false;
  iv->extend = IV_UNKNOWN_EXTEND;
  iv->extend_mode = iv->mode;
  iv->delta = const0_rtx;
  iv->mult = const1_rtx;

  return true;
}

/* Evaluates application of subreg to MODE on IV.  */

static bool
iv_subreg (class rtx_iv *iv, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      val = lowpart_subreg (mode, val,
                            iv->extend == IV_UNKNOWN_EXTEND
                            ? iv->mode : iv->extend_mode);

      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  if (iv->extend_mode == mode)
    return true;

  if (GET_MODE_BITSIZE (mode) > GET_MODE_BITSIZE (iv->mode))
    return false;

  iv->extend = IV_UNKNOWN_EXTEND;
  iv->mode = mode;

  iv->base = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
                                  simplify_gen_binary (MULT, iv->extend_mode,
                                                       iv->base, iv->mult));
  iv->step = simplify_gen_binary (MULT, iv->extend_mode, iv->step, iv->mult);
  iv->mult = const1_rtx;
  iv->delta = const0_rtx;
  iv->first_special = false;

  return true;
}

/* Evaluates application of EXTEND to MODE on IV.  */

static bool
iv_extend (class rtx_iv *iv, enum iv_extend_code extend, scalar_int_mode mode)
{
  /* If iv is invariant, just calculate the new value.  */
  if (iv->step == const0_rtx
      && !iv->first_special)
    {
      rtx val = get_iv_value (iv, const0_rtx);
      if (iv->extend_mode != iv->mode
          && iv->extend != IV_UNKNOWN_EXTEND
          && iv->extend != extend)
        val = lowpart_subreg (iv->mode, val, iv->extend_mode);
      val = simplify_gen_unary (iv_extend_to_rtx_code (extend), mode,
                                val,
                                iv->extend == extend
                                ? iv->extend_mode : iv->mode);
      iv->base = val;
      iv->extend = IV_UNKNOWN_EXTEND;
      iv->mode = iv->extend_mode = mode;
      iv->delta = const0_rtx;
      iv->mult = const1_rtx;
      return true;
    }

  if (mode != iv->extend_mode)
    return false;

  if (iv->extend != IV_UNKNOWN_EXTEND
      && iv->extend != extend)
    return false;

  iv->extend = extend;

  return true;
}

/* Evaluates negation of IV.  */

static bool
iv_neg (class rtx_iv *iv)
{
  if (iv->extend == IV_UNKNOWN_EXTEND)
    {
      iv->base = simplify_gen_unary (NEG, iv->extend_mode,
                                     iv->base, iv->extend_mode);
      iv->step = simplify_gen_unary (NEG, iv->extend_mode,
                                     iv->step, iv->extend_mode);
    }
  else
    {
      iv->delta = simplify_gen_unary (NEG, iv->extend_mode,
                                      iv->delta, iv->extend_mode);
      iv->mult = simplify_gen_unary (NEG, iv->extend_mode,
                                     iv->mult, iv->extend_mode);
    }

  return true;
}

/* Evaluates addition or subtraction (according to OP) of IV1 to IV0.  */

static bool
iv_add (class rtx_iv *iv0, class rtx_iv *iv1, enum rtx_code op)
{
  scalar_int_mode mode;
  rtx arg;

  /* Extend the constant to extend_mode of the other operand if necessary.  */
  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == iv0->extend_mode
      && iv0->step == const0_rtx
      && GET_MODE_SIZE (iv0->extend_mode) < GET_MODE_SIZE (iv1->extend_mode))
    {
      iv0->extend_mode = iv1->extend_mode;
      iv0->base = simplify_gen_unary (ZERO_EXTEND, iv0->extend_mode,
                                      iv0->base, iv0->mode);
    }
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == iv1->extend_mode
      && iv1->step == const0_rtx
      && GET_MODE_SIZE (iv1->extend_mode) < GET_MODE_SIZE (iv0->extend_mode))
    {
      iv1->extend_mode = iv0->extend_mode;
      iv1->base = simplify_gen_unary (ZERO_EXTEND, iv1->extend_mode,
                                      iv1->base, iv1->mode);
    }

  mode = iv0->extend_mode;
  if (mode != iv1->extend_mode)
    return false;

  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv1->extend == IV_UNKNOWN_EXTEND)
    {
      if (iv0->mode != iv1->mode)
        return false;

      iv0->base = simplify_gen_binary (op, mode, iv0->base, iv1->base);
      iv0->step = simplify_gen_binary (op, mode, iv0->step, iv1->step);

      return true;
    }

  /* Handle addition of constant.  */
  if (iv1->extend == IV_UNKNOWN_EXTEND
      && iv1->mode == mode
      && iv1->step == const0_rtx)
    {
      iv0->delta = simplify_gen_binary (op, mode, iv0->delta, iv1->base);
      return true;
    }

  if (iv0->extend == IV_UNKNOWN_EXTEND
      && iv0->mode == mode
      && iv0->step == const0_rtx)
    {
      arg = iv0->base;
      *iv0 = *iv1;
      if (op == MINUS
          && !iv_neg (iv0))
        return false;

      iv0->delta = simplify_gen_binary (PLUS, mode, iv0->delta, arg);
      return true;
    }

  return false;
}

/* Evaluates multiplication of IV by constant MBY.  */

static bool
iv_mult (class rtx_iv *iv, rtx mby)
{
  scalar_int_mode mode = iv->extend_mode;

  if (GET_MODE (mby) != VOIDmode
      && GET_MODE (mby) != mode)
    return false;

  if (iv->extend == IV_UNKNOWN_EXTEND)
    {
      iv->base = simplify_gen_binary (MULT, mode, iv->base, mby);
      iv->step = simplify_gen_binary (MULT, mode, iv->step, mby);
    }
  else
    {
      iv->delta = simplify_gen_binary (MULT, mode, iv->delta, mby);
      iv->mult = simplify_gen_binary (MULT, mode, iv->mult, mby);
    }

  return true;
}

/* Evaluates shift of IV by constant MBY.  */

static bool
iv_shift (class rtx_iv *iv, rtx mby)
{
  scalar_int_mode mode = iv->extend_mode;

  if (GET_MODE (mby) != VOIDmode
      && GET_MODE (mby) != mode)
    return false;

  if (iv->extend == IV_UNKNOWN_EXTEND)
    {
      iv->base = simplify_gen_binary (ASHIFT, mode, iv->base, mby);
      iv->step = simplify_gen_binary (ASHIFT, mode, iv->step, mby);
    }
  else
    {
      iv->delta = simplify_gen_binary (ASHIFT, mode, iv->delta, mby);
      iv->mult = simplify_gen_binary (ASHIFT, mode, iv->mult, mby);
    }

  return true;
}

/* The recursive part of get_biv_step.  Gets the value of the single value
   defined by DEF with respect to the initial value of REG inside the loop,
   in the shape described at get_biv_step.  */

static bool
get_biv_step_1 (df_ref def, scalar_int_mode outer_mode, rtx reg,
                rtx *inner_step, scalar_int_mode *inner_mode,
                enum iv_extend_code *extend,
                rtx *outer_step)
{
  rtx set, rhs, op0 = NULL_RTX, op1 = NULL_RTX;
  rtx next, nextr;
  enum rtx_code code;
  rtx_insn *insn = DF_REF_INSN (def);
  df_ref next_def;
  enum iv_grd_result res;

  set = single_set (insn);
  if (!set)
    return false;

  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  code = GET_CODE (rhs);
  switch (code)
    {
    case SUBREG:
    case REG:
      next = rhs;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);

      if (code == PLUS && CONSTANT_P (op0))
        std::swap (op0, op1);

      if (!simple_reg_p (op0)
          || !CONSTANT_P (op1))
        return false;

      if (GET_MODE (rhs) != outer_mode)
        {
          /* ppc64 uses expressions like

             (set x:SI (plus:SI (subreg:SI y:DI) 1)).

             this is equivalent to

             (set x':DI (plus:DI y:DI 1))
             (set x:SI (subreg:SI (x':DI)).  */
          if (GET_CODE (op0) != SUBREG)
            return false;
          if (GET_MODE (SUBREG_REG (op0)) != outer_mode)
            return false;
        }

      next = op0;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_MODE (rhs) != outer_mode)
        return false;

      op0 = XEXP (rhs, 0);
      if (!simple_reg_p (op0))
        return false;

      next = op0;
      break;

    default:
      return false;
    }

  if (GET_CODE (next) == SUBREG)
    {
      if (!subreg_lowpart_p (next))
        return false;

      nextr = SUBREG_REG (next);
      if (GET_MODE (nextr) != outer_mode)
        return false;
    }
  else
    nextr = next;

  res = iv_get_reaching_def (insn, nextr, &next_def);

  if (res == GRD_INVALID || res == GRD_INVARIANT)
    return false;

  if (res == GRD_MAYBE_BIV)
    {
      if (!rtx_equal_p (nextr, reg))
        return false;

      *inner_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
      *inner_mode = outer_mode;
      *outer_step = const0_rtx;
    }
  else if (!get_biv_step_1 (next_def, outer_mode, reg,
                            inner_step, inner_mode, extend,
                            outer_step))
    return false;

  if (GET_CODE (next) == SUBREG)
    {
      scalar_int_mode amode;
      if (!is_a <scalar_int_mode> (GET_MODE (next), &amode)
          || GET_MODE_SIZE (amode) > GET_MODE_SIZE (*inner_mode))
        return false;

      *inner_mode = amode;
      *inner_step = simplify_gen_binary (PLUS, outer_mode,
                                         *inner_step, *outer_step);
      *outer_step = const0_rtx;
      *extend = IV_UNKNOWN_EXTEND;
    }

  switch (code)
    {
    case REG:
    case SUBREG:
      break;

    case PLUS:
    case MINUS:
      if (*inner_mode == outer_mode
          /* See comment in previous switch.  */
          || GET_MODE (rhs) != outer_mode)
        *inner_step = simplify_gen_binary (code, outer_mode,
                                           *inner_step, op1);
      else
        *outer_step = simplify_gen_binary (code, outer_mode,
                                           *outer_step, op1);
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      gcc_assert (GET_MODE (op0) == *inner_mode
                  && *extend == IV_UNKNOWN_EXTEND
                  && *outer_step == const0_rtx);

      *extend = (code == SIGN_EXTEND) ? IV_SIGN_EXTEND : IV_ZERO_EXTEND;
      break;

    default:
      return false;
    }

  return true;
}

/* Gets the operation on register REG inside the loop, in the shape

   OUTER_STEP + EXTEND_{OUTER_MODE} (SUBREG_{INNER_MODE} (REG + INNER_STEP))

   If the operation cannot be described in this shape, return false.
   LAST_DEF is the definition of REG that dominates the loop latch.  */
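
/* For example (an illustration derived from the code below, not part of
   the original sources): for a plain counter increment such as

     (set i:SI (plus:SI i:SI (const_int 1)))

   this yields INNER_STEP = const1_rtx, INNER_MODE = SImode,
   EXTEND = IV_UNKNOWN_EXTEND and OUTER_STEP = const0_rtx, i.e. the
   whole step happens in the inner (= outer) mode with no extension.  */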

static bool
get_biv_step (df_ref last_def, scalar_int_mode outer_mode, rtx reg,
              rtx *inner_step, scalar_int_mode *inner_mode,
              enum iv_extend_code *extend, rtx *outer_step)
{
  if (!get_biv_step_1 (last_def, outer_mode, reg,
                       inner_step, inner_mode, extend,
                       outer_step))
    return false;

  gcc_assert ((*inner_mode == outer_mode) != (*extend != IV_UNKNOWN_EXTEND));
  gcc_assert (*inner_mode != outer_mode || *outer_step == const0_rtx);

  return true;
}

/* Records information that DEF is induction variable IV.  */

static void
record_iv (df_ref def, class rtx_iv *iv)
{
  class rtx_iv *recorded_iv = XNEW (class rtx_iv);

  *recorded_iv = *iv;
  check_iv_ref_table_size ();
  DF_REF_IV_SET (def, recorded_iv);
}

/* If DEF was already analyzed for bivness, store the description of the biv to
   IV and return true.  Otherwise return false.  */

static bool
analyzed_for_bivness_p (rtx def, class rtx_iv *iv)
{
  class biv_entry *biv = bivs->find_with_hash (def, REGNO (def));

  if (!biv)
    return false;

  *iv = biv->iv;
  return true;
}

static void
record_biv (rtx def, class rtx_iv *iv)
{
  class biv_entry *biv = XNEW (class biv_entry);
  biv_entry **slot = bivs->find_slot_with_hash (def, REGNO (def), INSERT);

  biv->regno = REGNO (def);
  biv->iv = *iv;
  gcc_assert (!*slot);
  *slot = biv;
}

/* Determines whether DEF is a biv and if so, stores its description
   to *IV.  OUTER_MODE is the mode of DEF.  */

static bool
iv_analyze_biv (scalar_int_mode outer_mode, rtx def, class rtx_iv *iv)
{
  rtx inner_step, outer_step;
  scalar_int_mode inner_mode;
  enum iv_extend_code extend;
  df_ref last_def;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing ");
      print_rtl (dump_file, def);
      fprintf (dump_file, " for bivness.\n");
    }

  if (!REG_P (def))
    {
      if (!CONSTANT_P (def))
        return false;

      return iv_constant (iv, outer_mode, def);
    }

  if (!latch_dominating_def (def, &last_def))
    {
      if (dump_file)
        fprintf (dump_file, "  not simple.\n");
      return false;
    }

  if (!last_def)
    return iv_constant (iv, outer_mode, def);

  if (analyzed_for_bivness_p (def, iv))
    {
      if (dump_file)
        fprintf (dump_file, "  already analysed.\n");
      return iv->base != NULL_RTX;
    }

  if (!get_biv_step (last_def, outer_mode, def, &inner_step, &inner_mode,
                     &extend, &outer_step))
    {
      iv->base = NULL_RTX;
      goto end;
    }

  /* Loop transforms base to es (base + inner_step) + outer_step,
     where es means extend of subreg between inner_mode and outer_mode.
     The corresponding induction variable is

     es ((base - outer_step) + i * (inner_step + outer_step)) + outer_step  */
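  /* (In the common case where inner_mode == outer_mode and outer_step is
     const0_rtx, this reduces to plain base + i * inner_step.)  */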

  iv->base = simplify_gen_binary (MINUS, outer_mode, def, outer_step);
  iv->step = simplify_gen_binary (PLUS, outer_mode, inner_step, outer_step);
  iv->mode = inner_mode;
  iv->extend_mode = outer_mode;
  iv->extend = extend;
  iv->mult = const1_rtx;
  iv->delta = outer_step;
  iv->first_special = inner_mode != outer_mode;

 end:
  if (dump_file)
    {
      fprintf (dump_file, "  ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");
    }

  record_biv (def, iv);
  return iv->base != NULL_RTX;
}

/* Analyzes expression RHS used at INSN and stores the result to *IV.
   The mode of the induction variable is MODE.  */

bool
iv_analyze_expr (rtx_insn *insn, scalar_int_mode mode, rtx rhs,
                 class rtx_iv *iv)
{
  rtx mby = NULL_RTX;
  rtx op0 = NULL_RTX, op1 = NULL_RTX;
  class rtx_iv iv0, iv1;
  enum rtx_code code = GET_CODE (rhs);
  scalar_int_mode omode = mode;

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  gcc_assert (GET_MODE (rhs) == mode || GET_MODE (rhs) == VOIDmode);

  if (CONSTANT_P (rhs)
      || REG_P (rhs)
      || code == SUBREG)
    return iv_analyze_op (insn, mode, rhs, iv);

  switch (code)
    {
    case REG:
      op0 = rhs;
      break;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
    case NEG:
      op0 = XEXP (rhs, 0);
      /* We don't know how many bits there are in a sign-extended constant.  */
      if (!is_a <scalar_int_mode> (GET_MODE (op0), &omode))
        return false;
      break;

    case PLUS:
    case MINUS:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      break;

    case MULT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      if (!CONSTANT_P (mby))
        std::swap (op0, mby);
      if (!CONSTANT_P (mby))
        return false;
      break;

    case ASHIFT:
      op0 = XEXP (rhs, 0);
      mby = XEXP (rhs, 1);
      if (!CONSTANT_P (mby))
        return false;
      break;

    default:
      return false;
    }

  if (op0
      && !iv_analyze_expr (insn, omode, op0, &iv0))
    return false;

  if (op1
      && !iv_analyze_expr (insn, omode, op1, &iv1))
    return false;

  switch (code)
    {
    case SIGN_EXTEND:
      if (!iv_extend (&iv0, IV_SIGN_EXTEND, mode))
        return false;
      break;

    case ZERO_EXTEND:
      if (!iv_extend (&iv0, IV_ZERO_EXTEND, mode))
        return false;
      break;

    case NEG:
      if (!iv_neg (&iv0))
        return false;
      break;

    case PLUS:
    case MINUS:
      if (!iv_add (&iv0, &iv1, code))
        return false;
      break;

    case MULT:
      if (!iv_mult (&iv0, mby))
        return false;
      break;

    case ASHIFT:
      if (!iv_shift (&iv0, mby))
        return false;
      break;

    default:
      break;
    }

  *iv = iv0;
  return iv->base != NULL_RTX;
}

/* Analyzes iv DEF and stores the result to *IV.  */

static bool
iv_analyze_def (df_ref def, class rtx_iv *iv)
{
  rtx_insn *insn = DF_REF_INSN (def);
  rtx reg = DF_REF_REG (def);
  rtx set, rhs;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing def of ");
      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);
    }

  check_iv_ref_table_size ();
  if (DF_REF_IV (def))
    {
      if (dump_file)
        fprintf (dump_file, "  already analysed.\n");
      *iv = *DF_REF_IV (def);
      return iv->base != NULL_RTX;
    }

  iv->base = NULL_RTX;
  iv->step = NULL_RTX;

  scalar_int_mode mode;
  if (!REG_P (reg) || !is_a <scalar_int_mode> (GET_MODE (reg), &mode))
    return false;

  set = single_set (insn);
  if (!set)
    return false;

  if (!REG_P (SET_DEST (set)))
    return false;

  gcc_assert (SET_DEST (set) == reg);
  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  iv_analyze_expr (insn, mode, rhs, iv);
  record_iv (def, iv);

  if (dump_file)
    {
      print_rtl (dump_file, reg);
      fprintf (dump_file, " in insn ");
      print_rtl_single (dump_file, insn);
      fprintf (dump_file, " is ");
      dump_iv_info (dump_file, iv);
      fprintf (dump_file, "\n");
    }

  return iv->base != NULL_RTX;
}

/* Analyzes operand OP of INSN and stores the result to *IV.  MODE is the
   mode of OP.  */

static bool
iv_analyze_op (rtx_insn *insn, scalar_int_mode mode, rtx op, class rtx_iv *iv)
{
  df_ref def = NULL;
  enum iv_grd_result res;

  if (dump_file)
    {
      fprintf (dump_file, "Analyzing operand ");
      print_rtl (dump_file, op);
      fprintf (dump_file, " of insn ");
      print_rtl_single (dump_file, insn);
    }

  if (function_invariant_p (op))
    res = GRD_INVARIANT;
  else if (GET_CODE (op) == SUBREG)
    {
      scalar_int_mode inner_mode;
      if (!subreg_lowpart_p (op)
          || !is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &inner_mode))
        return false;

      if (!iv_analyze_op (insn, inner_mode, SUBREG_REG (op), iv))
        return false;

      return iv_subreg (iv, mode);
    }
  else
    {
      res = iv_get_reaching_def (insn, op, &def);
      if (res == GRD_INVALID)
        {
          if (dump_file)
            fprintf (dump_file, "  not simple.\n");
          return false;
        }
    }

  if (res == GRD_INVARIANT)
    {
      iv_constant (iv, mode, op);

      if (dump_file)
        {
          fprintf (dump_file, "  ");
          dump_iv_info (dump_file, iv);
          fprintf (dump_file, "\n");
        }
      return true;
    }

  if (res == GRD_MAYBE_BIV)
    return iv_analyze_biv (mode, op, iv);

  return iv_analyze_def (def, iv);
}

/* Analyzes value VAL at INSN and stores the result to *IV.  MODE is the
   mode of VAL.  */

bool
iv_analyze (rtx_insn *insn, scalar_int_mode mode, rtx val, class rtx_iv *iv)
{
  rtx reg;

  /* We must find the insn in which VAL is used, so that we get to UD chains.
     Since the function is sometimes called on the result of get_condition,
     this does not necessarily have to be directly INSN; scan also the
     following insns.  */
  if (simple_reg_p (val))
    {
      if (GET_CODE (val) == SUBREG)
        reg = SUBREG_REG (val);
      else
        reg = val;

      while (!df_find_use (insn, reg))
        insn = NEXT_INSN (insn);
    }

  return iv_analyze_op (insn, mode, val, iv);
}

/* Analyzes definition of DEF in INSN and stores the result to IV.  */

bool
iv_analyze_result (rtx_insn *insn, rtx def, class rtx_iv *iv)
{
  df_ref adef;

  adef = df_find_def (insn, def);
  if (!adef)
    return false;

  return iv_analyze_def (adef, iv);
}

/* Checks whether definition of register REG in INSN is a basic induction
   variable.  MODE is the mode of REG.

   IV analysis must have been initialized (via a call to
   iv_analysis_loop_init) for this function to produce a result.  */

bool
biv_p (rtx_insn *insn, scalar_int_mode mode, rtx reg)
{
  class rtx_iv iv;
  df_ref def, last_def;

  if (!simple_reg_p (reg))
    return false;

  def = df_find_def (insn, reg);
  gcc_assert (def != NULL);
  if (!latch_dominating_def (reg, &last_def))
    return false;
  if (last_def != def)
    return false;

  if (!iv_analyze_biv (mode, reg, &iv))
    return false;

  return iv.step != const0_rtx;
}

/* Calculates value of IV at ITERATION-th iteration.  */

rtx
get_iv_value (class rtx_iv *iv, rtx iteration)
{
  rtx val;

  /* We would need to generate some if_then_else patterns, and so far
     it is not needed anywhere.  */
  gcc_assert (!iv->first_special);

  if (iv->step != const0_rtx && iteration != const0_rtx)
    val = simplify_gen_binary (PLUS, iv->extend_mode, iv->base,
                               simplify_gen_binary (MULT, iv->extend_mode,
                                                    iv->step, iteration));
  else
    val = iv->base;

  if (iv->extend_mode == iv->mode)
    return val;

  val = lowpart_subreg (iv->mode, val, iv->extend_mode);

  if (iv->extend == IV_UNKNOWN_EXTEND)
    return val;

  val = simplify_gen_unary (iv_extend_to_rtx_code (iv->extend),
                            iv->extend_mode, val, iv->mode);
  val = simplify_gen_binary (PLUS, iv->extend_mode, iv->delta,
                             simplify_gen_binary (MULT, iv->extend_mode,
                                                  iv->mult, val));

  return val;
}

/* Free the data for an induction variable analysis.  */

void
iv_analysis_done (void)
{
  if (!clean_slate)
    {
      clear_iv_info ();
      clean_slate = true;
      df_finish_pass (true);
      delete bivs;
      bivs = NULL;
      free (iv_ref_table);
      iv_ref_table = NULL;
      iv_ref_table_size = 0;
    }
}

/* Computes inverse to X modulo (1 << MOD).  */

static uint64_t
inverse (uint64_t x, int mod)
{
  uint64_t mask =
    ((uint64_t) 1 << (mod - 1) << 1) - 1;
  uint64_t rslt = 1;
  int i;

  for (i = 0; i < mod - 1; i++)
    {
      rslt = (rslt * x) & mask;
      x = (x * x) & mask;
    }

  return rslt;
}
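
/* For instance (a worked example, not part of the original sources):
   inverse (3, 4) runs with mask = 15 and, after the three loop
   iterations, returns rslt = 3^(2^3 - 1) = 3^7 = 2187 mod 16 = 11;
   indeed 3 * 11 = 33 == 1 (mod 16).  The loop computes
   X^(2^(MOD-1) - 1) masked to MOD bits, which for odd X is the
   multiplicative inverse of X modulo 2^MOD.  */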

/* Checks whether any register in X is in set ALT.  */

static bool
altered_reg_used (const_rtx x, bitmap alt)
{
  subrtx_iterator::array_type array;
  FOR_EACH_SUBRTX (iter, array, x, NONCONST)
    {
      const_rtx x = *iter;
      if (REG_P (x) && REGNO_REG_SET_P (alt, REGNO (x)))
        return true;
    }
  return false;
}

/* Marks registers altered by EXPR in set ALT.  */

static void
mark_altered (rtx expr, const_rtx by ATTRIBUTE_UNUSED, void *alt)
{
  if (GET_CODE (expr) == SUBREG)
    expr = SUBREG_REG (expr);
  if (!REG_P (expr))
    return;

  SET_REGNO_REG_SET ((bitmap) alt, REGNO (expr));
}

/* Checks whether RHS is simple enough to process.  */

static bool
simple_rhs_p (rtx rhs)
{
  rtx op0, op1;

  if (function_invariant_p (rhs)
      || (REG_P (rhs) && !HARD_REGISTER_P (rhs)))
    return true;

  switch (GET_CODE (rhs))
    {
    case PLUS:
    case MINUS:
    case AND:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      /* Allow reg OP const and reg OP reg.  */
      if (!(REG_P (op0) && !HARD_REGISTER_P (op0))
          && !function_invariant_p (op0))
        return false;
      if (!(REG_P (op1) && !HARD_REGISTER_P (op1))
          && !function_invariant_p (op1))
        return false;

      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case MULT:
      op0 = XEXP (rhs, 0);
      op1 = XEXP (rhs, 1);
      /* Allow reg OP const.  */
      if (!(REG_P (op0) && !HARD_REGISTER_P (op0)))
        return false;
      if (!function_invariant_p (op1))
        return false;

      return true;

    default:
      return false;
    }
}

/* For any registers in *EXPR that have a single definition, try to replace
   them with the known-equivalent values.  */

static void
replace_single_def_regs (rtx *expr)
{
  subrtx_var_iterator::array_type array;
 repeat:
  FOR_EACH_SUBRTX_VAR (iter, array, *expr, NONCONST)
    {
      rtx x = *iter;
      if (REG_P (x))
        if (rtx new_x = df_find_single_def_src (x))
          {
            *expr = simplify_replace_rtx (*expr, x, new_x);
            goto repeat;
          }
    }
}

/* A subroutine of simplify_using_initial_values, this function examines INSN
   to see if it contains a suitable set that we can use to make a replacement.
   If it is suitable, return true and set DEST and SRC to the lhs and rhs of
   the set; return false otherwise.  */

static bool
suitable_set_for_replacement (rtx_insn *insn, rtx *dest, rtx *src)
{
  rtx set = single_set (insn);
  rtx lhs = NULL_RTX, rhs;

  if (!set)
    return false;

  lhs = SET_DEST (set);
  if (!REG_P (lhs))
    return false;

  rhs = find_reg_equal_equiv_note (insn);
  if (rhs)
    rhs = XEXP (rhs, 0);
  else
    rhs = SET_SRC (set);

  if (!simple_rhs_p (rhs))
    return false;

  *dest = lhs;
  *src = rhs;
  return true;
}

/* Using the data returned by suitable_set_for_replacement, replace DEST
   with SRC in *EXPR, storing the result back into *EXPR.  Also call
   replace_single_def_regs if the replacement changed something.  */
static void
replace_in_expr (rtx *expr, rtx dest, rtx src)
{
  rtx old = *expr;
  *expr = simplify_replace_rtx (*expr, dest, src);
  if (old == *expr)
    return;
  replace_single_def_regs (expr);
}

/* Checks whether A implies B.  */

static bool
implies_p (rtx a, rtx b)
{
  rtx op0, op1, opb0, opb1;
  machine_mode mode;

  if (rtx_equal_p (a, b))
    return true;

  if (GET_CODE (a) == EQ)
    {
      op0 = XEXP (a, 0);
      op1 = XEXP (a, 1);

      if (REG_P (op0)
          || (GET_CODE (op0) == SUBREG
              && REG_P (SUBREG_REG (op0))))
        {
          rtx r = simplify_replace_rtx (b, op0, op1);
          if (r == const_true_rtx)
            return true;
        }

      if (REG_P (op1)
          || (GET_CODE (op1) == SUBREG
              && REG_P (SUBREG_REG (op1))))
        {
          rtx r = simplify_replace_rtx (b, op1, op0);
          if (r == const_true_rtx)
            return true;
        }
    }

  if (b == const_true_rtx)
    return true;

  if ((GET_RTX_CLASS (GET_CODE (a)) != RTX_COMM_COMPARE
       && GET_RTX_CLASS (GET_CODE (a)) != RTX_COMPARE)
      || (GET_RTX_CLASS (GET_CODE (b)) != RTX_COMM_COMPARE
          && GET_RTX_CLASS (GET_CODE (b)) != RTX_COMPARE))
    return false;

  op0 = XEXP (a, 0);
  op1 = XEXP (a, 1);
  opb0 = XEXP (b, 0);
  opb1 = XEXP (b, 1);

  mode = GET_MODE (op0);
  if (mode != GET_MODE (opb0))
    mode = VOIDmode;
  else if (mode == VOIDmode)
    {
      mode = GET_MODE (op1);
      if (mode != GET_MODE (opb1))
        mode = VOIDmode;
    }

  /* A < B implies A + 1 <= B.  */
  if ((GET_CODE (a) == GT || GET_CODE (a) == LT)
      && (GET_CODE (b) == GE || GET_CODE (b) == LE))
    {
      if (GET_CODE (a) == GT)
        std::swap (op0, op1);

      if (GET_CODE (b) == GE)
        std::swap (opb0, opb1);

      if (SCALAR_INT_MODE_P (mode)
          && rtx_equal_p (op1, opb1)
          && simplify_gen_binary (MINUS, mode, opb0, op0) == const1_rtx)
        return true;
      return false;
    }

  /* A < B or A > B imply A != B.  TODO: Likewise
     A + n < B implies A != B + n if neither wraps.  */
  if (GET_CODE (b) == NE
      && (GET_CODE (a) == GT || GET_CODE (a) == GTU
          || GET_CODE (a) == LT || GET_CODE (a) == LTU))
    {
      if (rtx_equal_p (op0, opb0)
          && rtx_equal_p (op1, opb1))
        return true;
    }

  /* For unsigned comparisons, A != 0 implies A > 0 and A >= 1.  */
  if (GET_CODE (a) == NE
      && op1 == const0_rtx)
    {
      if ((GET_CODE (b) == GTU
           && opb1 == const0_rtx)
          || (GET_CODE (b) == GEU
              && opb1 == const1_rtx))
        return rtx_equal_p (op0, opb0);
    }

  /* A != N is equivalent to A - (N + 1) <u -1.  */
  if (GET_CODE (a) == NE
      && CONST_INT_P (op1)
      && GET_CODE (b) == LTU
      && opb1 == constm1_rtx
      && GET_CODE (opb0) == PLUS
      && CONST_INT_P (XEXP (opb0, 1))
      /* Avoid overflows.  */
      && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
          != ((unsigned HOST_WIDE_INT)1
              << (HOST_BITS_PER_WIDE_INT - 1)) - 1)
      && INTVAL (XEXP (opb0, 1)) + 1 == -INTVAL (op1))
    return rtx_equal_p (op0, XEXP (opb0, 0));

  /* Likewise, A != N implies A - N > 0.  */
  if (GET_CODE (a) == NE
      && CONST_INT_P (op1))
    {
      if (GET_CODE (b) == GTU
          && GET_CODE (opb0) == PLUS
          && opb1 == const0_rtx
          && CONST_INT_P (XEXP (opb0, 1))
          /* Avoid overflows.  */
          && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
              != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
          && rtx_equal_p (XEXP (opb0, 0), op0))
        return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
      if (GET_CODE (b) == GEU
          && GET_CODE (opb0) == PLUS
          && opb1 == const1_rtx
          && CONST_INT_P (XEXP (opb0, 1))
          /* Avoid overflows.  */
          && ((unsigned HOST_WIDE_INT) INTVAL (XEXP (opb0, 1))
              != (HOST_WIDE_INT_1U << (HOST_BITS_PER_WIDE_INT - 1)))
          && rtx_equal_p (XEXP (opb0, 0), op0))
        return INTVAL (op1) == -INTVAL (XEXP (opb0, 1));
    }

  /* A >s X, where X is positive, implies A <u Y, if Y is negative.  */
  if ((GET_CODE (a) == GT || GET_CODE (a) == GE)
      && CONST_INT_P (op1)
      && ((GET_CODE (a) == GT && op1 == constm1_rtx)
          || INTVAL (op1) >= 0)
      && GET_CODE (b) == LTU
      && CONST_INT_P (opb1)
      && rtx_equal_p (op0, opb0))
    return INTVAL (opb1) < 0;

  return false;
}
1595 | |
1596 | /* Canonicalizes COND so that
1597 |
1598 | (1) operands are ordered according to
1599 | swap_commutative_operands_p;
1600 | (2) (LE x const) is replaced with (LT x <const+1>), and similarly
1601 | for GE, GEU, and LEU. */
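/* For instance (hypothetical inputs): (LE x 7) canonicalizes to (LT x 8),
   (GEU x 1) to (GTU x 0), and (GE x 3) to (GT x 2); the wi::ne_p guards
   below skip the rewrite when the constant is already the extreme value
   of the mode, where adding or subtracting 1 would wrap. */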
1602 | |
1603 | rtx |
1604 | canon_condition (rtx cond) |
1605 | { |
1606 | rtx op0, op1; |
1607 | enum rtx_code code; |
1608 | machine_mode mode; |
1609 | |
1610 | code = GET_CODE (cond);
1611 | op0 = XEXP (cond, 0);
1612 | op1 = XEXP (cond, 1);
1613 | |
1614 | if (swap_commutative_operands_p (op0, op1)) |
1615 | { |
1616 | code = swap_condition (code); |
1617 | std::swap (op0, op1); |
1618 | } |
1619 | |
1620 | mode = GET_MODE (op0);
1621 | if (mode == VOIDmode)
1622 | mode = GET_MODE (op1);
1623 | gcc_assert (mode != VOIDmode);
1624 |
1625 | if (CONST_SCALAR_INT_P (op1) && GET_MODE_CLASS (mode) != MODE_CC)
1626 | { |
1627 | rtx_mode_t const_val (op1, mode); |
1628 | |
1629 | switch (code) |
1630 | { |
1631 | case LE: |
1632 | if (wi::ne_p (const_val, wi::max_value (mode, SIGNED))) |
1633 | { |
1634 | code = LT; |
1635 | op1 = immed_wide_int_const (wi::add (const_val, 1), mode); |
1636 | } |
1637 | break; |
1638 | |
1639 | case GE: |
1640 | if (wi::ne_p (const_val, wi::min_value (mode, SIGNED))) |
1641 | { |
1642 | code = GT; |
1643 | op1 = immed_wide_int_const (wi::sub (const_val, 1), mode); |
1644 | } |
1645 | break; |
1646 | |
1647 | case LEU: |
1648 | if (wi::ne_p (const_val, -1)) |
1649 | { |
1650 | code = LTU; |
1651 | op1 = immed_wide_int_const (wi::add (const_val, 1), mode); |
1652 | } |
1653 | break; |
1654 | |
1655 | case GEU: |
1656 | if (wi::ne_p (const_val, 0)) |
1657 | { |
1658 | code = GTU; |
1659 | op1 = immed_wide_int_const (wi::sub (const_val, 1), mode); |
1660 | } |
1661 | break; |
1662 | |
1663 | default: |
1664 | break; |
1665 | } |
1666 | } |
1667 | |
1668 | if (op0 != XEXP (cond, 0)
1669 | || op1 != XEXP (cond, 1)
1670 | || code != GET_CODE (cond)
1671 | || GET_MODE (cond) != SImode)
1672 | cond = gen_rtx_fmt_ee (code, SImode, op0, op1);
1673 | |
1674 | return cond; |
1675 | } |
1676 | |
1677 | /* Reverses CONDition; returns NULL if we cannot. */ |
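/* E.g. (EQ x y) reverses to (NE x y). reversed_comparison_code returns
   UNKNOWN -- and we return NULL -- when the reversal is not safe, as for
   some floating-point comparisons where NaN operands would make the
   reversed code mean something different. */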
1678 | |
1679 | static rtx |
1680 | reversed_condition (rtx cond) |
1681 | { |
1682 | enum rtx_code reversed; |
1683 | reversed = reversed_comparison_code (cond, NULL);
1684 | if (reversed == UNKNOWN)
1685 | return NULL_RTX;
1686 | else
1687 | return gen_rtx_fmt_ee (reversed,
1688 | GET_MODE (cond), XEXP (cond, 0),
1689 | XEXP (cond, 1));
1690 | } |
1691 | |
1692 | /* Tries to use the fact that COND holds to simplify EXPR. ALTERED is the |
1693 | set of altered regs. */ |
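/* Two illustrations (hypothetical operands): if COND is
   (eq (reg i) (const_int 0)), every use of i in *EXPR is replaced by 0
   outright; if COND is (ne (reg i) (const_int 0)) and *EXPR is
   (gtu (reg i) (const_int 0)), implies_p proves the implication and
   *EXPR collapses to const_true_rtx. */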
1694 | |
1695 | void |
1696 | simplify_using_condition (rtx cond, rtx *expr, regset altered) |
1697 | { |
1698 | rtx rev, reve, exp = *expr; |
1699 | |
1700 | /* If some register gets altered later, we do not really speak about its |
1701 | value at the time of comparison. */ |
1702 | if (altered && altered_reg_used (cond, altered)) |
1703 | return; |
1704 | |
1705 | if (GET_CODE (cond) == EQ
1706 | && REG_P (XEXP (cond, 0)) && CONSTANT_P (XEXP (cond, 1)))
1707 | {
1708 | *expr = simplify_replace_rtx (*expr, XEXP (cond, 0), XEXP (cond, 1));
1709 | return;
1710 | }
1711 | |
1712 | if (!COMPARISON_P (exp))
1713 | return;
1714 | |
1715 | rev = reversed_condition (cond); |
1716 | reve = reversed_condition (exp); |
1717 | |
1718 | cond = canon_condition (cond); |
1719 | exp = canon_condition (exp); |
1720 | if (rev) |
1721 | rev = canon_condition (rev); |
1722 | if (reve) |
1723 | reve = canon_condition (reve); |
1724 | |
1725 | if (rtx_equal_p (exp, cond)) |
1726 | { |
1727 | *expr = const_true_rtx; |
1728 | return; |
1729 | } |
1730 | |
1731 | if (rev && rtx_equal_p (exp, rev))
1732 | {
1733 | *expr = const0_rtx;
1734 | return;
1735 | }
1736 |
1737 | if (implies_p (cond, exp))
1738 | {
1739 | *expr = const_true_rtx;
1740 | return;
1741 | }
1742 |
1743 | if (reve && implies_p (cond, reve))
1744 | {
1745 | *expr = const0_rtx;
1746 | return;
1747 | }
1748 |
1749 | /* A proof by contradiction. If *EXPR implies (not cond), *EXPR must
1750 | be false. */
1751 | if (rev && implies_p (exp, rev))
1752 | {
1753 | *expr = const0_rtx;
1754 | return;
1755 | }
1756 |
1757 | /* Similarly, if (not *EXPR) implies (not cond), *EXPR must be true. */
1758 | if (rev && reve && implies_p (reve, rev))
1759 | {
1760 | *expr = const_true_rtx;
1761 | return;
1762 | }
1763 | |
1764 | /* We would like to have some other tests here. TODO. */ |
1765 | |
1766 | return; |
1767 | } |
1768 | |
1769 | /* Use relationship between A and *B to eventually eliminate *B. |
1770 | OP is the operation we consider. */ |
1771 | |
1772 | static void |
1773 | eliminate_implied_condition (enum rtx_code op, rtx a, rtx *b) |
1774 | { |
1775 | switch (op) |
1776 | { |
1777 | case AND: |
1778 | /* If A implies *B, we may replace *B by true. */ |
1779 | if (implies_p (a, *b)) |
1780 | *b = const_true_rtx; |
1781 | break; |
1782 | |
1783 | case IOR: |
1784 | /* If *B implies A, we may replace *B by false. */ |
1785 | if (implies_p (*b, a)) |
1786 | *b = const0_rtx;
1787 | break; |
1788 | |
1789 | default: |
1790 | gcc_unreachable ();
1791 | } |
1792 | } |
1793 | |
1794 | /* Eliminates the conditions in TAIL that are implied by HEAD. OP is the |
1795 | operation we consider. */ |
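/* For example (hypothetical list): under AND, with *HEAD = (ne i 0) and a
   TAIL element (gtu i 0), implies_p proves the element from *HEAD, so the
   element is rewritten to const_true_rtx; under IOR the roles flip and an
   element that implies *HEAD is replaced by const0_rtx. */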
1796 | |
1797 | static void |
1798 | eliminate_implied_conditions (enum rtx_code op, rtx *head, rtx tail) |
1799 | { |
1800 | rtx elt; |
1801 | |
1802 | for (elt = tail; elt; elt = XEXP (elt, 1))
1803 | eliminate_implied_condition (op, *head, &XEXP (elt, 0));
1804 | for (elt = tail; elt; elt = XEXP (elt, 1))
1805 | eliminate_implied_condition (op, XEXP (elt, 0), head);
1806 | } |
1807 | |
1808 | /* Simplifies *EXPR using initial values at the start of the LOOP. If *EXPR |
1809 | is a list, its elements are assumed to be combined using OP. */ |
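/* E.g. for op == AND, *EXPR is an EXPR_LIST of conditions that must all
   hold: each element is simplified recursively, elements that become
   const_true_rtx (the neutral value) are dropped, and one element becoming
   const0_rtx (the aggregate value) collapses the whole list. */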
1810 | |
1811 | static void |
1812 | simplify_using_initial_values (class loop *loop, enum rtx_code op, rtx *expr) |
1813 | { |
1814 | bool expression_valid; |
1815 | rtx head, tail, last_valid_expr; |
1816 | rtx_expr_list *cond_list; |
1817 | rtx_insn *insn; |
1818 | rtx neutral, aggr; |
1819 | regset altered, this_altered; |
1820 | edge e; |
1821 | |
1822 | if (!*expr) |
1823 | return; |
1824 | |
1825 | if (CONSTANT_P (*expr))
1826 | return;
1827 |
1828 | if (GET_CODE (*expr) == EXPR_LIST)
1829 | {
1830 | head = XEXP (*expr, 0);
1831 | tail = XEXP (*expr, 1);
1832 | |
1833 | eliminate_implied_conditions (op, &head, tail); |
1834 | |
1835 | switch (op) |
1836 | { |
1837 | case AND: |
1838 | neutral = const_true_rtx; |
1839 | aggr = const0_rtx;
1840 | break;
1841 |
1842 | case IOR:
1843 | neutral = const0_rtx;
1844 | aggr = const_true_rtx;
1845 | break;
1846 |
1847 | default:
1848 | gcc_unreachable ();
1849 | } |
1850 | |
1851 | simplify_using_initial_values (loop, UNKNOWN, &head); |
1852 | if (head == aggr) |
1853 | { |
1854 | XEXP (*expr, 0) = aggr;
1855 | XEXP (*expr, 1) = NULL_RTX;
1856 | return; |
1857 | } |
1858 | else if (head == neutral) |
1859 | { |
1860 | *expr = tail; |
1861 | simplify_using_initial_values (loop, op, expr); |
1862 | return; |
1863 | } |
1864 | simplify_using_initial_values (loop, op, &tail); |
1865 | |
1866 | if (tail && XEXP (tail, 0) == aggr)
1867 | { |
1868 | *expr = tail; |
1869 | return; |
1870 | } |
1871 | |
1872 | XEXP (*expr, 0) = head;
1873 | XEXP (*expr, 1) = tail;
1874 | return; |
1875 | } |
1876 | |
1877 | gcc_assert (op == UNKNOWN);
1878 |
1879 | replace_single_def_regs (expr);
1880 | if (CONSTANT_P (*expr))
1881 | return;
1882 |
1883 | e = loop_preheader_edge (loop);
1884 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1885 | return;
1886 |
1887 | altered = ALLOC_REG_SET (&reg_obstack);
1888 | this_altered = ALLOC_REG_SET (&reg_obstack);
1889 | |
1890 | expression_valid = true; |
1891 | last_valid_expr = *expr; |
1892 | cond_list = NULL;
1893 | while (1) |
1894 | { |
1895 | insn = BB_END (e->src);
1896 | if (any_condjump_p (insn) && onlyjump_p (insn))
1897 | {
1898 | rtx cond = get_condition (BB_END (e->src), NULL, false, true);
1899 |
1900 | if (cond && (e->flags & EDGE_FALLTHRU))
1901 | cond = reversed_condition (cond);
1902 | if (cond)
1903 | {
1904 | rtx old = *expr;
1905 | simplify_using_condition (cond, expr, altered);
1906 | if (old != *expr)
1907 | {
1908 | rtx note;
1909 | if (CONSTANT_P (*expr))
1910 | goto out;
1911 | for (note = cond_list; note; note = XEXP (note, 1))
1912 | {
1913 | simplify_using_condition (XEXP (note, 0), expr, altered);
1914 | if (CONSTANT_P (*expr))
1915 | goto out;
1916 | }
1917 | }
1918 | cond_list = alloc_EXPR_LIST (0, cond, cond_list);
1919 | }
1920 | }
1921 | |
1922 | FOR_BB_INSNS_REVERSE (e->src, insn)
1923 | {
1924 | rtx src, dest;
1925 | rtx old = *expr;
1926 |
1927 | if (!INSN_P (insn))
1928 | continue;
1929 |
1930 | CLEAR_REG_SET (this_altered);
1931 | note_stores (insn, mark_altered, this_altered);
1932 | if (CALL_P (insn))
1933 | {
1934 | /* Kill all registers that might be clobbered by the call.
1935 | We don't track modes of hard registers, so we need to be
1936 | conservative and assume that partial kills are full kills. */
1937 | function_abi callee_abi = insn_callee_abi (insn);
1938 | IOR_REG_SET_HRS (this_altered,
1939 | callee_abi.full_and_partial_reg_clobbers ());
1940 | }
1941 | |
1942 | if (suitable_set_for_replacement (insn, &dest, &src)) |
1943 | { |
1944 | rtx_expr_list **pnote, **pnote_next; |
1945 | |
1946 | replace_in_expr (expr, dest, src); |
1947 | if (CONSTANT_P (*expr))
1948 | goto out; |
1949 | |
1950 | for (pnote = &cond_list; *pnote; pnote = pnote_next)
1951 | {
1952 | rtx_expr_list *note = *pnote;
1953 | rtx old_cond = XEXP (note, 0);
1954 |
1955 | pnote_next = (rtx_expr_list **)&XEXP (note, 1);
1956 | replace_in_expr (&XEXP (note, 0), dest, src);
1957 |
1958 | /* We can no longer use a condition that has been simplified
1959 | to a constant, and simplify_using_condition will abort if
1960 | we try. */
1961 | if (CONSTANT_P (XEXP (note, 0)))
1962 | {
1963 | *pnote = *pnote_next;
1964 | pnote_next = pnote;
1965 | free_EXPR_LIST_node (note);
1966 | }
1967 | /* Retry simplifications with this condition if either the
1968 | expression or the condition changed. */
1969 | else if (old_cond != XEXP (note, 0) || old != *expr)
1970 | simplify_using_condition (XEXP (note, 0), expr, altered);
1971 | }
1972 | } |
1973 | else |
1974 | { |
1975 | rtx_expr_list **pnote, **pnote_next; |
1976 | |
1977 | /* If we did not use this insn to make a replacement, any overlap |
1978 | between stores in this insn and our expression will cause the |
1979 | expression to become invalid. */ |
1980 | if (altered_reg_used (*expr, this_altered)) |
1981 | goto out; |
1982 | |
1983 | /* Likewise for the conditions. */ |
1984 | for (pnote = &cond_list; *pnote; pnote = pnote_next) |
1985 | { |
1986 | rtx_expr_list *note = *pnote; |
1987 | rtx old_cond = XEXP (note, 0);
1988 |
1989 | pnote_next = (rtx_expr_list **)&XEXP (note, 1);
1990 | if (altered_reg_used (old_cond, this_altered)) |
1991 | { |
1992 | *pnote = *pnote_next; |
1993 | pnote_next = pnote; |
1994 | free_EXPR_LIST_node (note); |
1995 | } |
1996 | } |
1997 | } |
1998 | |
1999 | if (CONSTANT_P (*expr))
2000 | goto out;
2001 |
2002 | IOR_REG_SET (altered, this_altered);
2003 | |
2004 | /* If the expression now contains regs that have been altered, we |
2005 | can't return it to the caller. However, it is still valid for |
2006 | further simplification, so keep searching to see if we can |
2007 | eventually turn it into a constant. */ |
2008 | if (altered_reg_used (*expr, altered)) |
2009 | expression_valid = false; |
2010 | if (expression_valid) |
2011 | last_valid_expr = *expr; |
2012 | } |
2013 | |
2014 | if (!single_pred_p (e->src) |
2015 | || single_pred (e->src) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2016 | break; |
2017 | e = single_pred_edge (e->src); |
2018 | } |
2019 | |
2020 | out: |
2021 | free_EXPR_LIST_list (&cond_list); |
2022 | if (!CONSTANT_P (*expr))
2023 | *expr = last_valid_expr;
2024 | FREE_REG_SET (altered);
2025 | FREE_REG_SET (this_altered);
2026 | } |
2027 | |
2028 | /* Transforms invariant IV into MODE. COND is the comparison in which IV
2029 | occurs as the left operand and SIGNED_P is its signedness; assumptions
2030 | implied by the mode bounds are added to DESC. */
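/* Numeric illustration (hypothetical operands): shortening an invariant
   base B from SImode into unsigned QImode for an LTU comparison uses the
   bounds [0, 255]; the switch below then records (lt B 0) as an
   infiniteness assumption and (gt B 255) as a no-loop assumption in DESC. */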
2031 | |
2032 | static void |
2033 | shorten_into_mode (class rtx_iv *iv, scalar_int_mode mode, |
2034 | enum rtx_code cond, bool signed_p, class niter_desc *desc) |
2035 | { |
2036 | rtx mmin, mmax, cond_over, cond_under; |
2037 | |
2038 | get_mode_bounds (mode, signed_p, iv->extend_mode, &mmin, &mmax); |
2039 | cond_under = simplify_gen_relational (LT, SImode, iv->extend_mode,
2040 | iv->base, mmin);
2041 | cond_over = simplify_gen_relational (GT, SImode, iv->extend_mode,
2042 | iv->base, mmax);
2043 | |
2044 | switch (cond) |
2045 | { |
2046 | case LE: |
2047 | case LT: |
2048 | case LEU: |
2049 | case LTU: |
2050 | if (cond_under != const0_rtx)
2051 | desc->infinite =
2052 | alloc_EXPR_LIST (0, cond_under, desc->infinite);
2053 | if (cond_over != const0_rtx)
2054 | desc->noloop_assumptions =
2055 | alloc_EXPR_LIST (0, cond_over, desc->noloop_assumptions);
2056 | break;
2057 |
2058 | case GE:
2059 | case GT:
2060 | case GEU:
2061 | case GTU:
2062 | if (cond_over != const0_rtx)
2063 | desc->infinite =
2064 | alloc_EXPR_LIST (0, cond_over, desc->infinite);
2065 | if (cond_under != const0_rtx)
2066 | desc->noloop_assumptions =
2067 | alloc_EXPR_LIST (0, cond_under, desc->noloop_assumptions);
2068 | break;
2069 |
2070 | case NE:
2071 | if (cond_over != const0_rtx)
2072 | desc->infinite =
2073 | alloc_EXPR_LIST (0, cond_over, desc->infinite);
2074 | if (cond_under != const0_rtx)
2075 | desc->infinite =
2076 | alloc_EXPR_LIST (0, cond_under, desc->infinite);
2077 | break;
2078 |
2079 | default:
2080 | gcc_unreachable ();
2081 | } |
2082 | |
2083 | iv->mode = mode; |
2084 | iv->extend = signed_p ? IV_SIGN_EXTEND : IV_ZERO_EXTEND; |
2085 | } |
2086 | |
2087 | /* Transforms IV0 and IV1 compared by COND so that they are both compared as |
2088 | subregs of the same mode if possible (sometimes it is necessary to add |
2089 | some assumptions to DESC). */ |
2090 | |
2091 | static bool |
2092 | canonicalize_iv_subregs (class rtx_iv *iv0, class rtx_iv *iv1, |
2093 | enum rtx_code cond, class niter_desc *desc) |
2094 | { |
2095 | scalar_int_mode comp_mode; |
2096 | bool signed_p; |
2097 | |
2098 | /* If the ivs behave specially in the first iteration, or are |
2099 | added/multiplied after extending, we ignore them. */ |
2100 | if (iv0->first_special || iv0->mult != const1_rtx || iv0->delta != const0_rtx)
2101 | return false;
2102 | if (iv1->first_special || iv1->mult != const1_rtx || iv1->delta != const0_rtx)
2103 | return false;
2104 | |
2105 | /* If there is some extend, it must match signedness of the comparison. */ |
2106 | switch (cond) |
2107 | { |
2108 | case LE: |
2109 | case LT: |
2110 | if (iv0->extend == IV_ZERO_EXTEND |
2111 | || iv1->extend == IV_ZERO_EXTEND) |
2112 | return false; |
2113 | signed_p = true; |
2114 | break; |
2115 | |
2116 | case LEU: |
2117 | case LTU: |
2118 | if (iv0->extend == IV_SIGN_EXTEND |
2119 | || iv1->extend == IV_SIGN_EXTEND) |
2120 | return false; |
2121 | signed_p = false; |
2122 | break; |
2123 | |
2124 | case NE: |
2125 | if (iv0->extend != IV_UNKNOWN_EXTEND |
2126 | && iv1->extend != IV_UNKNOWN_EXTEND |
2127 | && iv0->extend != iv1->extend) |
2128 | return false; |
2129 | |
2130 | signed_p = false; |
2131 | if (iv0->extend != IV_UNKNOWN_EXTEND) |
2132 | signed_p = iv0->extend == IV_SIGN_EXTEND; |
2133 | if (iv1->extend != IV_UNKNOWN_EXTEND) |
2134 | signed_p = iv1->extend == IV_SIGN_EXTEND; |
2135 | break; |
2136 | |
2137 | default: |
2138 | gcc_unreachable ();
2139 | } |
2140 | |
2141 | /* Values of both variables should be computed in the same mode. These |
2142 | might indeed be different, if we have comparison like |
2143 | |
2144 | (compare (subreg:SI (iv0)) (subreg:SI (iv1))) |
2145 | |
2146 | and iv0 and iv1 are both ivs iterating in SI mode, but calculated |
2147 | in different modes. This does not seem impossible to handle, but |
2148 | it hardly ever occurs in practice. |
2149 | |
2150 | The only exception is the case when one of operands is invariant. |
2151 | For example pentium 3 generates comparisons like |
2152 | (lt (subreg:HI (reg:SI)) 100). Here we assign HImode to 100, but we |
2153 | definitely do not want this to prevent the optimization. */
2154 | comp_mode = iv0->extend_mode; |
2155 | if (GET_MODE_BITSIZE (comp_mode) < GET_MODE_BITSIZE (iv1->extend_mode)) |
2156 | comp_mode = iv1->extend_mode; |
2157 | |
2158 | if (iv0->extend_mode != comp_mode) |
2159 | { |
2160 | if (iv0->mode != iv0->extend_mode |
2161 | || iv0->step != const0_rtx)
2162 | return false; |
2163 | |
2164 | iv0->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, |
2165 | comp_mode, iv0->base, iv0->mode); |
2166 | iv0->extend_mode = comp_mode; |
2167 | } |
2168 | |
2169 | if (iv1->extend_mode != comp_mode) |
2170 | { |
2171 | if (iv1->mode != iv1->extend_mode |
2172 | || iv1->step != const0_rtx)
2173 | return false; |
2174 | |
2175 | iv1->base = simplify_gen_unary (signed_p ? SIGN_EXTEND : ZERO_EXTEND, |
2176 | comp_mode, iv1->base, iv1->mode); |
2177 | iv1->extend_mode = comp_mode; |
2178 | } |
2179 | |
2180 | /* Check that both ivs belong to a range of a single mode. If one of the |
2181 | operands is an invariant, we may need to shorten it into the common |
2182 | mode. */ |
2183 | if (iv0->mode == iv0->extend_mode |
2184 | && iv0->step == const0_rtx
2185 | && iv0->mode != iv1->mode) |
2186 | shorten_into_mode (iv0, iv1->mode, cond, signed_p, desc); |
2187 | |
2188 | if (iv1->mode == iv1->extend_mode |
2189 | && iv1->step == const0_rtx
2190 | && iv0->mode != iv1->mode) |
2191 | shorten_into_mode (iv1, iv0->mode, swap_condition (cond), signed_p, desc); |
2192 | |
2193 | if (iv0->mode != iv1->mode) |
2194 | return false; |
2195 | |
2196 | desc->mode = iv0->mode; |
2197 | desc->signed_p = signed_p; |
2198 | |
2199 | return true; |
2200 | } |
2201 | |
2202 | /* Tries to estimate the maximum number of iterations in LOOP, and return the |
2203 | result. This function is called from iv_number_of_iterations with |
2204 | a number of fields in DESC already filled in. OLD_NITER is the original |
2205 | expression for the number of iterations, before we tried to simplify it. */ |
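/* Sketch of the arithmetic (hypothetical niter expression): for
   desc->niter_expr == (and (udiv x 4) 63) in unsigned SImode, andmax
   becomes 63, nmax starts at 0xffffffff (possibly improved by one first),
   the UDIV divides it down to about 0x3fffffff, and the final bound is
   MIN (0x3fffffff, 63) == 63. */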
2206 | |
2207 | static uint64_t |
2208 | determine_max_iter (class loop *loop, class niter_desc *desc, rtx old_niter) |
2209 | { |
2210 | rtx niter = desc->niter_expr; |
2211 | rtx mmin, mmax, cmp; |
2212 | uint64_t nmax, inc; |
2213 | uint64_t andmax = 0; |
2214 | |
2215 | /* We used to look for constant operand 0 of AND, |
2216 | but canonicalization should always make this impossible. */ |
2217 | gcc_checking_assert (GET_CODE (niter) != AND
2218 | || !CONST_INT_P (XEXP (niter, 0)));
2219 |
2220 | if (GET_CODE (niter) == AND
2221 | && CONST_INT_P (XEXP (niter, 1)))
2222 | {
2223 | andmax = UINTVAL (XEXP (niter, 1));
2224 | niter = XEXP (niter, 0);
2225 | }
2226 | |
2227 | get_mode_bounds (desc->mode, desc->signed_p, desc->mode, &mmin, &mmax); |
2228 | nmax = UINTVAL (mmax) - UINTVAL (mmin);
2229 |
2230 | if (GET_CODE (niter) == UDIV)
2231 | {
2232 | if (!CONST_INT_P (XEXP (niter, 1)))
2233 | return nmax;
2234 | inc = INTVAL (XEXP (niter, 1));
2235 | niter = XEXP (niter, 0);
2236 | } |
2237 | else |
2238 | inc = 1; |
2239 | |
2240 | /* We could use a binary search here, but for now improving the upper |
2241 | bound by just one eliminates one important corner case. */ |
2242 | cmp = simplify_gen_relational (desc->signed_p ? LT : LTU, VOIDmode,
2243 | desc->mode, old_niter, mmax);
2244 | simplify_using_initial_values (loop, UNKNOWN, &cmp);
2245 | if (cmp == const_true_rtx)
2246 | {
2247 | nmax--;
2248 |
2249 | if (dump_file)
2250 | fprintf (dump_file, ";; improved upper bound by one.\n");
2251 | }
2252 | nmax /= inc;
2253 | if (andmax)
2254 | nmax = MIN (nmax, andmax);
2255 | if (dump_file)
2256 | fprintf (dump_file, ";; Determined upper bound %" PRId64".\n",
2257 | nmax);
2258 | return nmax; |
2259 | } |
2260 | |
2261 | /* Computes number of iterations of the CONDITION in INSN in LOOP and stores |
2262 | the result into DESC. Very similar to determine_number_of_iterations |
2263 | (basically its rtl version), complicated by things like subregs. */ |
2264 | |
2265 | static void |
2266 | iv_number_of_iterations (class loop *loop, rtx_insn *insn, rtx condition, |
2267 | class niter_desc *desc) |
2268 | { |
2269 | rtx op0, op1, delta, step, bound, may_xform, tmp, tmp0, tmp1; |
2270 | class rtx_iv iv0, iv1; |
2271 | rtx assumption, may_not_xform; |
2272 | enum rtx_code cond; |
2273 | machine_mode nonvoid_mode; |
2274 | scalar_int_mode comp_mode; |
2275 | rtx mmin, mmax, mode_mmin, mode_mmax; |
2276 | uint64_t s, size, d, inv, max, up, down; |
2277 | int64_t inc, step_val; |
2278 | int was_sharp = false; |
2279 | rtx old_niter; |
2280 | bool step_is_pow2; |
2281 | |
2282 | /* The meaning of these assumptions is this:
2283 | if !assumptions
2284 | then the rest of the information does not have to be valid
2285 | if noloop_assumptions then the loop does not roll
2286 | if infinite then this exit is never used */
2287 | |
2288 | desc->assumptions = NULL_RTX;
2289 | desc->noloop_assumptions = NULL_RTX;
2290 | desc->infinite = NULL_RTX;
2291 | desc->simple_p = true;
2292 |
2293 | desc->const_iter = false;
2294 | desc->niter_expr = NULL_RTX;
2295 | |
2296 | cond = GET_CODE (condition);
2297 | gcc_assert (COMPARISON_P (condition));
2298 |
2299 | nonvoid_mode = GET_MODE (XEXP (condition, 0));
2300 | if (nonvoid_mode == VOIDmode)
2301 | nonvoid_mode = GET_MODE (XEXP (condition, 1));
2302 | /* The constant comparisons should be folded. */
2303 | gcc_assert (nonvoid_mode != VOIDmode);
2304 | |
2305 | /* We only handle integers or pointers. */ |
2306 | scalar_int_mode mode; |
2307 | if (!is_a <scalar_int_mode> (nonvoid_mode, &mode)) |
2308 | goto fail; |
2309 | |
2310 | op0 = XEXP (condition, 0);
2311 | if (!iv_analyze (insn, mode, op0, &iv0))
2312 | goto fail;
2313 |
2314 | op1 = XEXP (condition, 1);
2315 | if (!iv_analyze (insn, mode, op1, &iv1))
2316 | goto fail;
2317 |
2318 | if (GET_MODE_BITSIZE (iv0.extend_mode) > HOST_BITS_PER_WIDE_INT
2319 | || GET_MODE_BITSIZE (iv1.extend_mode) > HOST_BITS_PER_WIDE_INT)
2320 | goto fail;
2321 | |
2322 | /* Check condition and normalize it. */ |
2323 | |
2324 | switch (cond) |
2325 | { |
2326 | case GE: |
2327 | case GT: |
2328 | case GEU: |
2329 | case GTU: |
2330 | std::swap (iv0, iv1); |
2331 | cond = swap_condition (cond); |
2332 | break; |
2333 | case NE: |
2334 | case LE: |
2335 | case LEU: |
2336 | case LT: |
2337 | case LTU: |
2338 | break; |
2339 | default: |
2340 | goto fail; |
2341 | } |
2342 | |
2343 | /* Handle extends. This is relatively nontrivial, so we only try in some |
2344 | easy cases, when we can canonicalize the ivs (possibly by adding some |
2345 | assumptions) to shape subreg (base + i * step). This function also fills |
2346 | in desc->mode and desc->signed_p. */ |
2347 | |
2348 | if (!canonicalize_iv_subregs (&iv0, &iv1, cond, desc)) |
2349 | goto fail; |
2350 | |
2351 | comp_mode = iv0.extend_mode; |
2352 | mode = iv0.mode; |
2353 | size = GET_MODE_PRECISION (mode); |
2354 | get_mode_bounds (mode, (cond == LE || cond == LT), comp_mode, &mmin, &mmax); |
2355 | mode_mmin = lowpart_subreg (mode, mmin, comp_mode); |
2356 | mode_mmax = lowpart_subreg (mode, mmax, comp_mode); |
2357 | |
2358 | if (!CONST_INT_P (iv0.step) || !CONST_INT_P (iv1.step))
2359 | goto fail; |
2360 | |
2361 | /* We can take care of the case of two induction variables chasing each other |
2362 | if the test is NE. I have never seen a loop using it, but still it is |
2363 | cool. */ |
2364 | if (iv0.step != const0_rtx && iv1.step != const0_rtx)
2365 | {
2366 | if (cond != NE)
2367 | goto fail;
2368 |
2369 | iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step);
2370 | iv1.step = const0_rtx;
2371 | } |
2372 | |
2373 | iv0.step = lowpart_subreg (mode, iv0.step, comp_mode); |
2374 | iv1.step = lowpart_subreg (mode, iv1.step, comp_mode); |
2375 | |
2376 | /* This is either an infinite loop or one that ends immediately, depending
2377 | on initial values. Unswitching should remove this kind of condition. */
2378 | if (iv0.step == const0_rtx && iv1.step == const0_rtx)
2379 | goto fail; |
2380 | |
2381 | if (cond != NE) |
2382 | { |
2383 | if (iv0.step == const0_rtx)
2384 | step_val = -INTVAL (iv1.step);
2385 | else
2386 | step_val = INTVAL (iv0.step);
2387 | |
2388 | /* Ignore loops of while (i-- < 10) type. */ |
2389 | if (step_val < 0) |
2390 | goto fail; |
2391 | |
2392 | step_is_pow2 = !(step_val & (step_val - 1)); |
2393 | } |
2394 | else |
2395 | { |
2396 | /* We do not care about whether the step is power of two in this |
2397 | case. */ |
2398 | step_is_pow2 = false; |
2399 | step_val = 0; |
Value stored to 'step_val' is never read | |
2400 | } |
2401 | |
2402 | /* Some more condition normalization. We must record some assumptions |
2403 | due to overflows. */ |
2404 | switch (cond) |
2405 | { |
2406 | case LT: |
2407 | case LTU: |
2408 | /* We want to take care only of non-sharp relationals; this is easy,
2409 | as in the cases where the overflow would make the transformation
2410 | unsafe the loop does not roll. Seemingly it would make more sense
2411 | to take care of sharp relationals instead, as NE is more similar to
2412 | them, but the problem is that here the transformation would be more
2413 | difficult due to possibly infinite loops. */
2414 | if (iv0.step == const0_rtx)
2415 | {
2416 | tmp = lowpart_subreg (mode, iv0.base, comp_mode);
2417 | assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
2418 | mode_mmax);
2419 | if (assumption == const_true_rtx)
2420 | goto zero_iter_simplify;
2421 | iv0.base = simplify_gen_binary (PLUS, comp_mode,
2422 | iv0.base, const1_rtx);
2423 | }
2424 | else
2425 | {
2426 | tmp = lowpart_subreg (mode, iv1.base, comp_mode);
2427 | assumption = simplify_gen_relational (EQ, SImode, mode, tmp,
2428 | mode_mmin);
2429 | if (assumption == const_true_rtx)
2430 | goto zero_iter_simplify;
2431 | iv1.base = simplify_gen_binary (PLUS, comp_mode,
2432 | iv1.base, constm1_rtx);
2433 | }
2434 | |
2435 | if (assumption != const0_rtx)
2436 | desc->noloop_assumptions = |
2437 | alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); |
2438 | cond = (cond == LT) ? LE : LEU; |
2439 | |
2440 | /* It will be useful to be able to tell the difference once more in |
2441 | LE -> NE reduction. */ |
2442 | was_sharp = true; |
2443 | break; |
2444 | default: ; |
2445 | } |
2446 | |
2447 | /* Take care of trivially infinite loops. */ |
2448 | if (cond != NE) |
2449 | { |
2450 | if (iv0.step == const0_rtx)
2451 | { |
2452 | tmp = lowpart_subreg (mode, iv0.base, comp_mode); |
2453 | if (rtx_equal_p (tmp, mode_mmin)) |
2454 | { |
2455 | desc->infinite = |
2456 | alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
2457 | /* Fill in the remaining fields somehow. */ |
2458 | goto zero_iter_simplify; |
2459 | } |
2460 | } |
2461 | else |
2462 | { |
2463 | tmp = lowpart_subreg (mode, iv1.base, comp_mode); |
2464 | if (rtx_equal_p (tmp, mode_mmax)) |
2465 | { |
2466 | desc->infinite = |
2467 | alloc_EXPR_LIST (0, const_true_rtx, NULL_RTX);
2468 | /* Fill in the remaining fields somehow. */ |
2469 | goto zero_iter_simplify; |
2470 | } |
2471 | } |
2472 | } |
2473 | |
2474 | /* If we can, we want to take care of NE conditions instead of size
2475 | comparisons, as they are much more friendly (most importantly
2476 | this takes care of special handling of loops with step 1). We can
2477 | do it if we first check that the upper bound is greater than or equal
2478 | to the lower bound, their difference is constant c modulo step and
2479 | there is not an overflow. */
2480 | if (cond != NE) |
2481 | { |
2482 | if (iv0.step == const0_rtx)
2483 | step = simplify_gen_unary (NEG, comp_mode, iv1.step, comp_mode);
2484 | else
2485 | step = iv0.step;
2486 | step = lowpart_subreg (mode, step, comp_mode);
2487 | delta = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
2488 | delta = lowpart_subreg (mode, delta, comp_mode);
2489 | delta = simplify_gen_binary (UMOD, mode, delta, step);
2490 | may_xform = const0_rtx;
2491 | may_not_xform = const_true_rtx;
2492 | |
2493 | if (CONST_INT_P (delta))
2494 | {
2495 | if (was_sharp && INTVAL (delta) == INTVAL (step) - 1)
2496 | {
2497 | /* A special case. We have transformed condition of type
2498 | for (i = 0; i < 4; i += 4)
2499 | into
2500 | for (i = 0; i <= 3; i += 4)
2501 | obviously if the test for overflow during that transformation
2502 | passed, we cannot overflow here. Most importantly any
2503 | loop with sharp end condition and step 1 falls into this
2504 | category, so handling this case specially is definitely
2505 | worth the troubles. */
2506 | may_xform = const_true_rtx;
2507 | }
2508 | else if (iv0.step == const0_rtx)
2509 | {
2510 | bound = simplify_gen_binary (PLUS, comp_mode, mmin, step);
2511 | bound = simplify_gen_binary (MINUS, comp_mode, bound, delta);
2512 | bound = lowpart_subreg (mode, bound, comp_mode);
2513 | tmp = lowpart_subreg (mode, iv0.base, comp_mode);
2514 | may_xform = simplify_gen_relational (cond, SImode, mode,
2515 | bound, tmp);
2516 | may_not_xform = simplify_gen_relational (reverse_condition (cond),
2517 | SImode, mode,
2518 | bound, tmp);
2519 | }
2520 | else
2521 | {
2522 | bound = simplify_gen_binary (MINUS, comp_mode, mmax, step);
2523 | bound = simplify_gen_binary (PLUS, comp_mode, bound, delta);
2524 | bound = lowpart_subreg (mode, bound, comp_mode);
2525 | tmp = lowpart_subreg (mode, iv1.base, comp_mode);
2526 | may_xform = simplify_gen_relational (cond, SImode, mode,
2527 | tmp, bound);
2528 | may_not_xform = simplify_gen_relational (reverse_condition (cond),
2529 | SImode, mode,
2530 | tmp, bound);
2531 | }
2532 | } |
2533 | |
2534 | if (may_xform != const0_rtx)
2535 | { |
2536 | /* We perform the transformation always provided that it is not |
2537 | completely senseless. This is OK, as we would need this assumption |
2538 | to determine the number of iterations anyway. */ |
2539 | if (may_xform != const_true_rtx) |
2540 | { |
2541 | /* If the step is a power of two and the final value we have |
2542 | computed overflows, the cycle is infinite. Otherwise it |
2543 | is nontrivial to compute the number of iterations. */ |
2544 | if (step_is_pow2) |
2545 | desc->infinite = alloc_EXPR_LIST (0, may_not_xform, |
2546 | desc->infinite); |
2547 | else |
2548 | desc->assumptions = alloc_EXPR_LIST (0, may_xform, |
2549 | desc->assumptions); |
2550 | } |
2551 | |
2552 | /* We are going to lose some information about upper bound on |
2553 | number of iterations in this step, so record the information |
2554 | here. */ |
2555 | inc = INTVAL (iv0.step) - INTVAL (iv1.step);
2556 | if (CONST_INT_P (iv1.base))
2557 | up = INTVAL (iv1.base);
2558 | else
2559 | up = INTVAL (mode_mmax) - inc;
2560 | down = INTVAL (CONST_INT_P (iv0.base)
2561 | ? iv0.base
2562 | : mode_mmin);
2563 | max = (up - down) / inc + 1; |
2564 | if (!desc->infinite |
2565 | && !desc->assumptions) |
2566 | record_niter_bound (loop, max, false, true); |
2567 | |
2568 | if (iv0.step == const0_rtx)
2569 | { |
2570 | iv0.base = simplify_gen_binary (PLUS, comp_mode, iv0.base, delta); |
2571 | iv0.base = simplify_gen_binary (MINUS, comp_mode, iv0.base, step); |
2572 | } |
2573 | else |
2574 | { |
2575 | iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, delta); |
2576 | iv1.base = simplify_gen_binary (PLUS, comp_mode, iv1.base, step); |
2577 | } |
2578 | |
2579 | tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); |
2580 | tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); |
2581 | assumption = simplify_gen_relational (reverse_condition (cond), |
2582 | SImode, mode, tmp0, tmp1);
2583 | if (assumption == const_true_rtx)
2584 | goto zero_iter_simplify;
2585 | else if (assumption != const0_rtx)
2586 | desc->noloop_assumptions = |
2587 | alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); |
2588 | cond = NE; |
2589 | } |
2590 | } |
2591 | |
2592 | /* Count the number of iterations. */ |
2593 | if (cond == NE) |
2594 | { |
2595 | /* Everything we do here is just arithmetic modulo the size of the mode.
2596 | This makes us able to do more involved computations of the number of
2597 | iterations than in other cases. First transform the condition into
2598 | shape s * i <> c, with s positive. */
2599 | iv1.base = simplify_gen_binary (MINUS, comp_mode, iv1.base, iv0.base);
2600 | iv0.base = const0_rtx;
2601 | iv0.step = simplify_gen_binary (MINUS, comp_mode, iv0.step, iv1.step);
2602 | iv1.step = const0_rtx;
2603 | if (INTVAL (iv0.step) < 0)
2604 | {
2605 | iv0.step = simplify_gen_unary (NEG, comp_mode, iv0.step, comp_mode);
2606 | iv1.base = simplify_gen_unary (NEG, comp_mode, iv1.base, comp_mode);
2607 | }
2608 | iv0.step = lowpart_subreg (mode, iv0.step, comp_mode);
2609 | |
2610 | /* Let d = gcd (s, size of mode). If d does not divide c, the loop
2611 | is infinite. Otherwise, the number of iterations is
2612 | (inverse(s/d) * (c/d)) mod (size of mode/d). */
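/* Worked example (hypothetical numbers, QImode, so size == 8): for
   6 * i != c we get d == 2, s == 3 and size drops to 7. If c is odd the
   loop is infinite; otherwise niter == (inverse (3, 7) * (c / 2)) mod 2^7.
   inverse (3, 7) == 43 because 3 * 43 == 129 == 1 (mod 128); e.g. c == 12
   gives (6 * 43) mod 128 == 2 iterations, and indeed 6 * 2 == 12. */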
2613 | s = INTVAL (iv0.step); d = 1;
2614 | while (s % 2 != 1)
2615 | {
2616 | s /= 2;
2617 | d *= 2;
2618 | size--;
2619 | }
2620 | bound = GEN_INT (((uint64_t) 1 << (size - 1) << 1) - 1);
2621 |
2622 | tmp1 = lowpart_subreg (mode, iv1.base, comp_mode);
2623 | tmp = simplify_gen_binary (UMOD, mode, tmp1, gen_int_mode (d, mode));
2624 | assumption = simplify_gen_relational (NE, SImode, mode, tmp, const0_rtx);
2625 | desc->infinite = alloc_EXPR_LIST (0, assumption, desc->infinite);
2626 |
2627 | tmp = simplify_gen_binary (UDIV, mode, tmp1, gen_int_mode (d, mode));
2628 | inv = inverse (s, size);
2629 | tmp = simplify_gen_binary (MULT, mode, tmp, gen_int_mode (inv, mode));
2630 | desc->niter_expr = simplify_gen_binary (AND, mode, tmp, bound);
2631 | } |
2632 | else |
2633 | { |
2634 | if (iv1.step == const0_rtx)
2635 | /* Condition in shape a + s * i <= b |
2636 | We must know that b + s does not overflow and a <= b + s and then we |
2637 | can compute number of iterations as (b + s - a) / s. (It might |
2638 | seem that we in fact could be more clever about testing the b + s |
2639 | overflow condition using some information about b - a mod s, |
2640 | but it was already taken into account during LE -> NE transform). */ |
2641 | { |
2642 | step = iv0.step; |
2643 | tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); |
2644 | tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); |
2645 | |
2646 | bound = simplify_gen_binary (MINUS, mode, mode_mmax, |
2647 | lowpart_subreg (mode, step, |
2648 | comp_mode)); |
2649 | if (step_is_pow2) |
2650 | { |
2651 | rtx t0, t1; |
2652 | |
2653 | /* If s is power of 2, we know that the loop is infinite if |
2654 | a % s <= b % s and b + s overflows. */ |
2655 | assumption = simplify_gen_relational (reverse_condition (cond), |
2656 | SImode, mode,
2657 | tmp1, bound);
2658 |
2659 | t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
2660 | t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
2661 | tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
2662 | assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
2663 | desc->infinite =
2664 | alloc_EXPR_LIST (0, assumption, desc->infinite);
2665 | }
2666 | else
2667 | {
2668 | assumption = simplify_gen_relational (cond, SImode, mode,
2669 | tmp1, bound); |
2670 | desc->assumptions = |
2671 | alloc_EXPR_LIST (0, assumption, desc->assumptions); |
2672 | } |
2673 | |
2674 | tmp = simplify_gen_binary (PLUS, comp_mode, iv1.base, iv0.step); |
2675 | tmp = lowpart_subreg (mode, tmp, comp_mode); |
2676 | assumption = simplify_gen_relational (reverse_condition (cond), |
2677 | SImode, mode, tmp0, tmp);
2678 | |
2679 | delta = simplify_gen_binary (PLUS, mode, tmp1, step); |
2680 | delta = simplify_gen_binary (MINUS, mode, delta, tmp0); |
2681 | } |
2682 | else |
2683 | { |
2684 | /* Condition in shape a <= b - s * i |
2685 | We must know that a - s does not overflow and a - s <= b and then |
2686 | we can again compute number of iterations as (b - (a - s)) / s. */ |
2687 | step = simplify_gen_unary (NEG, mode, iv1.step, mode); |
2688 | tmp0 = lowpart_subreg (mode, iv0.base, comp_mode); |
2689 | tmp1 = lowpart_subreg (mode, iv1.base, comp_mode); |
2690 | |
2691 | bound = simplify_gen_binary (PLUS, mode, mode_mmin, |
2692 | lowpart_subreg (mode, step, comp_mode)); |
2693 | if (step_is_pow2) |
2694 | { |
2695 | rtx t0, t1; |
2696 | |
2697 | /* If s is power of 2, we know that the loop is infinite if |
2698 | a % s <= b % s and a - s overflows. */ |
2699 | assumption = simplify_gen_relational (reverse_condition (cond), |
2700 | SImode, mode,
2701 | bound, tmp0);
2702 |
2703 | t0 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp0), step);
2704 | t1 = simplify_gen_binary (UMOD, mode, copy_rtx (tmp1), step);
2705 | tmp = simplify_gen_relational (cond, SImode, mode, t0, t1);
2706 | assumption = simplify_gen_binary (AND, SImode, assumption, tmp);
2707 | desc->infinite =
2708 | alloc_EXPR_LIST (0, assumption, desc->infinite);
2709 | }
2710 | else
2711 | {
2712 | assumption = simplify_gen_relational (cond, SImode, mode,
2713 | bound, tmp0); |
2714 | desc->assumptions = |
2715 | alloc_EXPR_LIST (0, assumption, desc->assumptions); |
2716 | } |
2717 | |
2718 | tmp = simplify_gen_binary (PLUS, comp_mode, iv0.base, iv1.step); |
2719 | tmp = lowpart_subreg (mode, tmp, comp_mode); |
2720 | assumption = simplify_gen_relational (reverse_condition (cond), |
2721 | SImode, mode,
2722 | tmp, tmp1); |
2723 | delta = simplify_gen_binary (MINUS, mode, tmp0, step); |
2724 | delta = simplify_gen_binary (MINUS, mode, tmp1, delta); |
2725 | } |
2726 | if (assumption == const_true_rtx) |
2727 | goto zero_iter_simplify; |
2728 | else if (assumption != const0_rtx)
2729 | desc->noloop_assumptions = |
2730 | alloc_EXPR_LIST (0, assumption, desc->noloop_assumptions); |
2731 | delta = simplify_gen_binary (UDIV, mode, delta, step); |
2732 | desc->niter_expr = delta; |
2733 | } |
2734 | |
2735 | old_niter = desc->niter_expr; |
2736 | |
2737 | simplify_using_initial_values (loop, AND, &desc->assumptions); |
2738 | if (desc->assumptions |
2739 | && XEXP (desc->assumptions, 0) == const0_rtx)
2740 | goto fail; |
2741 | simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); |
2742 | simplify_using_initial_values (loop, IOR, &desc->infinite); |
2743 | simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr); |
2744 | |
2745 | /* Rerun the simplification. Consider code (created by copying loop headers) |
2746 | |
2747 | i = 0; |
2748 | |
2749 | if (0 < n) |
2750 | { |
2751 | do |
2752 | { |
2753 | i++; |
2754 | } while (i < n); |
2755 | } |
2756 | |
2757 | The first pass determines that i = 0, the second pass uses it to eliminate |
2758 | noloop assumption. */ |
2759 | |
2760 | simplify_using_initial_values (loop, AND, &desc->assumptions); |
2761 | if (desc->assumptions |
2762 | && XEXP (desc->assumptions, 0) == const0_rtx)
2763 | goto fail; |
2764 | simplify_using_initial_values (loop, IOR, &desc->noloop_assumptions); |
2765 | simplify_using_initial_values (loop, IOR, &desc->infinite); |
2766 | simplify_using_initial_values (loop, UNKNOWN, &desc->niter_expr); |
2767 | |
2768 | if (desc->noloop_assumptions |
2769 | && XEXP (desc->noloop_assumptions, 0) == const_true_rtx)
2770 | goto zero_iter; |
2771 | |
2772 | if (CONST_INT_P (desc->niter_expr))
2773 | {
2774 | uint64_t val = INTVAL (desc->niter_expr);
2775 |
2776 | desc->const_iter = true;
2777 | desc->niter = val & GET_MODE_MASK (desc->mode);
2778 | if (!desc->infinite |
2779 | && !desc->assumptions) |
2780 | record_niter_bound (loop, desc->niter, false, true); |
2781 | } |
2782 | else |
2783 | { |
2784 | max = determine_max_iter (loop, desc, old_niter); |
2785 | if (!max) |
2786 | goto zero_iter_simplify; |
2787 | if (!desc->infinite |
2788 | && !desc->assumptions) |
2789 | record_niter_bound (loop, max, false, true); |
2790 | |
2791 | /* simplify_using_initial_values does a copy propagation on the registers |
2792 | in the expression for the number of iterations. This prolongs life |
2793 | ranges of registers and increases register pressure, and usually |
2794 | brings no gain (and if it happens to do, the cse pass will take care |
2795 | of it anyway). So prevent this behavior, unless it enabled us to |
2796 | derive that the number of iterations is a constant. */ |
2797 | desc->niter_expr = old_niter; |
2798 | } |
2799 | |
2800 | return; |
2801 | |
2802 | zero_iter_simplify: |
2803 | /* Simplify the assumptions. */ |
2804 | simplify_using_initial_values (loop, AND, &desc->assumptions); |
2805 | if (desc->assumptions |
2806 | && XEXP (desc->assumptions, 0) == const0_rtx)
2807 | goto fail; |
2808 | simplify_using_initial_values (loop, IOR, &desc->infinite); |
2809 | |
2810 | /* Fallthru. */ |
2811 | zero_iter: |
2812 | desc->const_iter = true; |
2813 | desc->niter = 0; |
2814 | record_niter_bound (loop, 0, true, true); |
2815 | desc->noloop_assumptions = NULL_RTX;
2816 | desc->niter_expr = const0_rtx;
2817 | return; |
2818 | |
2819 | fail: |
2820 | desc->simple_p = false; |
2821 | return; |
2822 | } |
2823 | |
2824 | /* Checks whether E is a simple exit from LOOP and stores its description |
2825 | into DESC. */ |
2826 | |
2827 | static void |
2828 | check_simple_exit (class loop *loop, edge e, class niter_desc *desc) |
2829 | { |
2830 | basic_block exit_bb; |
2831 | rtx condition; |
2832 | rtx_insn *at; |
2833 | edge ein; |
2834 | |
2835 | exit_bb = e->src; |
2836 | desc->simple_p = false; |
2837 | |
2838 | /* It must belong directly to the loop. */ |
2839 | if (exit_bb->loop_father != loop) |
2840 | return; |
2841 | |
2842 | /* It must be tested (at least) once during any iteration. */ |
2843 | if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit_bb)) |
2844 | return; |
2845 | |
2846 | /* It must end in a simple conditional jump. */ |
2847 | if (!any_condjump_p (BB_END (exit_bb)) || !onlyjump_p (BB_END (exit_bb)))
2848 | return; |
2849 | |
2850 | ein = EDGE_SUCC (exit_bb, 0);
2851 | if (ein == e)
2852 | ein = EDGE_SUCC (exit_bb, 1);
2853 | |
2854 | desc->out_edge = e; |
2855 | desc->in_edge = ein; |
2856 | |
2857 | /* Test whether the condition is suitable. */ |
2858 | if (!(condition = get_condition (BB_END (ein->src), &at, false, false)))
2859 | return; |
2860 | |
2861 | if (ein->flags & EDGE_FALLTHRU) |
2862 | { |
2863 | condition = reversed_condition (condition); |
2864 | if (!condition) |
2865 | return; |
2866 | } |
2867 | |
2868 | /* Check that we are able to determine number of iterations and fill |
2869 | in information about it. */ |
2870 | iv_number_of_iterations (loop, at, condition, desc); |
2871 | } |
2872 | |
2873 | /* Finds a simple exit of LOOP and stores its description into DESC. */ |
2874 | |
2875 | static void |
2876 | find_simple_exit (class loop *loop, class niter_desc *desc) |
2877 | { |
2878 | unsigned i; |
2879 | basic_block *body; |
2880 | edge e; |
2881 | class niter_desc act; |
2882 | bool any = false; |
2883 | edge_iterator ei; |
2884 | |
2885 | desc->simple_p = false; |
2886 | body = get_loop_body (loop); |
2887 | |
2888 | for (i = 0; i < loop->num_nodes; i++) |
2889 | { |
2890 | FOR_EACH_EDGE (e, ei, body[i]->succs)
2891 | { |
2892 | if (flow_bb_inside_loop_p (loop, e->dest)) |
2893 | continue; |
2894 | |
2895 | check_simple_exit (loop, e, &act); |
2896 | if (!act.simple_p) |
2897 | continue; |
2898 | |
2899 | if (!any) |
2900 | any = true; |
2901 | else |
2902 | { |
2903 | /* Prefer constant iterations; the less the better. */ |
2904 | if (!act.const_iter |
2905 | || (desc->const_iter && act.niter >= desc->niter)) |
2906 | continue; |
2907 | |
2908 | /* Also, if the actual exit may be infinite while the old one
2909 | is not, prefer the old one. */
2910 | if (act.infinite && !desc->infinite) |
2911 | continue; |
2912 | } |
2913 | |
2914 | *desc = act; |
2915 | } |
2916 | } |
2917 | |
2918 | if (dump_file) |
2919 | { |
2920 | if (desc->simple_p) |
2921 | { |
2922 | fprintf (dump_file, "Loop %d is simple:\n", loop->num); |
2923 | fprintf (dump_file, " simple exit %d -> %d\n", |
2924 | desc->out_edge->src->index, |
2925 | desc->out_edge->dest->index); |
2926 | if (desc->assumptions) |
2927 | { |
2928 | fprintf (dump_file, " assumptions: "); |
2929 | print_rtl (dump_file, desc->assumptions); |
2930 | fprintf (dump_file, "\n"); |
2931 | } |
2932 | if (desc->noloop_assumptions) |
2933 | { |
2934 | fprintf (dump_file, " does not roll if: "); |
2935 | print_rtl (dump_file, desc->noloop_assumptions); |
2936 | fprintf (dump_file, "\n"); |
2937 | } |
2938 | if (desc->infinite) |
2939 | { |
2940 | fprintf (dump_file, " infinite if: "); |
2941 | print_rtl (dump_file, desc->infinite); |
2942 | fprintf (dump_file, "\n"); |
2943 | } |
2944 | |
2945 | fprintf (dump_file, " number of iterations: "); |
2946 | print_rtl (dump_file, desc->niter_expr); |
2947 | fprintf (dump_file, "\n"); |
2948 | |
2949 | fprintf (dump_file, " upper bound: %li\n", |
2950 | (long)get_max_loop_iterations_int (loop)); |
2951 | fprintf (dump_file, " likely upper bound: %li\n", |
2952 | (long)get_likely_max_loop_iterations_int (loop)); |
2953 | fprintf (dump_file, " realistic bound: %li\n", |
2954 | (long)get_estimated_loop_iterations_int (loop)); |
2955 | } |
2956 | else |
2957 | fprintf (dump_file, "Loop %d is not simple.\n", loop->num); |
2958 | } |
2959 | |
2960 | /* Fix up the finiteness if possible. We can only do it for a single exit,
2961 | since the loop is finite, but it's possible that we predicate one loop
2962 | exit to be finite which cannot be determined to be finite in the
2963 | middle end as well. It results in incorrect predicate information on
2964 | the exit condition expression. For example, if it says
2965 | [(int) _1 + -8, +, -8] != 0 is finite, it means _1 can exactly divide -8. */
2966 | if (desc->infinite && single_exit (loop) && finite_loop_p (loop)) |
2967 | { |
2968 | desc->infinite = NULL_RTX;
2969 | if (dump_file) |
2970 | fprintf (dump_file, " infinite updated to finite.\n"); |
2971 | } |
2972 | |
2973 | free (body); |
2974 | } |
2975 | |
2976 | /* Creates a simple loop description of LOOP if it was not computed |
2977 | already. */ |
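/* A hypothetical caller, e.g. an unrolling heuristic, would use it as:

     class niter_desc *desc = get_simple_loop_desc (loop);
     if (desc->simple_p && desc->const_iter && desc->niter < 8)
       ... unroll the loop body completely ...

   with free_simple_loop_desc (loop) releasing the description when the
   loop itself is destroyed. */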
2978 | |
2979 | class niter_desc * |
2980 | get_simple_loop_desc (class loop *loop) |
2981 | { |
2982 | class niter_desc *desc = simple_loop_desc (loop); |
2983 | |
2984 | if (desc) |
2985 | return desc; |
2986 | |
2987 | /* At least desc->infinite is not always initialized by
2988 | find_simple_exit. */
2989 | desc = ggc_cleared_alloc<niter_desc> (); |
2990 | iv_analysis_loop_init (loop); |
2991 | find_simple_exit (loop, desc); |
2992 | loop->simple_loop_desc = desc; |
2993 | return desc; |
2994 | } |
2995 | |
2996 | /* Releases simple loop description for LOOP. */ |
2997 | |
2998 | void |
2999 | free_simple_loop_desc (class loop *loop) |
3000 | { |
3001 | class niter_desc *desc = simple_loop_desc (loop); |
3002 | |
3003 | if (!desc) |
3004 | return; |
3005 | |
3006 | ggc_free (desc); |
3007 | loop->simple_loop_desc = NULL;
3008 | } |