Bug Summary

File: build/gcc/rtlanal.cc
Warning: line 107, column 25
Called C++ object pointer is null
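
The path below runs through generic_subrtx_iterator<T>::add_single_to_queue: on the reported path, `base' does not point at the function's local stack array, so control falls through to the `array.heap->length ()' call at line 107 while the analyzer still considers `array.heap' possibly null. A minimal, self-contained sketch of that pattern (simplified types for illustration only, not GCC's real vec API):

#include <cstddef>

struct int_vec
{
  size_t num;
  size_t length () const { return num; }	/* Needs a non-null `this'.  */
};

/* Null-tolerant wrapper, analogous in spirit to GCC's vec_safe_length.  */
static inline size_t
safe_length (const int_vec *v)
{
  return v ? v->num : 0;
}

struct worklist
{
  int stack[16];
  int_vec *heap;	/* Starts out null; allocated on first spill.  */
};

size_t
query (const worklist &w, const int *base)
{
  if (base == w.stack)
    return 16;		/* Still using the stack array.  */
  /* If this point is reachable while w.heap is still null, this is the
     shape of the dereference flagged at rtlanal.cc:107; safe_length (w.heap)
     would tolerate the null.  */
  return w.heap->length ();
}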

Annotated Source Code


clang -cc1 -cc1 -triple x86_64-suse-linux -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name rtlanal.cc -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=cplusplus -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -analyzer-config-compatibility-mode=true -mrelocation-model static -mframe-pointer=none -fmath-errno -ffp-contract=on -fno-rounding-math -mconstructor-aliases -funwind-tables=2 -target-cpu x86-64 -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -resource-dir /usr/lib64/clang/15.0.7 -D IN_GCC -D HAVE_CONFIG_H -I . -I . -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/. -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcpp/include -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libcody -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libdecnumber/bid -I ../libdecnumber -I /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/../libbacktrace -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13 -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/x86_64-suse-linux -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../include/c++/13/backward -internal-isystem /usr/lib64/clang/15.0.7/include -internal-isystem /usr/local/include -internal-isystem /usr/bin/../lib64/gcc/x86_64-suse-linux/13/../../../../x86_64-suse-linux/include -internal-externc-isystem /include -internal-externc-isystem /usr/include -O2 -Wno-narrowing -Wwrite-strings -Wno-long-long -Wno-variadic-macros -Wno-overlength-strings -fdeprecated-macro -fdebug-compilation-dir=/buildworker/marxinbox-gcc-clang-static-analyzer/objdir/gcc -ferror-limit 19 -fno-rtti -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -analyzer-output=plist-html -analyzer-config silence-checkers=core.NullDereference -faddrsig -D__GCC_HAVE_DWARF2_CFI_ASM=1 -o /buildworker/marxinbox-gcc-clang-static-analyzer/objdir/clang-static-analyzer/2023-03-27-141847-20772-1/report-kjurYM.plist -x c++ /buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc

/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc

1/* Analyze RTL for GNU compiler.
2 Copyright (C) 1987-2023 Free Software Foundation, Inc.
3
4This file is part of GCC.
5
6GCC is free software; you can redistribute it and/or modify it under
7the terms of the GNU General Public License as published by the Free
8Software Foundation; either version 3, or (at your option) any later
9version.
10
11GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12WARRANTY; without even the implied warranty of MERCHANTABILITY or
13FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14for more details.
15
16You should have received a copy of the GNU General Public License
17along with GCC; see the file COPYING3. If not see
18<http://www.gnu.org/licenses/>. */
19
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "backend.h"
25#include "target.h"
26#include "rtl.h"
27#include "rtlanal.h"
28#include "tree.h"
29#include "predict.h"
30#include "df.h"
31#include "memmodel.h"
32#include "tm_p.h"
33#include "insn-config.h"
34#include "regs.h"
35#include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */
36#include "recog.h"
37#include "addresses.h"
38#include "rtl-iter.h"
39#include "hard-reg-set.h"
40#include "function-abi.h"
41
42/* Forward declarations */
43static void set_of_1 (rtx, const_rtx, void *);
44static bool covers_regno_p (const_rtx, unsigned int);
45static bool covers_regno_no_parallel_p (const_rtx, unsigned int);
46static int computed_jump_p_1 (const_rtx);
47static void parms_set (rtx, const_rtx, void *);
48
49static unsigned HOST_WIDE_INTlong cached_nonzero_bits (const_rtx, scalar_int_mode,
50 const_rtx, machine_mode,
51 unsigned HOST_WIDE_INTlong);
52static unsigned HOST_WIDE_INTlong nonzero_bits1 (const_rtx, scalar_int_mode,
53 const_rtx, machine_mode,
54 unsigned HOST_WIDE_INTlong);
55static unsigned int cached_num_sign_bit_copies (const_rtx, scalar_int_mode,
56 const_rtx, machine_mode,
57 unsigned int);
58static unsigned int num_sign_bit_copies1 (const_rtx, scalar_int_mode,
59 const_rtx, machine_mode,
60 unsigned int);
61
62rtx_subrtx_bound_info rtx_all_subrtx_bounds[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
63rtx_subrtx_bound_info rtx_nonconst_subrtx_bounds[NUM_RTX_CODE((int) LAST_AND_UNUSED_RTX_CODE)];
64
65/* Truncation narrows the mode from SOURCE mode to DESTINATION mode.
66 If TARGET_MODE_REP_EXTENDED (DESTINATION, DESTINATION_REP) is
67 SIGN_EXTEND then while narrowing we also have to enforce the
68 representation and sign-extend the value to mode DESTINATION_REP.
69
70 If the value is already sign-extended to DESTINATION_REP mode we
71 can just switch to DESTINATION mode on it. For each pair of
72 integral modes SOURCE and DESTINATION, when truncating from SOURCE
73 to DESTINATION, NUM_SIGN_BIT_COPIES_IN_REP[SOURCE][DESTINATION]
74 contains the number of high-order bits in SOURCE that have to be
75 copies of the sign-bit so that we can do this mode-switch to
76 DESTINATION. */
77
78static unsigned int
79num_sign_bit_copies_in_rep[MAX_MODE_INT + 1][MAX_MODE_INT + 1];
80
81/* Store X into index I of ARRAY. ARRAY is known to have at least I
82 elements. Return the new base of ARRAY. */
83
84template <typename T>
85typename T::value_type *
86generic_subrtx_iterator <T>::add_single_to_queue (array_type &array,
87 value_type *base,
88 size_t i, value_type x)
89{
90 if (base == array.stack)
32.1
'base' is not equal to field 'stack'
33
Taking false branch
91 {
92 if (i < LOCAL_ELEMS)
93 {
94 base[i] = x;
95 return base;
96 }
97 gcc_checking_assert (i == LOCAL_ELEMS)((void)(!(i == LOCAL_ELEMS) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 97, __FUNCTION__), 0 : 0))
;
98 /* A previous iteration might also have moved from the stack to the
99 heap, in which case the heap array will already be big enough. */
100 if (vec_safe_length (array.heap) <= i)
101 vec_safe_grow (array.heap, i + 1, true);
102 base = array.heap->address ();
103 memcpy (base, array.stack, sizeof (array.stack));
104 base[LOCAL_ELEMS] = x;
105 return base;
106 }
107 unsigned int length = array.heap->length ();
34
Called C++ object pointer is null
108 if (length > i)
109 {
110 gcc_checking_assert (base == array.heap->address ())((void)(!(base == array.heap->address ()) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 110, __FUNCTION__), 0 : 0))
;
111 base[i] = x;
112 return base;
113 }
114 else
115 {
116 gcc_checking_assert (i == length)((void)(!(i == length) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 116, __FUNCTION__), 0 : 0))
;
117 vec_safe_push (array.heap, x);
118 return array.heap->address ();
119 }
120}
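
On the path reported above, the call at line 168 below enters this function with `base' not equal to `array.stack' (note 32.1), so the branch at line 90 is skipped and line 107 dereferences `array.heap', which the analyzer has not seen allocated on that path. A sketch of one null-tolerant alternative, reusing the helper this function already calls at line 100 (a suggestion only; whether `array.heap' can actually be null here depends on the callers):

unsigned int length = vec_safe_length (array.heap);	/* Returns 0 for a null vector.  */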
121
122/* Add the subrtxes of X to worklist ARRAY, starting at END. Return the
123 number of elements added to the worklist. */
124
125template <typename T>
126size_t
127generic_subrtx_iterator <T>::add_subrtxes_to_queue (array_type &array,
128 value_type *base,
129 size_t end, rtx_type x)
130{
131 enum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code);
132 const char *format = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
133 size_t orig_end = end;
134 if (UNLIKELY (INSN_P (x))(__builtin_expect (((((((enum rtx_code) (x)->code) == INSN
) || (((enum rtx_code) (x)->code) == JUMP_INSN) || (((enum
rtx_code) (x)->code) == CALL_INSN)) || (((enum rtx_code) (
x)->code) == DEBUG_INSN))), 0))
)
19
Assuming field 'code' is not equal to INSN
20
Assuming field 'code' is not equal to JUMP_INSN
21
Assuming field 'code' is not equal to CALL_INSN
22
Assuming field 'code' is not equal to DEBUG_INSN
23
Taking false branch
135 {
136 /* Put the pattern at the top of the queue, since that's what
137 we're likely to want most. It also allows for the SEQUENCE
138 code below. */
139 for (int i = GET_RTX_LENGTH (GET_CODE (x))(rtx_length[(int) (((enum rtx_code) (x)->code))]) - 1; i >= 0; --i)
140 if (format[i] == 'e')
141 {
142 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
143 if (LIKELY (end < LOCAL_ELEMS)(__builtin_expect ((end < LOCAL_ELEMS), 1)))
144 base[end++] = subx;
145 else
146 base = add_single_to_queue (array, base, end++, subx);
147 }
148 }
149 else
150 for (int i = 0; format[i]; ++i)
24
Loop condition is true. Entering loop body
151 if (format[i] == 'e')
25
Assuming the condition is false
26
Taking false branch
152 {
153 value_type subx = T::get_value (x->u.fld[i].rt_rtx);
154 if (LIKELY (end < LOCAL_ELEMS)(__builtin_expect ((end < LOCAL_ELEMS), 1)))
155 base[end++] = subx;
156 else
157 base = add_single_to_queue (array, base, end++, subx);
158 }
159 else if (format[i] == 'E')
27
Assuming the condition is true
28
Taking true branch
160 {
161 unsigned int length = GET_NUM_ELEM (x->u.fld[i].rt_rtvec)((x->u.fld[i].rt_rtvec)->num_elem);
162 rtx *vec = x->u.fld[i].rt_rtvec->elem;
163 if (LIKELY (end + length <= LOCAL_ELEMS)(__builtin_expect ((end + length <= LOCAL_ELEMS), 1)))
29
Assuming the condition is false
30
Taking false branch
164 for (unsigned int j = 0; j < length; j++)
165 base[end++] = T::get_value (vec[j]);
166 else
167 for (unsigned int j = 0; j < length; j++)
30.1
'j' is < 'length'
31
Loop condition is true. Entering loop body
168 base = add_single_to_queue (array, base, end++,
32
Calling 'generic_subrtx_iterator::add_single_to_queue'
169 T::get_value (vec[j]));
170 if (code == SEQUENCE && end == length)
171 /* If the subrtxes of the sequence fill the entire array then
172 we know that no other parts of a containing insn are queued.
173 The caller is therefore iterating over the sequence as a
174 PATTERN (...), so we also want the patterns of the
175 subinstructions. */
176 for (unsigned int j = 0; j < length; j++)
177 {
178 typename T::rtx_type x = T::get_rtx (base[j]);
179 if (INSN_P (x)(((((enum rtx_code) (x)->code) == INSN) || (((enum rtx_code
) (x)->code) == JUMP_INSN) || (((enum rtx_code) (x)->code
) == CALL_INSN)) || (((enum rtx_code) (x)->code) == DEBUG_INSN
))
)
180 base[j] = T::get_value (PATTERN (x));
181 }
182 }
183 return end - orig_end;
184}
185
186template <typename T>
187void
188generic_subrtx_iterator <T>::free_array (array_type &array)
189{
190 vec_free (array.heap);
191}
192
193template <typename T>
194const size_t generic_subrtx_iterator <T>::LOCAL_ELEMS;
195
196template class generic_subrtx_iterator <const_rtx_accessor>;
197template class generic_subrtx_iterator <rtx_var_accessor>;
198template class generic_subrtx_iterator <rtx_ptr_accessor>;
199
200/* Return 1 if the value of X is unstable
201 (would be different at a different point in the program).
202 The frame pointer, arg pointer, etc. are considered stable
203 (within one function) and so is anything marked `unchanging'. */
204
205int
206rtx_unstable_p (const_rtx x)
207{
208 const RTX_CODEenum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code);
209 int i;
210 const char *fmt;
211
212 switch (code)
213 {
214 case MEM:
215 return !MEM_READONLY_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != MEM) rtl_check_failed_flag ("MEM_READONLY_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 215, __FUNCTION__); _rtx; })->unchanging)
|| rtx_unstable_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx));
216
217 case CONST:
218 CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
:
219 case SYMBOL_REF:
220 case LABEL_REF:
221 return 0;
222
223 case REG:
224 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
225 if (x == frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_FRAME_POINTER]) || x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER])
226 /* The arg pointer varies if it is not a fixed register. */
227 || (x == arg_pointer_rtx((this_target_rtl->x_global_rtl)[GR_ARG_POINTER]) && fixed_regs(this_target_hard_regs->x_fixed_regs)[ARG_POINTER_REGNUM16]))
228 return 0;
229 /* ??? When call-clobbered, the value is stable modulo the restore
230 that must happen after a call. This currently screws up local-alloc
231 into believing that the restore is not needed. */
232 if (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED0 && x == pic_offset_table_rtx(this_target_rtl->x_pic_offset_table_rtx))
233 return 0;
234 return 1;
235
236 case ASM_OPERANDS:
237 if (MEM_VOLATILE_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != MEM && ((enum rtx_code
) (_rtx)->code) != ASM_OPERANDS && ((enum rtx_code
) (_rtx)->code) != ASM_INPUT) rtl_check_failed_flag ("MEM_VOLATILE_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 237, __FUNCTION__); _rtx; })->volatil)
)
238 return 1;
239
240 /* Fall through. */
241
242 default:
243 break;
244 }
245
246 fmt = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
247 for (i = GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) - 1; i >= 0; i--)
248 if (fmt[i] == 'e')
249 {
250 if (rtx_unstable_p (XEXP (x, i)(((x)->u.fld[i]).rt_rtx)))
251 return 1;
252 }
253 else if (fmt[i] == 'E')
254 {
255 int j;
256 for (j = 0; j < XVECLEN (x, i)(((((x)->u.fld[i]).rt_rtvec))->num_elem); j++)
257 if (rtx_unstable_p (XVECEXP (x, i, j)(((((x)->u.fld[i]).rt_rtvec))->elem[j])))
258 return 1;
259 }
260
261 return 0;
262}
263
264/* Return 1 if X has a value that can vary even between two
265 executions of the program. 0 means X can be compared reliably
266 against certain constants or near-constants.
267 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
268 zero, we are slightly more conservative.
269 The frame pointer and the arg pointer are considered constant. */
270
271bool
272rtx_varies_p (const_rtx x, bool for_alias)
273{
274 RTX_CODEenum rtx_code code;
275 int i;
276 const char *fmt;
277
278 if (!x)
279 return 0;
280
281 code = GET_CODE (x)((enum rtx_code) (x)->code);
282 switch (code)
283 {
284 case MEM:
285 return !MEM_READONLY_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != MEM) rtl_check_failed_flag ("MEM_READONLY_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 285, __FUNCTION__); _rtx; })->unchanging)
|| rtx_varies_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), for_alias);
286
287 case CONST:
288 CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
:
289 case SYMBOL_REF:
290 case LABEL_REF:
291 return 0;
292
293 case REG:
294 /* Note that we have to test for the actual rtx used for the frame
295 and arg pointers and not just the register number in case we have
296 eliminated the frame and/or arg pointer and are using it
297 for pseudos. */
298 if (x == frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_FRAME_POINTER]) || x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER])
299 /* The arg pointer varies if it is not a fixed register. */
300 || (x == arg_pointer_rtx((this_target_rtl->x_global_rtl)[GR_ARG_POINTER]) && fixed_regs(this_target_hard_regs->x_fixed_regs)[ARG_POINTER_REGNUM16]))
301 return 0;
302 if (x == pic_offset_table_rtx(this_target_rtl->x_pic_offset_table_rtx)
303 /* ??? When call-clobbered, the value is stable modulo the restore
304 that must happen after a call. This currently screws up
305 local-alloc into believing that the restore is not needed, so we
306 must return 0 only if we are called from alias analysis. */
307 && (!PIC_OFFSET_TABLE_REG_CALL_CLOBBERED0 || for_alias))
308 return 0;
309 return 1;
310
311 case LO_SUM:
312 /* The operand 0 of a LO_SUM is considered constant
313 (in fact it is related specifically to operand 1)
314 during alias analysis. */
315 return (! for_alias && rtx_varies_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), for_alias))
316 || rtx_varies_p (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx), for_alias);
317
318 case ASM_OPERANDS:
319 if (MEM_VOLATILE_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != MEM && ((enum rtx_code
) (_rtx)->code) != ASM_OPERANDS && ((enum rtx_code
) (_rtx)->code) != ASM_INPUT) rtl_check_failed_flag ("MEM_VOLATILE_P"
, _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 319, __FUNCTION__); _rtx; })->volatil)
)
320 return 1;
321
322 /* Fall through. */
323
324 default:
325 break;
326 }
327
328 fmt = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
329 for (i = GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) - 1; i >= 0; i--)
330 if (fmt[i] == 'e')
331 {
332 if (rtx_varies_p (XEXP (x, i)(((x)->u.fld[i]).rt_rtx), for_alias))
333 return 1;
334 }
335 else if (fmt[i] == 'E')
336 {
337 int j;
338 for (j = 0; j < XVECLEN (x, i)(((((x)->u.fld[i]).rt_rtvec))->num_elem); j++)
339 if (rtx_varies_p (XVECEXP (x, i, j)(((((x)->u.fld[i]).rt_rtvec))->elem[j]), for_alias))
340 return 1;
341 }
342
343 return 0;
344}
345
346/* Compute an approximation for the offset between the register
347 FROM and TO for the current function, as it was at the start
348 of the routine. */
349
350static poly_int64
351get_initial_register_offset (int from, int to)
352{
353 static const struct elim_table_t
354 {
355 const int from;
356 const int to;
357 } table[] = ELIMINABLE_REGS{{ 16, 7}, { 16, 6}, { 19, 7}, { 19, 6}};
358 poly_int64 offset1, offset2;
359 unsigned int i, j;
360
361 if (to == from)
362 return 0;
363
364 /* It is not safe to call INITIAL_ELIMINATION_OFFSET before the epilogue
365 is completed, but we need to give at least an estimate for the stack
366 pointer based on the frame size. */
367 if (!epilogue_completed)
368 {
369 offset1 = crtl(&x_rtl)->outgoing_args_size + get_frame_size ();
370#if !STACK_GROWS_DOWNWARD1
371 offset1 = - offset1;
372#endif
373 if (to == STACK_POINTER_REGNUM7)
374 return offset1;
375 else if (from == STACK_POINTER_REGNUM7)
376 return - offset1;
377 else
378 return 0;
379 }
380
381 for (i = 0; i < ARRAY_SIZE (table)(sizeof (table) / sizeof ((table)[0])); i++)
382 if (table[i].from == from)
383 {
384 if (table[i].to == to)
385 {
386 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
387 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
388 return offset1;
389 }
390 for (j = 0; j < ARRAY_SIZE (table)(sizeof (table) / sizeof ((table)[0])); j++)
391 {
392 if (table[j].to == to
393 && table[j].from == table[i].to)
394 {
395 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
396 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
397 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
398 offset2)((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
;
399 return offset1 + offset2;
400 }
401 if (table[j].from == to
402 && table[j].to == table[i].to)
403 {
404 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
405 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
406 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
407 offset2)((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
;
408 return offset1 - offset2;
409 }
410 }
411 }
412 else if (table[i].to == from)
413 {
414 if (table[i].from == to)
415 {
416 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
417 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
418 return - offset1;
419 }
420 for (j = 0; j < ARRAY_SIZE (table)(sizeof (table) / sizeof ((table)[0])); j++)
421 {
422 if (table[j].to == to
423 && table[j].from == table[i].from)
424 {
425 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
426 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
427 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
428 offset2)((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
;
429 return - offset1 + offset2;
430 }
431 if (table[j].from == to
432 && table[j].to == table[i].from)
433 {
434 INITIAL_ELIMINATION_OFFSET (table[i].from, table[i].to,((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
435 offset1)((offset1) = ix86_initial_elimination_offset ((table[i].from)
, (table[i].to)))
;
436 INITIAL_ELIMINATION_OFFSET (table[j].from, table[j].to,((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
437 offset2)((offset2) = ix86_initial_elimination_offset ((table[j].from)
, (table[j].to)))
;
438 return - offset1 - offset2;
439 }
440 }
441 }
442
443 /* If the requested register combination was not found,
444 try a different more simple combination. */
445 if (from == ARG_POINTER_REGNUM16)
446 return get_initial_register_offset (HARD_FRAME_POINTER_REGNUM6, to);
447 else if (to == ARG_POINTER_REGNUM16)
448 return get_initial_register_offset (from, HARD_FRAME_POINTER_REGNUM6);
449 else if (from == HARD_FRAME_POINTER_REGNUM6)
450 return get_initial_register_offset (FRAME_POINTER_REGNUM19, to);
451 else if (to == HARD_FRAME_POINTER_REGNUM6)
452 return get_initial_register_offset (from, FRAME_POINTER_REGNUM19);
453 else
454 return 0;
455}
456
457/* Return nonzero if the use of X+OFFSET as an address in a MEM with SIZE
458 bytes can cause a trap. MODE is the mode of the MEM (not that of X) and
459 UNALIGNED_MEMS controls whether nonzero is returned for unaligned memory
460 references on strict alignment machines. */
461
462static int
463rtx_addr_can_trap_p_1 (const_rtx x, poly_int64 offset, poly_int64 size,
464 machine_mode mode, bool unaligned_mems)
465{
466 enum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code);
467 gcc_checking_assert (mode == BLKmode((void)(!(mode == ((void) 0, E_BLKmode) || mode == ((void) 0,
E_VOIDmode) || known_size_p (size)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 469, __FUNCTION__), 0 : 0))
468 || mode == VOIDmode((void)(!(mode == ((void) 0, E_BLKmode) || mode == ((void) 0,
E_VOIDmode) || known_size_p (size)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 469, __FUNCTION__), 0 : 0))
469 || known_size_p (size))((void)(!(mode == ((void) 0, E_BLKmode) || mode == ((void) 0,
E_VOIDmode) || known_size_p (size)) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 469, __FUNCTION__), 0 : 0))
;
470 poly_int64 const_x1;
471
472 /* The offset must be a multiple of the mode size if we are considering
473 unaligned memory references on strict alignment machines. */
474 if (STRICT_ALIGNMENT0
475 && unaligned_mems
476 && mode != BLKmode((void) 0, E_BLKmode)
477 && mode != VOIDmode((void) 0, E_VOIDmode))
478 {
479 poly_int64 actual_offset = offset;
480
481#ifdef SPARC_STACK_BOUNDARY_HACK
482 /* ??? The SPARC port may claim a STACK_BOUNDARY higher than
483 the real alignment of %sp. However, when it does this, the
484 alignment of %sp+STACK_POINTER_OFFSET is STACK_BOUNDARY. */
485 if (SPARC_STACK_BOUNDARY_HACK
486 && (x == stack_pointer_rtx((this_target_rtl->x_global_rtl)[GR_STACK_POINTER]) || x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER])))
487 actual_offset -= STACK_POINTER_OFFSET0;
488#endif
489
490 if (!multiple_p (actual_offset, GET_MODE_SIZE (mode)))
491 return 1;
492 }
493
494 switch (code)
495 {
496 case SYMBOL_REF:
497 if (SYMBOL_REF_WEAK (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("SYMBOL_REF_WEAK", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 497, __FUNCTION__); _rtx; })->return_val)
)
498 return 1;
499 if (!CONSTANT_POOL_ADDRESS_P (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 499, __FUNCTION__); _rtx; })->unchanging)
&& !SYMBOL_REF_FUNCTION_P (x)(((__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((
enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("SYMBOL_REF_FLAGS", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 499, __FUNCTION__); _rtx; }) ->u2.symbol_ref_flags) &
(1 << 0)) != 0)
)
500 {
501 tree decl;
502 poly_int64 decl_size;
503
504 if (maybe_lt (offset, 0))
505 return 1;
506 if (!known_size_p (size))
507 return maybe_ne (offset, 0);
508
509 /* If the size of the access or of the symbol is unknown,
510 assume the worst. */
511 decl = SYMBOL_REF_DECL (x)((__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 511, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ((
((x))->u.fld[1]).rt_tree))
;
512
513 /* Else check that the access is in bounds. TODO: restructure
514 expr_size/tree_expr_size/int_expr_size and just use the latter. */
515 if (!decl)
516 decl_size = -1;
517 else if (DECL_P (decl)(tree_code_type_tmpl <0>::tree_code_type[(int) (((enum tree_code
) (decl)->base.code))] == tcc_declaration)
&& DECL_SIZE_UNIT (decl)((contains_struct_check ((decl), (TS_DECL_COMMON), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 517, __FUNCTION__))->decl_common.size_unit)
)
518 {
519 if (!poly_int_tree_p (DECL_SIZE_UNIT (decl)((contains_struct_check ((decl), (TS_DECL_COMMON), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 519, __FUNCTION__))->decl_common.size_unit)
, &decl_size))
520 decl_size = -1;
521 }
522 else if (TREE_CODE (decl)((enum tree_code) (decl)->base.code) == STRING_CST)
523 decl_size = TREE_STRING_LENGTH (decl)((tree_check ((decl), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 523, __FUNCTION__, (STRING_CST)))->string.length)
;
524 else if (TYPE_SIZE_UNIT (TREE_TYPE (decl))((tree_class_check ((((contains_struct_check ((decl), (TS_TYPED
), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 524, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 524, __FUNCTION__))->type_common.size_unit)
)
525 decl_size = int_size_in_bytes (TREE_TYPE (decl)((contains_struct_check ((decl), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 525, __FUNCTION__))->typed.type)
);
526 else
527 decl_size = -1;
528
529 return (!known_size_p (decl_size) || known_eq (decl_size, 0)(!maybe_ne (decl_size, 0))
530 ? maybe_ne (offset, 0)
531 : !known_subrange_p (offset, size, 0, decl_size));
532 }
533
534 return 0;
535
536 case LABEL_REF:
537 return 0;
538
539 case REG:
540 /* Stack references are assumed not to trap, but we need to deal with
541 nonsensical offsets. */
542 if (x == frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_FRAME_POINTER]) || x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER])
543 || x == stack_pointer_rtx((this_target_rtl->x_global_rtl)[GR_STACK_POINTER])
544 /* The arg pointer varies if it is not a fixed register. */
545 || (x == arg_pointer_rtx((this_target_rtl->x_global_rtl)[GR_ARG_POINTER]) && fixed_regs(this_target_hard_regs->x_fixed_regs)[ARG_POINTER_REGNUM16]))
546 {
547#ifdef RED_ZONE_SIZE128
548 poly_int64 red_zone_size = RED_ZONE_SIZE128;
549#else
550 poly_int64 red_zone_size = 0;
551#endif
552 poly_int64 stack_boundary = PREFERRED_STACK_BOUNDARYix86_preferred_stack_boundary / BITS_PER_UNIT(8);
553 poly_int64 low_bound, high_bound;
554
555 if (!known_size_p (size))
556 return 1;
557
558 if (x == frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_FRAME_POINTER]))
559 {
560 if (FRAME_GROWS_DOWNWARD1)
561 {
562 high_bound = targetm.starting_frame_offset ();
563 low_bound = high_bound - get_frame_size ();
564 }
565 else
566 {
567 low_bound = targetm.starting_frame_offset ();
568 high_bound = low_bound + get_frame_size ();
569 }
570 }
571 else if (x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER]))
572 {
573 poly_int64 sp_offset
574 = get_initial_register_offset (STACK_POINTER_REGNUM7,
575 HARD_FRAME_POINTER_REGNUM6);
576 poly_int64 ap_offset
577 = get_initial_register_offset (ARG_POINTER_REGNUM16,
578 HARD_FRAME_POINTER_REGNUM6);
579
580#if STACK_GROWS_DOWNWARD1
581 low_bound = sp_offset - red_zone_size - stack_boundary;
582 high_bound = ap_offset
583 + FIRST_PARM_OFFSET (current_function_decl)0
584#if !ARGS_GROW_DOWNWARD0
585 + crtl(&x_rtl)->args.size
586#endif
587 + stack_boundary;
588#else
589 high_bound = sp_offset + red_zone_size + stack_boundary;
590 low_bound = ap_offset
591 + FIRST_PARM_OFFSET (current_function_decl)0
592#if ARGS_GROW_DOWNWARD0
593 - crtl(&x_rtl)->args.size
594#endif
595 - stack_boundary;
596#endif
597 }
598 else if (x == stack_pointer_rtx((this_target_rtl->x_global_rtl)[GR_STACK_POINTER]))
599 {
600 poly_int64 ap_offset
601 = get_initial_register_offset (ARG_POINTER_REGNUM16,
602 STACK_POINTER_REGNUM7);
603
604#if STACK_GROWS_DOWNWARD1
605 low_bound = - red_zone_size - stack_boundary;
606 high_bound = ap_offset
607 + FIRST_PARM_OFFSET (current_function_decl)0
608#if !ARGS_GROW_DOWNWARD0
609 + crtl(&x_rtl)->args.size
610#endif
611 + stack_boundary;
612#else
613 high_bound = red_zone_size + stack_boundary;
614 low_bound = ap_offset
615 + FIRST_PARM_OFFSET (current_function_decl)0
616#if ARGS_GROW_DOWNWARD0
617 - crtl(&x_rtl)->args.size
618#endif
619 - stack_boundary;
620#endif
621 }
622 else
623 {
624 /* We assume that accesses are safe to at least the
625 next stack boundary.
626 Examples are varargs and __builtin_return_address. */
627#if ARGS_GROW_DOWNWARD0
628 high_bound = FIRST_PARM_OFFSET (current_function_decl)0
629 + stack_boundary;
630 low_bound = FIRST_PARM_OFFSET (current_function_decl)0
631 - crtl(&x_rtl)->args.size - stack_boundary;
632#else
633 low_bound = FIRST_PARM_OFFSET (current_function_decl)0
634 - stack_boundary;
635 high_bound = FIRST_PARM_OFFSET (current_function_decl)0
636 + crtl(&x_rtl)->args.size + stack_boundary;
637#endif
638 }
639
640 if (known_ge (offset, low_bound)(!maybe_lt (offset, low_bound))
641 && known_le (offset, high_bound - size)(!maybe_lt (high_bound - size, offset)))
642 return 0;
643 return 1;
644 }
645 /* All of the virtual frame registers are stack references. */
646 if (REGNO (x)(rhs_regno(x)) >= FIRST_VIRTUAL_REGISTER(76)
647 && REGNO (x)(rhs_regno(x)) <= LAST_VIRTUAL_REGISTER(((76)) + 5))
648 return 0;
649 return 1;
650
651 case CONST:
652 return rtx_addr_can_trap_p_1 (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), offset, size,
653 mode, unaligned_mems);
654
655 case PLUS:
656 /* An address is assumed not to trap if:
657 - it is the pic register plus a const unspec without offset. */
658 if (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx) == pic_offset_table_rtx(this_target_rtl->x_pic_offset_table_rtx)
659 && GET_CODE (XEXP (x, 1))((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST
660 && GET_CODE (XEXP (XEXP (x, 1), 0))((enum rtx_code) (((((((x)->u.fld[1]).rt_rtx))->u.fld[0
]).rt_rtx))->code)
== UNSPEC
661 && known_eq (offset, 0)(!maybe_ne (offset, 0)))
662 return 0;
663
664 /* - or it is an address that can't trap plus a constant integer. */
665 if (poly_int_rtx_p (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx), &const_x1)
666 && !rtx_addr_can_trap_p_1 (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), offset + const_x1,
667 size, mode, unaligned_mems))
668 return 0;
669
670 return 1;
671
672 case LO_SUM:
673 case PRE_MODIFY:
674 return rtx_addr_can_trap_p_1 (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx), offset, size,
675 mode, unaligned_mems);
676
677 case PRE_DEC:
678 case PRE_INC:
679 case POST_DEC:
680 case POST_INC:
681 case POST_MODIFY:
682 return rtx_addr_can_trap_p_1 (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), offset, size,
683 mode, unaligned_mems);
684
685 default:
686 break;
687 }
688
689 /* If it isn't one of the case above, it can cause a trap. */
690 return 1;
691}
692
693/* Return nonzero if the use of X as an address in a MEM can cause a trap. */
694
695int
696rtx_addr_can_trap_p (const_rtx x)
697{
698 return rtx_addr_can_trap_p_1 (x, 0, -1, BLKmode((void) 0, E_BLKmode), false);
699}
700
701/* Return true if X contains a MEM subrtx. */
702
703bool
704contains_mem_rtx_p (rtx x)
705{
706 subrtx_iterator::array_type array;
707 FOR_EACH_SUBRTX (iter, array, x, ALL)for (subrtx_iterator iter (array, x, rtx_all_subrtx_bounds); !
iter.at_end (); iter.next ())
708 if (MEM_P (*iter)(((enum rtx_code) (*iter)->code) == MEM))
709 return true;
710
711 return false;
712}
713
714/* Return true if X is an address that is known to not be zero. */
715
716bool
717nonzero_address_p (const_rtx x)
718{
719 const enum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code);
720
721 switch (code)
722 {
723 case SYMBOL_REF:
724 return flag_delete_null_pointer_checksglobal_options.x_flag_delete_null_pointer_checks && !SYMBOL_REF_WEAK (x)(__extension__ ({ __typeof ((x)) const _rtx = ((x)); if (((enum
rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("SYMBOL_REF_WEAK", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 724, __FUNCTION__); _rtx; })->return_val)
;
725
726 case LABEL_REF:
727 return true;
728
729 case REG:
730 /* As in rtx_varies_p, we have to use the actual rtx, not reg number. */
731 if (x == frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_FRAME_POINTER]) || x == hard_frame_pointer_rtx((this_target_rtl->x_global_rtl)[GR_HARD_FRAME_POINTER])
732 || x == stack_pointer_rtx((this_target_rtl->x_global_rtl)[GR_STACK_POINTER])
733 || (x == arg_pointer_rtx((this_target_rtl->x_global_rtl)[GR_ARG_POINTER]) && fixed_regs(this_target_hard_regs->x_fixed_regs)[ARG_POINTER_REGNUM16]))
734 return true;
735 /* All of the virtual frame registers are stack references. */
736 if (REGNO (x)(rhs_regno(x)) >= FIRST_VIRTUAL_REGISTER(76)
737 && REGNO (x)(rhs_regno(x)) <= LAST_VIRTUAL_REGISTER(((76)) + 5))
738 return true;
739 return false;
740
741 case CONST:
742 return nonzero_address_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx));
743
744 case PLUS:
745 /* Handle PIC references. */
746 if (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx) == pic_offset_table_rtx(this_target_rtl->x_pic_offset_table_rtx)
747 && CONSTANT_P (XEXP (x, 1))((rtx_class[(int) (((enum rtx_code) ((((x)->u.fld[1]).rt_rtx
))->code))]) == RTX_CONST_OBJ)
)
748 return true;
749 return false;
750
751 case PRE_MODIFY:
752 /* Similar to the above; allow positive offsets. Further, since
753 auto-inc is only allowed in memories, the register must be a
754 pointer. */
755 if (CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
756 && INTVAL (XEXP (x, 1))(((((x)->u.fld[1]).rt_rtx))->u.hwint[0]) > 0)
757 return true;
758 return nonzero_address_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx));
759
760 case PRE_INC:
761 /* Similarly. Further, the offset is always positive. */
762 return true;
763
764 case PRE_DEC:
765 case POST_DEC:
766 case POST_INC:
767 case POST_MODIFY:
768 return nonzero_address_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx));
769
770 case LO_SUM:
771 return nonzero_address_p (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx));
772
773 default:
774 break;
775 }
776
777 /* If it isn't one of the case above, might be zero. */
778 return false;
779}
780
781/* Return 1 if X refers to a memory location whose address
782 cannot be compared reliably with constant addresses,
783 or if X refers to a BLKmode memory object.
784 FOR_ALIAS is nonzero if we are called from alias analysis; if it is
785 zero, we are slightly more conservative. */
786
787bool
788rtx_addr_varies_p (const_rtx x, bool for_alias)
789{
790 enum rtx_code code;
791 int i;
792 const char *fmt;
793
794 if (x == 0)
795 return 0;
796
797 code = GET_CODE (x)((enum rtx_code) (x)->code);
798 if (code == MEM)
799 return GET_MODE (x)((machine_mode) (x)->mode) == BLKmode((void) 0, E_BLKmode) || rtx_varies_p (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), for_alias);
800
801 fmt = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
802 for (i = GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) - 1; i >= 0; i--)
803 if (fmt[i] == 'e')
804 {
805 if (rtx_addr_varies_p (XEXP (x, i)(((x)->u.fld[i]).rt_rtx), for_alias))
806 return 1;
807 }
808 else if (fmt[i] == 'E')
809 {
810 int j;
811 for (j = 0; j < XVECLEN (x, i)(((((x)->u.fld[i]).rt_rtvec))->num_elem); j++)
812 if (rtx_addr_varies_p (XVECEXP (x, i, j)(((((x)->u.fld[i]).rt_rtvec))->elem[j]), for_alias))
813 return 1;
814 }
815 return 0;
816}
817
818/* Return the CALL in X if there is one. */
819
820rtx
821get_call_rtx_from (const rtx_insn *insn)
822{
823 rtx x = PATTERN (insn);
824 if (GET_CODE (x)((enum rtx_code) (x)->code) == PARALLEL)
825 x = XVECEXP (x, 0, 0)(((((x)->u.fld[0]).rt_rtvec))->elem[0]);
826 if (GET_CODE (x)((enum rtx_code) (x)->code) == SET)
827 x = SET_SRC (x)(((x)->u.fld[1]).rt_rtx);
828 if (GET_CODE (x)((enum rtx_code) (x)->code) == CALL && MEM_P (XEXP (x, 0))(((enum rtx_code) ((((x)->u.fld[0]).rt_rtx))->code) == MEM
)
)
829 return x;
830 return NULL_RTX(rtx) 0;
831}
832
833/* Get the declaration of the function called by INSN. */
834
835tree
836get_call_fndecl (const rtx_insn *insn)
837{
838 rtx note, datum;
839
840 note = find_reg_note (insn, REG_CALL_DECL, NULL_RTX(rtx) 0);
841 if (note == NULL_RTX(rtx) 0)
842 return NULL_TREE(tree) nullptr;
843
844 datum = XEXP (note, 0)(((note)->u.fld[0]).rt_rtx);
845 if (datum != NULL_RTX(rtx) 0)
846 return SYMBOL_REF_DECL (datum)((__extension__ ({ __typeof ((datum)) const _rtx = ((datum));
if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 846, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ((
((datum))->u.fld[1]).rt_tree))
;
847
848 return NULL_TREE(tree) nullptr;
849}
850
851/* Return the value of the integer term in X, if one is apparent;
852 otherwise return 0.
853 Only obvious integer terms are detected.
854 This is used in cse.cc with the `related_value' field. */
855
856HOST_WIDE_INTlong
857get_integer_term (const_rtx x)
858{
859 if (GET_CODE (x)((enum rtx_code) (x)->code) == CONST)
860 x = XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
861
862 if (GET_CODE (x)((enum rtx_code) (x)->code) == MINUS
863 && CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
)
864 return - INTVAL (XEXP (x, 1))(((((x)->u.fld[1]).rt_rtx))->u.hwint[0]);
865 if (GET_CODE (x)((enum rtx_code) (x)->code) == PLUS
866 && CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
)
867 return INTVAL (XEXP (x, 1))(((((x)->u.fld[1]).rt_rtx))->u.hwint[0]);
868 return 0;
869}
870
871/* If X is a constant, return the value sans apparent integer term;
872 otherwise return 0.
873 Only obvious integer terms are detected. */
874
875rtx
876get_related_value (const_rtx x)
877{
878 if (GET_CODE (x)((enum rtx_code) (x)->code) != CONST)
879 return 0;
880 x = XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
881 if (GET_CODE (x)((enum rtx_code) (x)->code) == PLUS
882 && CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
)
883 return XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
884 else if (GET_CODE (x)((enum rtx_code) (x)->code) == MINUS
885 && CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
)
886 return XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
887 return 0;
888}
889
890/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
891 to somewhere in the same object or object_block as SYMBOL. */
892
893bool
894offset_within_block_p (const_rtx symbol, HOST_WIDE_INTlong offset)
895{
896 tree decl;
897
898 if (GET_CODE (symbol)((enum rtx_code) (symbol)->code) != SYMBOL_REF)
899 return false;
900
901 if (offset == 0)
902 return true;
903
904 if (offset > 0)
905 {
906 if (CONSTANT_POOL_ADDRESS_P (symbol)(__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol))
; if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 906, __FUNCTION__); _rtx; })->unchanging)
907 && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
908 return true;
909
910 decl = SYMBOL_REF_DECL (symbol)((__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol)
); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("CONSTANT_POOL_ADDRESS_P", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 910, __FUNCTION__); _rtx; })->unchanging) ? nullptr : ((
((symbol))->u.fld[1]).rt_tree))
;
911 if (decl && offset < int_size_in_bytes (TREE_TYPE (decl)((contains_struct_check ((decl), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 911, __FUNCTION__))->typed.type)
))
912 return true;
913 }
914
915 if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)(((__extension__ ({ __typeof ((symbol)) const _rtx = ((symbol
)); if (((enum rtx_code) (_rtx)->code) != SYMBOL_REF) rtl_check_failed_flag
("SYMBOL_REF_FLAGS", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 915, __FUNCTION__); _rtx; }) ->u2.symbol_ref_flags) &
(1 << 7)) != 0)
916 && SYMBOL_REF_BLOCK (symbol)((&(symbol)->u.block_sym)->block)
917 && SYMBOL_REF_BLOCK_OFFSET (symbol)((&(symbol)->u.block_sym)->offset) >= 0
918 && ((unsigned HOST_WIDE_INTlong) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)((&(symbol)->u.block_sym)->offset)
919 < (unsigned HOST_WIDE_INTlong) SYMBOL_REF_BLOCK (symbol)((&(symbol)->u.block_sym)->block)->size))
920 return true;
921
922 return false;
923}
924
925/* Split X into a base and a constant offset, storing them in *BASE_OUT
926 and *OFFSET_OUT respectively. */
927
928void
929split_const (rtx x, rtx *base_out, rtx *offset_out)
930{
931 if (GET_CODE (x)((enum rtx_code) (x)->code) == CONST)
932 {
933 x = XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
934 if (GET_CODE (x)((enum rtx_code) (x)->code) == PLUS && CONST_INT_P (XEXP (x, 1))(((enum rtx_code) ((((x)->u.fld[1]).rt_rtx))->code) == CONST_INT
)
)
935 {
936 *base_out = XEXP (x, 0)(((x)->u.fld[0]).rt_rtx);
937 *offset_out = XEXP (x, 1)(((x)->u.fld[1]).rt_rtx);
938 return;
939 }
940 }
941 *base_out = x;
942 *offset_out = const0_rtx(const_int_rtx[64]);
943}
944
945/* Express integer value X as some value Y plus a polynomial offset,
946 where Y is either const0_rtx, X or something within X (as opposed
947 to a new rtx). Return the Y and store the offset in *OFFSET_OUT. */
948
949rtx
950strip_offset (rtx x, poly_int64_pod *offset_out)
951{
952 rtx base = const0_rtx(const_int_rtx[64]);
953 rtx test = x;
954 if (GET_CODE (test)((enum rtx_code) (test)->code) == CONST)
955 test = XEXP (test, 0)(((test)->u.fld[0]).rt_rtx);
956 if (GET_CODE (test)((enum rtx_code) (test)->code) == PLUS)
957 {
958 base = XEXP (test, 0)(((test)->u.fld[0]).rt_rtx);
959 test = XEXP (test, 1)(((test)->u.fld[1]).rt_rtx);
960 }
961 if (poly_int_rtx_p (test, offset_out))
962 return base;
963 *offset_out = 0;
964 return x;
965}
966
967/* Return the argument size in REG_ARGS_SIZE note X. */
968
969poly_int64
970get_args_size (const_rtx x)
971{
972 gcc_checking_assert (REG_NOTE_KIND (x) == REG_ARGS_SIZE)((void)(!(((enum reg_note) ((machine_mode) (x)->mode)) == REG_ARGS_SIZE
) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 972, __FUNCTION__), 0 : 0))
;
973 return rtx_to_poly_int64 (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx));
974}
975
976/* Return the number of places FIND appears within X. If COUNT_DEST is
977 zero, we do not count occurrences inside the destination of a SET. */
978
979int
980count_occurrences (const_rtx x, const_rtx find, int count_dest)
981{
982 int i, j;
983 enum rtx_code code;
984 const char *format_ptr;
985 int count;
986
987 if (x == find)
988 return 1;
989
990 code = GET_CODE (x)((enum rtx_code) (x)->code);
991
992 switch (code)
993 {
994 case REG:
995 CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
:
996 case SYMBOL_REF:
997 case CODE_LABEL:
998 case PC:
999 return 0;
1000
1001 case EXPR_LIST:
1002 count = count_occurrences (XEXP (x, 0)(((x)->u.fld[0]).rt_rtx), find, count_dest);
1003 if (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx))
1004 count += count_occurrences (XEXP (x, 1)(((x)->u.fld[1]).rt_rtx), find, count_dest);
1005 return count;
1006
1007 case MEM:
1008 if (MEM_P (find)(((enum rtx_code) (find)->code) == MEM) && rtx_equal_p (x, find))
1009 return 1;
1010 break;
1011
1012 case SET:
1013 if (SET_DEST (x)(((x)->u.fld[0]).rt_rtx) == find && ! count_dest)
1014 return count_occurrences (SET_SRC (x)(((x)->u.fld[1]).rt_rtx), find, count_dest);
1015 break;
1016
1017 default:
1018 break;
1019 }
1020
1021 format_ptr = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
1022 count = 0;
1023
1024 for (i = 0; i < GET_RTX_LENGTH (code)(rtx_length[(int) (code)]); i++)
1025 {
1026 switch (*format_ptr++)
1027 {
1028 case 'e':
1029 count += count_occurrences (XEXP (x, i)(((x)->u.fld[i]).rt_rtx), find, count_dest);
1030 break;
1031
1032 case 'E':
1033 for (j = 0; j < XVECLEN (x, i)(((((x)->u.fld[i]).rt_rtvec))->num_elem); j++)
1034 count += count_occurrences (XVECEXP (x, i, j)(((((x)->u.fld[i]).rt_rtvec))->elem[j]), find, count_dest);
1035 break;
1036 }
1037 }
1038 return count;
1039}
1040
1041
1042/* Return TRUE if OP is a register or subreg of a register that
1043 holds an unsigned quantity. Otherwise, return FALSE. */
1044
1045bool
1046unsigned_reg_p (rtx op)
1047{
1048 if (REG_P (op)(((enum rtx_code) (op)->code) == REG)
1049 && REG_EXPR (op)(((&(op)->u.reg)->attrs) == 0 ? 0 : ((&(op)->
u.reg)->attrs)->decl)
1050 && TYPE_UNSIGNED (TREE_TYPE (REG_EXPR (op)))((tree_class_check ((((contains_struct_check (((((&(op)->
u.reg)->attrs) == 0 ? 0 : ((&(op)->u.reg)->attrs
)->decl)), (TS_TYPED), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 1050, __FUNCTION__))->typed.type)), (tcc_type), "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 1050, __FUNCTION__))->base.u.bits.unsigned_flag)
)
1051 return true;
1052
1053 if (GET_CODE (op)((enum rtx_code) (op)->code) == SUBREG
1054 && SUBREG_PROMOTED_SIGN (op)((__extension__ ({ __typeof ((op)) const _rtx = ((op)); if ((
(enum rtx_code) (_rtx)->code) != SUBREG) rtl_check_failed_flag
("SUBREG_PROMOTED_SIGN", _rtx, "/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 1054, __FUNCTION__); _rtx; })->volatil) ? 1 : (op)->unchanging
- 1)
)
1055 return true;
1056
1057 return false;
1058}
1059
1060
1061/* Nonzero if register REG appears somewhere within IN.
1062 Also works if REG is not a register; in this case it checks
1063 for a subexpression of IN that is Lisp "equal" to REG. */
1064
1065int
1066reg_mentioned_p (const_rtx reg, const_rtx in)
1067{
1068 const char *fmt;
1069 int i;
1070 enum rtx_code code;
1071
1072 if (in == 0)
1073 return 0;
1074
1075 if (reg == in)
1076 return 1;
1077
1078 if (GET_CODE (in)((enum rtx_code) (in)->code) == LABEL_REF)
1079 return reg == label_ref_label (in);
1080
1081 code = GET_CODE (in)((enum rtx_code) (in)->code);
1082
1083 switch (code)
1084 {
1085 /* Compare registers by number. */
1086 case REG:
1087 return REG_P (reg)(((enum rtx_code) (reg)->code) == REG) && REGNO (in)(rhs_regno(in)) == REGNO (reg)(rhs_regno(reg));
1088
1089 /* These codes have no constituent expressions
1090 and are unique. */
1091 case SCRATCH:
1092 case PC:
1093 return 0;
1094
1095 CASE_CONST_ANYcase CONST_INT: case CONST_WIDE_INT: case CONST_POLY_INT: case
CONST_DOUBLE: case CONST_FIXED: case CONST_VECTOR
:
1096 /* These are kept unique for a given value. */
1097 return 0;
1098
1099 default:
1100 break;
1101 }
1102
1103 if (GET_CODE (reg)((enum rtx_code) (reg)->code) == code && rtx_equal_p (reg, in))
1104 return 1;
1105
1106 fmt = GET_RTX_FORMAT (code)(rtx_format[(int) (code)]);
1107
1108 for (i = GET_RTX_LENGTH (code)(rtx_length[(int) (code)]) - 1; i >= 0; i--)
1109 {
1110 if (fmt[i] == 'E')
1111 {
1112 int j;
1113 for (j = XVECLEN (in, i)(((((in)->u.fld[i]).rt_rtvec))->num_elem) - 1; j >= 0; j--)
1114 if (reg_mentioned_p (reg, XVECEXP (in, i, j)(((((in)->u.fld[i]).rt_rtvec))->elem[j])))
1115 return 1;
1116 }
1117 else if (fmt[i] == 'e'
1118 && reg_mentioned_p (reg, XEXP (in, i)(((in)->u.fld[i]).rt_rtx)))
1119 return 1;
1120 }
1121 return 0;
1122}
1123
1124/* Return 1 if in between BEG and END, exclusive of BEG and END, there is
1125 no CODE_LABEL insn. */
1126
1127int
1128no_labels_between_p (const rtx_insn *beg, const rtx_insn *end)
1129{
1130 rtx_insn *p;
1131 if (beg == end)
1132 return 0;
1133 for (p = NEXT_INSN (beg); p != end; p = NEXT_INSN (p))
1134 if (LABEL_P (p)(((enum rtx_code) (p)->code) == CODE_LABEL))
1135 return 0;
1136 return 1;
1137}
1138
1139/* Nonzero if register REG is used in an insn between
1140 FROM_INSN and TO_INSN (exclusive of those two). */
1141
1142int
1143reg_used_between_p (const_rtx reg, const rtx_insn *from_insn,
1144 const rtx_insn *to_insn)
1145{
1146 rtx_insn *insn;
1147
1148 if (from_insn == to_insn)
1149 return 0;
1150
1151 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1152 if (NONDEBUG_INSN_P (insn)((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN))
1153 && (reg_overlap_mentioned_p (reg, PATTERN (insn))
1154 || (CALL_P (insn)(((enum rtx_code) (insn)->code) == CALL_INSN) && find_reg_fusage (insn, USE, reg))))
1155 return 1;
1156 return 0;
1157}
1158
1159/* Nonzero if the old value of X, a register, is referenced in BODY. If X
1160 is entirely replaced by a new value and the only use is as a SET_DEST,
1161 we do not consider it a reference. */
1162
1163int
1164reg_referenced_p (const_rtx x, const_rtx body)
1165{
1166 int i;
1167
1168 switch (GET_CODE (body)((enum rtx_code) (body)->code))
1169 {
1170 case SET:
1171 if (reg_overlap_mentioned_p (x, SET_SRC (body)(((body)->u.fld[1]).rt_rtx)))
1172 return 1;
1173
1174 /* If the destination is anything other than PC, a REG or a SUBREG
1175 of a REG that occupies all of the REG, the insn references X if
1176 it is mentioned in the destination. */
1177 if (GET_CODE (SET_DEST (body))((enum rtx_code) ((((body)->u.fld[0]).rt_rtx))->code) != PC
1178 && !REG_P (SET_DEST (body))(((enum rtx_code) ((((body)->u.fld[0]).rt_rtx))->code) ==
REG)
1179 && ! (GET_CODE (SET_DEST (body))((enum rtx_code) ((((body)->u.fld[0]).rt_rtx))->code) == SUBREG
1180 && REG_P (SUBREG_REG (SET_DEST (body)))(((enum rtx_code) (((((((body)->u.fld[0]).rt_rtx))->u.fld
[0]).rt_rtx))->code) == REG)
1181 && !read_modify_subreg_p (SET_DEST (body)(((body)->u.fld[0]).rt_rtx)))
1182 && reg_overlap_mentioned_p (x, SET_DEST (body)(((body)->u.fld[0]).rt_rtx)))
1183 return 1;
1184 return 0;
1185
1186 case ASM_OPERANDS:
1187 for (i = ASM_OPERANDS_INPUT_LENGTH (body)(((((body)->u.fld[3]).rt_rtvec))->num_elem) - 1; i >= 0; i--)
1188 if (reg_overlap_mentioned_p (x, ASM_OPERANDS_INPUT (body, i)(((((body)->u.fld[3]).rt_rtvec))->elem[i])))
1189 return 1;
1190 return 0;
1191
1192 case CALL:
1193 case USE:
1194 case IF_THEN_ELSE:
1195 return reg_overlap_mentioned_p (x, body);
1196
1197 case TRAP_IF:
1198 return reg_overlap_mentioned_p (x, TRAP_CONDITION (body)(((body)->u.fld[0]).rt_rtx));
1199
1200 case PREFETCH:
1201 return reg_overlap_mentioned_p (x, XEXP (body, 0)(((body)->u.fld[0]).rt_rtx));
1202
1203 case UNSPEC:
1204 case UNSPEC_VOLATILE:
1205 for (i = XVECLEN (body, 0)(((((body)->u.fld[0]).rt_rtvec))->num_elem) - 1; i >= 0; i--)
1206 if (reg_overlap_mentioned_p (x, XVECEXP (body, 0, i)(((((body)->u.fld[0]).rt_rtvec))->elem[i])))
1207 return 1;
1208 return 0;
1209
1210 case PARALLEL:
1211 for (i = XVECLEN (body, 0)(((((body)->u.fld[0]).rt_rtvec))->num_elem) - 1; i >= 0; i--)
1212 if (reg_referenced_p (x, XVECEXP (body, 0, i)(((((body)->u.fld[0]).rt_rtvec))->elem[i])))
1213 return 1;
1214 return 0;
1215
1216 case CLOBBER:
1217 if (MEM_P (XEXP (body, 0))(((enum rtx_code) ((((body)->u.fld[0]).rt_rtx))->code) ==
MEM)
)
1218 if (reg_overlap_mentioned_p (x, XEXP (XEXP (body, 0), 0)((((((body)->u.fld[0]).rt_rtx))->u.fld[0]).rt_rtx)))
1219 return 1;
1220 return 0;
1221
1222 case COND_EXEC:
1223 if (reg_overlap_mentioned_p (x, COND_EXEC_TEST (body)(((body)->u.fld[0]).rt_rtx)))
1224 return 1;
1225 return reg_referenced_p (x, COND_EXEC_CODE (body)(((body)->u.fld[1]).rt_rtx));
1226
1227 default:
1228 return 0;
1229 }
1230}
1231
1232/* Nonzero if register REG is set or clobbered in an insn between
1233 FROM_INSN and TO_INSN (exclusive of those two). */
1234
1235int
1236reg_set_between_p (const_rtx reg, const rtx_insn *from_insn,
1237 const rtx_insn *to_insn)
1238{
1239 const rtx_insn *insn;
1240
1241 if (from_insn == to_insn)
1242 return 0;
1243
1244 for (insn = NEXT_INSN (from_insn); insn != to_insn; insn = NEXT_INSN (insn))
1245 if (INSN_P (insn)(((((enum rtx_code) (insn)->code) == INSN) || (((enum rtx_code
) (insn)->code) == JUMP_INSN) || (((enum rtx_code) (insn)->
code) == CALL_INSN)) || (((enum rtx_code) (insn)->code) ==
DEBUG_INSN))
&& reg_set_p (reg, insn))
1246 return 1;
1247 return 0;
1248}
1249
1250  /* Return true if REG is set or clobbered inside INSN.  */
1251
1252  int
1253  reg_set_p (const_rtx reg, const_rtx insn)
1254  {
1255    /* After delay slot handling, call and branch insns might be in a
1256       sequence.  Check all the elements there.  */
1257    if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == SEQUENCE)
1258      {
1259        for (int i = 0; i < XVECLEN (PATTERN (insn), 0); ++i)
1260          if (reg_set_p (reg, XVECEXP (PATTERN (insn), 0, i)))
1261            return true;
1262
1263        return false;
1264      }
1265
1266    /* We can be passed an insn or part of one.  If we are passed an insn,
1267       check if a side-effect of the insn clobbers REG.  */
1268    if (INSN_P (insn)
1269        && (FIND_REG_INC_NOTE (insn, reg)
1270            || (CALL_P (insn)
1271                && ((REG_P (reg)
1272                     && REGNO (reg) < FIRST_PSEUDO_REGISTER
1273                     && (insn_callee_abi (as_a<const rtx_insn *> (insn))
1274                         .clobbers_reg_p (GET_MODE (reg), REGNO (reg))))
1275                    || MEM_P (reg)
1276                    || find_reg_fusage (insn, CLOBBER, reg)))))
1277      return true;
1278
1279    /* There are no REG_INC notes for SP autoinc.  */
1280    if (reg == stack_pointer_rtx && INSN_P (insn))
1281      {
1282        subrtx_var_iterator::array_type array;
1283        FOR_EACH_SUBRTX_VAR (iter, array, PATTERN (insn), NONCONST)
1284          {
1285            rtx mem = *iter;
1286            if (mem
1287                && MEM_P (mem)
1288                && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
1289              {
1290                if (XEXP (XEXP (mem, 0), 0) == stack_pointer_rtx)
1291                  return true;
1292                iter.skip_subrtxes ();
1293              }
1294          }
1295      }
1296
1297    return set_of (reg, insn) != NULL_RTX;
1298  }
1299
1300  /* Similar to reg_set_between_p, but check all registers in X.  Return 0
1301     only if none of them are modified between START and END.  Return 1 if
1302     X contains a MEM; this routine does use memory aliasing.  */
1303
1304  int
1305  modified_between_p (const_rtx x, const rtx_insn *start, const rtx_insn *end)
1306  {
1307    const enum rtx_code code = GET_CODE (x);
1308    const char *fmt;
1309    int i, j;
1310    rtx_insn *insn;
1311
1312    if (start == end)
1313      return 0;
1314
1315    switch (code)
1316      {
1317      CASE_CONST_ANY:
1318      case CONST:
1319      case SYMBOL_REF:
1320      case LABEL_REF:
1321        return 0;
1322
1323      case PC:
1324        return 1;
1325
1326      case MEM:
1327        if (modified_between_p (XEXP (x, 0), start, end))
1328          return 1;
1329        if (MEM_READONLY_P (x))
1330          return 0;
1331        for (insn = NEXT_INSN (start); insn != end; insn = NEXT_INSN (insn))
1332          if (memory_modified_in_insn_p (x, insn))
1333            return 1;
1334        return 0;
1335
1336      case REG:
1337        return reg_set_between_p (x, start, end);
1338
1339      default:
1340        break;
1341      }
1342
1343    fmt = GET_RTX_FORMAT (code);
1344    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1345      {
1346        if (fmt[i] == 'e' && modified_between_p (XEXP (x, i), start, end))
1347          return 1;
1348
1349        else if (fmt[i] == 'E')
1350          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1351            if (modified_between_p (XVECEXP (x, i, j), start, end))
1352              return 1;
1353      }
1354
1355    return 0;
1356  }
1357
1358  /* Similar to reg_set_p, but check all registers in X.  Return 0 only if none
1359     of them are modified in INSN.  Return 1 if X contains a MEM; this routine
1360     does use memory aliasing.  */
1361
1362  int
1363  modified_in_p (const_rtx x, const_rtx insn)
1364  {
1365    const enum rtx_code code = GET_CODE (x);
1366    const char *fmt;
1367    int i, j;
1368
1369    switch (code)
1370      {
1371      CASE_CONST_ANY:
1372      case CONST:
1373      case SYMBOL_REF:
1374      case LABEL_REF:
1375        return 0;
1376
1377      case PC:
1378        return 1;
1379
1380      case MEM:
1381        if (modified_in_p (XEXP (x, 0), insn))
1382          return 1;
1383        if (MEM_READONLY_P (x))
1384          return 0;
1385        if (memory_modified_in_insn_p (x, insn))
1386          return 1;
1387        return 0;
1388
1389      case REG:
1390        return reg_set_p (x, insn);
1391
1392      default:
1393        break;
1394      }
1395
1396    fmt = GET_RTX_FORMAT (code);
1397    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1398      {
1399        if (fmt[i] == 'e' && modified_in_p (XEXP (x, i), insn))
1400          return 1;
1401
1402        else if (fmt[i] == 'E')
1403          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1404            if (modified_in_p (XVECEXP (x, i, j), insn))
1405              return 1;
1406      }
1407
1408    return 0;
1409  }
1410
1411  /* Return true if X is a SUBREG and if storing a value to X would
1412     preserve some of its SUBREG_REG.  For example, on a normal 32-bit
1413     target, using a SUBREG to store to one half of a DImode REG would
1414     preserve the other half.  */
1415
1416  bool
1417  read_modify_subreg_p (const_rtx x)
1418  {
1419    if (GET_CODE (x) != SUBREG)
1420      return false;
1421    poly_uint64 isize = GET_MODE_SIZE (GET_MODE (SUBREG_REG (x)));
1422    poly_uint64 osize = GET_MODE_SIZE (GET_MODE (x));
1423    poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (SUBREG_REG (x)));
1424    /* The inner and outer modes of a subreg must be ordered, so that we
1425       can tell whether they're paradoxical or partial.  */
1426    gcc_checking_assert (ordered_p (isize, osize));
1427    return (maybe_gt (isize, osize) && maybe_gt (isize, regsize));
1428  }
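/* Illustrative sketch, not part of rtlanal.cc: a concrete case of
   read_modify_subreg_p.  On a 32-bit target, (subreg:SI (reg:DI R) 0)
   names only the low word of R, so storing through it preserves the high
   word and the predicate is true; on a 64-bit target, where DImode fits
   in one register, it is false.  REGNO_R is a hypothetical pseudo
   register number.  */
static bool
low_word_store_is_partial_p (unsigned int regno_r)
{
  rtx di_reg = gen_rtx_REG (DImode, regno_r);
  rtx lo_word = gen_rtx_SUBREG (SImode, di_reg, 0);
  return read_modify_subreg_p (lo_word);
}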
1429
1430  /* Helper function for set_of.  */
1431  struct set_of_data
1432    {
1433      const_rtx found;
1434      const_rtx pat;
1435    };
1436
1437  static void
1438  set_of_1 (rtx x, const_rtx pat, void *data1)
1439  {
1440    struct set_of_data *const data = (struct set_of_data *) (data1);
1441    if (rtx_equal_p (x, data->pat)
1442        || (!MEM_P (x) && reg_overlap_mentioned_p (data->pat, x)))
1443      data->found = pat;
1444  }
1445
1446  /* Given an INSN, return a SET or CLOBBER expression that does modify PAT
1447     (either directly or via STRICT_LOW_PART and similar modifiers).  */
1448  const_rtx
1449  set_of (const_rtx pat, const_rtx insn)
1450  {
1451    struct set_of_data data;
1452    data.found = NULL_RTX;
1453    data.pat = pat;
1454    note_pattern_stores (INSN_P (insn) ? PATTERN (insn) : insn, set_of_1, &data);
1455    return data.found;
1456  }
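/* Illustrative sketch, not part of rtlanal.cc: set_of answers "which SET
   or CLOBBER in INSN writes REG?", which is more informative than the
   boolean reg_set_p.  The helper name is hypothetical.  */
static bool
insn_only_clobbers_reg_p (const rtx_insn *insn, rtx reg)
{
  const_rtx expr = set_of (reg, insn);
  return expr != NULL_RTX && GET_CODE (expr) == CLOBBER;
}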
1457
1458  /* Check whether instruction pattern PAT contains a SET with the following
1459     properties:
1460
1461     - the SET is executed unconditionally; and
1462     - either:
1463       - the destination of the SET is a REG that contains REGNO; or
1464       - both:
1465         - the destination of the SET is a SUBREG of such a REG; and
1466         - writing to the subreg clobbers all of the SUBREG_REG
1467           (in other words, read_modify_subreg_p is false).
1468
1469     If PAT does have a SET like that, return the set, otherwise return null.
1470
1471     This is intended to be an alternative to single_set for passes that
1472     can handle patterns with multiple_sets.  */
1473  rtx
1474  simple_regno_set (rtx pat, unsigned int regno)
1475  {
1476    if (GET_CODE (pat) == PARALLEL)
1477      {
1478        int last = XVECLEN (pat, 0) - 1;
1479        for (int i = 0; i < last; ++i)
1480          if (rtx set = simple_regno_set (XVECEXP (pat, 0, i), regno))
1481            return set;
1482
1483        pat = XVECEXP (pat, 0, last);
1484      }
1485
1486    if (GET_CODE (pat) == SET
1487        && covers_regno_no_parallel_p (SET_DEST (pat), regno))
1488      return pat;
1489
1490    return nullptr;
1491  }
1492
1493  /* Add all hard registers in X to *PSET.  */
1494  void
1495  find_all_hard_regs (const_rtx x, HARD_REG_SET *pset)
1496  {
1497    subrtx_iterator::array_type array;
1498    FOR_EACH_SUBRTX (iter, array, x, NONCONST)
1499      {
1500        const_rtx x = *iter;
1501        if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER)
1502          add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1503      }
1504  }
1505
1506  /* This function, called through note_stores, collects sets and
1507     clobbers of hard registers in a HARD_REG_SET, which is pointed to
1508     by DATA.  */
1509  void
1510  record_hard_reg_sets (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
1511  {
1512    HARD_REG_SET *pset = (HARD_REG_SET *) data;
1513    if (REG_P (x) && HARD_REGISTER_P (x))
1514      add_to_hard_reg_set (pset, GET_MODE (x), REGNO (x));
1515  }
1516
1517  /* Examine INSN, and compute the set of hard registers written by it.
1518     Store it in *PSET.  Should only be called after reload.
1519
1520     IMPLICIT is true if we should include registers that are fully-clobbered
1521     by calls.  This should be used with caution, since it doesn't include
1522     partially-clobbered registers.  */
1523  void
1524  find_all_hard_reg_sets (const rtx_insn *insn, HARD_REG_SET *pset, bool implicit)
1525  {
1526    rtx link;
1527
1528    CLEAR_HARD_REG_SET (*pset);
1529    note_stores (insn, record_hard_reg_sets, pset);
1530    if (CALL_P (insn) && implicit)
1531      *pset |= insn_callee_abi (insn).full_reg_clobbers ();
1532    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
1533      if (REG_NOTE_KIND (link) == REG_INC)
1534        record_hard_reg_sets (XEXP (link, 0), NULL, pset);
1535  }
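/* Illustrative sketch, not part of rtlanal.cc: a post-reload query built
   on find_all_hard_reg_sets.  The wrapper name is hypothetical.  */
static bool
insn_writes_hard_reg_p (const rtx_insn *insn, unsigned int regno)
{
  HARD_REG_SET written;
  /* Pass false for IMPLICIT: registers that are only call-clobbered are
     better obtained from the function_abi interface, as noted above.  */
  find_all_hard_reg_sets (insn, &written, false);
  return TEST_HARD_REG_BIT (written, regno);
}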
1536
1537/* Like record_hard_reg_sets, but called through note_uses. */
1538void
1539record_hard_reg_uses (rtx *px, void *data)
1540{
1541 find_all_hard_regs (*px, (HARD_REG_SET *) data);
1542}
1543
1544  /* Given an INSN, return a SET expression if this insn has only a single SET.
1545     It may also have CLOBBERs, USEs, or SETs whose output
1546     will not be used, which we ignore.  */
1547
1548  rtx
1549  single_set_2 (const rtx_insn *insn, const_rtx pat)
1550  {
1551    rtx set = NULL;
1552    int set_verified = 1;
1553    int i;
1554
1555    if (GET_CODE (pat) == PARALLEL)
1556      {
1557        for (i = 0; i < XVECLEN (pat, 0); i++)
1558          {
1559            rtx sub = XVECEXP (pat, 0, i);
1560            switch (GET_CODE (sub))
1561              {
1562              case USE:
1563              case CLOBBER:
1564                break;
1565
1566              case SET:
1567                /* We can consider insns having multiple sets, where all
1568                   but one are dead, as single set insns.  In the common case
1569                   only a single set is present in the pattern, so we want
1570                   to avoid checking for REG_UNUSED notes unless necessary.
1571
1572                   When we reach a set the first time, we just expect it to be
1573                   the single set we are looking for; only when more
1574                   sets are found in the insn do we check them.  */
1575                if (!set_verified)
1576                  {
1577                    if (find_reg_note (insn, REG_UNUSED, SET_DEST (set))
1578                        && !side_effects_p (set))
1579                      set = NULL;
1580                    else
1581                      set_verified = 1;
1582                  }
1583                if (!set)
1584                  set = sub, set_verified = 0;
1585                else if (!find_reg_note (insn, REG_UNUSED, SET_DEST (sub))
1586                         || side_effects_p (sub))
1587                  return NULL_RTX;
1588                break;
1589
1590              default:
1591                return NULL_RTX;
1592              }
1593          }
1594      }
1595    return set;
1596  }
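/* Illustrative sketch, not part of rtlanal.cc: passes normally reach
   single_set_2 through the single_set wrapper, and a common idiom is
   recognizing plain register-to-register copies.  The helper name is
   hypothetical.  */
static bool
reg_copy_p (const rtx_insn *insn, rtx *src_out, rtx *dest_out)
{
  rtx set = single_set (insn);
  if (!set || !REG_P (SET_SRC (set)) || !REG_P (SET_DEST (set)))
    return false;
  *src_out = SET_SRC (set);
  *dest_out = SET_DEST (set);
  return true;
}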
1597
1598  /* Given an INSN, return nonzero if it has more than one SET, else return
1599     zero.  */
1600
1601  int
1602  multiple_sets (const_rtx insn)
1603  {
1604    int found;
1605    int i;
1606
1607    /* INSN must be an insn.  */
1608    if (! INSN_P (insn))
1609      return 0;
1610
1611    /* Only a PARALLEL can have multiple SETs.  */
1612    if (GET_CODE (PATTERN (insn)) == PARALLEL)
1613      {
1614        for (i = 0, found = 0; i < XVECLEN (PATTERN (insn), 0); i++)
1615          if (GET_CODE (XVECEXP (PATTERN (insn), 0, i)) == SET)
1616            {
1617              /* If we have already found a SET, then return now.  */
1618              if (found)
1619                return 1;
1620              else
1621                found = 1;
1622            }
1623      }
1624
1625    /* Either zero or one SET.  */
1626    return 0;
1627  }
1628
1629  /* Return nonzero if the destination of SET equals the source
1630     and there are no side effects.  */
1631
1632  int
1633  set_noop_p (const_rtx set)
1634  {
1635    rtx src = SET_SRC (set);
1636    rtx dst = SET_DEST (set);
1637
1638    if (dst == pc_rtx && src == pc_rtx)
1639      return 1;
1640
1641    if (MEM_P (dst) && MEM_P (src))
1642      return rtx_equal_p (dst, src) && !side_effects_p (dst);
1643
1644    if (GET_CODE (dst) == ZERO_EXTRACT)
1645      return rtx_equal_p (XEXP (dst, 0), src)
1646             && !BITS_BIG_ENDIAN && XEXP (dst, 2) == const0_rtx
1647             && !side_effects_p (src);
1648
1649    if (GET_CODE (dst) == STRICT_LOW_PART)
1650      dst = XEXP (dst, 0);
1651
1652    if (GET_CODE (src) == SUBREG && GET_CODE (dst) == SUBREG)
1653      {
1654        if (maybe_ne (SUBREG_BYTE (src), SUBREG_BYTE (dst)))
1655          return 0;
1656        src = SUBREG_REG (src);
1657        dst = SUBREG_REG (dst);
1658        if (GET_MODE (src) != GET_MODE (dst))
1659          /* It is hard to tell whether subregs refer to the same bits, so act
1660             conservatively and return 0.  */
1661          return 0;
1662      }
1663
1664    /* It is a NOOP if the destination overlaps with the selected src vector
1665       elements.  */
1666    if (GET_CODE (src) == VEC_SELECT
1667        && REG_P (XEXP (src, 0)) && REG_P (dst)
1668        && HARD_REGISTER_P (XEXP (src, 0))
1669        && HARD_REGISTER_P (dst))
1670      {
1671        int i;
1672        rtx par = XEXP (src, 1);
1673        rtx src0 = XEXP (src, 0);
1674        poly_int64 c0;
1675        if (!poly_int_rtx_p (XVECEXP (par, 0, 0), &c0))
1676          return 0;
1677        poly_int64 offset = GET_MODE_UNIT_SIZE (GET_MODE (src0)) * c0;
1678
1679        for (i = 1; i < XVECLEN (par, 0); i++)
1680          {
1681            poly_int64 c0i;
1682            if (!poly_int_rtx_p (XVECEXP (par, 0, i), &c0i)
1683                || maybe_ne (c0i, c0 + i))
1684              return 0;
1685          }
1686        return
1687          REG_CAN_CHANGE_MODE_P (REGNO (dst), GET_MODE (src0), GET_MODE (dst))
1688          && simplify_subreg_regno (REGNO (src0), GET_MODE (src0),
1689                                    offset, GET_MODE (dst)) == (int) REGNO (dst);
1690      }
1691
1692    return (REG_P (src) && REG_P (dst)
1693            && REGNO (src) == REGNO (dst));
1694  }
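/* Illustrative sketch, not part of rtlanal.cc: the simplest case
   set_noop_p detects is a register copied to itself, as in
   (set (reg:SI 100) (reg:SI 100)).  REGNO_R is a hypothetical pseudo
   register number.  */
static bool
self_copy_is_noop_p (unsigned int regno_r)
{
  rtx reg = gen_rtx_REG (SImode, regno_r);
  return set_noop_p (gen_rtx_SET (reg, reg)) != 0;
}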
1695
1696  /* Return nonzero if an insn consists only of SETs, each of which only sets a
1697     value to itself.  */
1698
1699  int
1700  noop_move_p (const rtx_insn *insn)
1701  {
1702    rtx pat = PATTERN (insn);
1703
1704    if (INSN_CODE (insn) == NOOP_MOVE_INSN_CODE)
1705      return 1;
1706
1707    /* Check the code to be executed for COND_EXEC.  */
1708    if (GET_CODE (pat) == COND_EXEC)
1709      pat = COND_EXEC_CODE (pat);
1710
1711    if (GET_CODE (pat) == SET && set_noop_p (pat))
1712      return 1;
1713
1714    if (GET_CODE (pat) == PARALLEL)
1715      {
1716        int i;
1717        /* If nothing but SETs of registers to themselves,
1718           this insn can also be deleted.  */
1719        for (i = 0; i < XVECLEN (pat, 0); i++)
1720          {
1721            rtx tem = XVECEXP (pat, 0, i);
1722
1723            if (GET_CODE (tem) == USE || GET_CODE (tem) == CLOBBER)
1724              continue;
1725
1726            if (GET_CODE (tem) != SET || ! set_noop_p (tem))
1727              return 0;
1728          }
1729
1730        return 1;
1731      }
1732    return 0;
1733  }
1734
1735
1736  /* Return nonzero if register in range [REGNO, ENDREGNO)
1737     appears either explicitly or implicitly in X
1738     other than being stored into.
1739
1740     References contained within the substructure at LOC do not count.
1741     LOC may be zero, meaning don't ignore anything.  */
1742
1743  bool
1744  refers_to_regno_p (unsigned int regno, unsigned int endregno, const_rtx x,
1745                     rtx *loc)
1746  {
1747    int i;
1748    unsigned int x_regno;
1749    RTX_CODE code;
1750    const char *fmt;
1751
1752   repeat:
1753    /* The contents of a REG_NONNEG note is always zero, so we must come here
1754       upon repeat in case the last REG_NOTE is a REG_NONNEG note.  */
1755    if (x == 0)
1756      return false;
1757
1758    code = GET_CODE (x);
1759
1760    switch (code)
1761      {
1762      case REG:
1763        x_regno = REGNO (x);
1764
1765        /* If we are modifying the stack, frame, or argument pointer, it will
1766           clobber a virtual register.  In fact, we could be more precise,
1767           but it isn't worth it.  */
1768        if ((x_regno == STACK_POINTER_REGNUM
1769             || (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
1770                 && x_regno == ARG_POINTER_REGNUM)
1771             || x_regno == FRAME_POINTER_REGNUM)
1772            && regno >= FIRST_VIRTUAL_REGISTER && regno <= LAST_VIRTUAL_REGISTER)
1773          return true;
1774
1775        return endregno > x_regno && regno < END_REGNO (x);
1776
1777      case SUBREG:
1778        /* If this is a SUBREG of a hard reg, we can see exactly which
1779           registers are being modified.  Otherwise, handle normally.  */
1780        if (REG_P (SUBREG_REG (x))
1781            && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
1782          {
1783            unsigned int inner_regno = subreg_regno (x);
1784            unsigned int inner_endregno
1785              = inner_regno + (inner_regno < FIRST_PSEUDO_REGISTER
1786                               ? subreg_nregs (x) : 1);
1787
1788            return endregno > inner_regno && regno < inner_endregno;
1789          }
1790        break;
1791
1792      case CLOBBER:
1793      case SET:
1794        if (&SET_DEST (x) != loc
1795            /* Note setting a SUBREG counts as referring to the REG it is in for
1796               a pseudo but not for hard registers since we can
1797               treat each word individually.  */
1798            && ((GET_CODE (SET_DEST (x)) == SUBREG
1799                 && loc != &SUBREG_REG (SET_DEST (x))
1800                 && REG_P (SUBREG_REG (SET_DEST (x)))
1801                 && REGNO (SUBREG_REG (SET_DEST (x))) >= FIRST_PSEUDO_REGISTER
1802                 && refers_to_regno_p (regno, endregno,
1803                                       SUBREG_REG (SET_DEST (x)), loc))
1804                || (!REG_P (SET_DEST (x))
1805                    && refers_to_regno_p (regno, endregno, SET_DEST (x), loc))))
1806          return true;
1807
1808        if (code == CLOBBER || loc == &SET_SRC (x))
1809          return false;
1810        x = SET_SRC (x);
1811        goto repeat;
1812
1813      default:
1814        break;
1815      }
1816
1817    /* X does not match, so try its subexpressions.  */
1818
1819    fmt = GET_RTX_FORMAT (code);
1820    for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1821      {
1822        if (fmt[i] == 'e' && loc != &XEXP (x, i))
1823          {
1824            if (i == 0)
1825              {
1826                x = XEXP (x, 0);
1827                goto repeat;
1828              }
1829            else
1830              if (refers_to_regno_p (regno, endregno, XEXP (x, i), loc))
1831                return true;
1832          }
1833        else if (fmt[i] == 'E')
1834          {
1835            int j;
1836            for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1837              if (loc != &XVECEXP (x, i, j)
1838                  && refers_to_regno_p (regno, endregno, XVECEXP (x, i, j), loc))
1839                return true;
1840          }
1841      }
1842    return false;
1843  }
1844
1845  /* Nonzero if modifying X will affect IN.  If X is a register or a SUBREG,
1846     we check if any register number in X conflicts with the relevant register
1847     numbers.  If X is a constant, return 0.  If X is a MEM, return 1 iff IN
1848     contains a MEM (we don't bother checking for memory addresses that can't
1849     conflict because we expect this to be a rare case).  */
1850
1851  int
1852  reg_overlap_mentioned_p (const_rtx x, const_rtx in)
1853  {
1854    unsigned int regno, endregno;
1855
1856    /* If either argument is a constant, then modifying X cannot
1857       affect IN.  Here we look at IN; we could profitably combine
1858       CONSTANT_P (x) with the switch statement below.  */
1859    if (CONSTANT_P (in))
1860      return 0;
1861
1862   recurse:
1863    switch (GET_CODE (x))
1864      {
1865      case CLOBBER:
1866      case STRICT_LOW_PART:
1867      case ZERO_EXTRACT:
1868      case SIGN_EXTRACT:
1869        /* Overly conservative.  */
1870        x = XEXP (x, 0);
1871        goto recurse;
1872
1873      case SUBREG:
1874        regno = REGNO (SUBREG_REG (x));
1875        if (regno < FIRST_PSEUDO_REGISTER)
1876          regno = subreg_regno (x);
1877        endregno = regno + (regno < FIRST_PSEUDO_REGISTER
1878                            ? subreg_nregs (x) : 1);
1879        goto do_reg;
1880
1881      case REG:
1882        regno = REGNO (x);
1883        endregno = END_REGNO (x);
1884      do_reg:
1885        return refers_to_regno_p (regno, endregno, in, (rtx *) 0);
1886
1887      case MEM:
1888        {
1889          const char *fmt;
1890          int i;
1891
1892          if (MEM_P (in))
1893            return 1;
1894
1895          fmt = GET_RTX_FORMAT (GET_CODE (in));
1896          for (i = GET_RTX_LENGTH (GET_CODE (in)) - 1; i >= 0; i--)
1897            if (fmt[i] == 'e')
1898              {
1899                if (reg_overlap_mentioned_p (x, XEXP (in, i)))
1900                  return 1;
1901              }
1902            else if (fmt[i] == 'E')
1903              {
1904                int j;
1905                for (j = XVECLEN (in, i) - 1; j >= 0; --j)
1906                  if (reg_overlap_mentioned_p (x, XVECEXP (in, i, j)))
1907                    return 1;
1908              }
1909
1910          return 0;
1911        }
1912
1913      case SCRATCH:
1914      case PC:
1915        return reg_mentioned_p (x, in);
1916
1917      case PARALLEL:
1918        {
1919          int i;
1920
1921          /* If any register in here refers to it we return true.  */
1922          for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1923            if (XEXP (XVECEXP (x, 0, i), 0) != 0
1924                && reg_overlap_mentioned_p (XEXP (XVECEXP (x, 0, i), 0), in))
1925              return 1;
1926          return 0;
1927        }
1928
1929      default:
1930        gcc_assert (CONSTANT_P (x));
1931        return 0;
1932      }
1933  }
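/* Illustrative sketch, not part of rtlanal.cc: overlap is decided at the
   register-number level, so on x86-64 a write to (reg:SI ax) overlaps a
   use of (reg:DI ax) even though the two rtxes are not rtx_equal_p.  The
   wrapper name is hypothetical.  */
static bool
write_disturbs_p (rtx dest, rtx live_expr)
{
  return reg_overlap_mentioned_p (dest, live_expr) != 0;
}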
1934
1935  /* Call FUN on each register or MEM that is stored into or clobbered by X.
1936     (X would be the pattern of an insn).  DATA is an arbitrary pointer,
1937     ignored by note_stores, but passed to FUN.
1938
1939     FUN receives three arguments:
1940     1. the REG, MEM or PC being stored in or clobbered,
1941     2. the SET or CLOBBER rtx that does the store,
1942     3. the pointer DATA provided to note_stores.
1943
1944     If the item being stored in or clobbered is a SUBREG of a hard register,
1945     the SUBREG will be passed.  */
1946
1947  void
1948  note_pattern_stores (const_rtx x,
1949                       void (*fun) (rtx, const_rtx, void *), void *data)
1950  {
1951    int i;
1952
1953    if (GET_CODE (x) == COND_EXEC)
1954      x = COND_EXEC_CODE (x);
1955
1956    if (GET_CODE (x) == SET || GET_CODE (x) == CLOBBER)
1957      {
1958        rtx dest = SET_DEST (x);
1959
1960        while ((GET_CODE (dest) == SUBREG
1961                && (!REG_P (SUBREG_REG (dest))
1962                    || REGNO (SUBREG_REG (dest)) >= FIRST_PSEUDO_REGISTER))
1963               || GET_CODE (dest) == ZERO_EXTRACT
1964               || GET_CODE (dest) == STRICT_LOW_PART)
1965          dest = XEXP (dest, 0);
1966
1967        /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
1968           each of whose first operand is a register.  */
1969        if (GET_CODE (dest) == PARALLEL)
1970          {
1971            for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
1972              if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
1973                (*fun) (XEXP (XVECEXP (dest, 0, i), 0), x, data);
1974          }
1975        else
1976          (*fun) (dest, x, data);
1977      }
1978
1979    else if (GET_CODE (x) == PARALLEL)
1980      for (i = XVECLEN (x, 0) - 1; i >= 0; i--)
1981        note_pattern_stores (XVECEXP (x, 0, i), fun, data);
1982  }
1983
1984  /* Same, but for an instruction.  If the instruction is a call, include
1985     any CLOBBERs in its CALL_INSN_FUNCTION_USAGE.  */
1986
1987  void
1988  note_stores (const rtx_insn *insn,
1989               void (*fun) (rtx, const_rtx, void *), void *data)
1990  {
1991    if (CALL_P (insn))
1992      for (rtx link = CALL_INSN_FUNCTION_USAGE (insn);
1993           link; link = XEXP (link, 1))
1994        if (GET_CODE (XEXP (link, 0)) == CLOBBER)
1995          note_pattern_stores (XEXP (link, 0), fun, data);
1996    note_pattern_stores (PATTERN (insn), fun, data);
1997  }
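/* Illustrative sketch, not part of rtlanal.cc: a minimal note_stores
   callback using the three-argument signature documented above.  It
   counts how many destinations INSN stores to or clobbers.  */
static void
count_store (rtx, const_rtx, void *data)
{
  ++*(int *) data;
}

static int
num_stores_in_insn (const rtx_insn *insn)
{
  int n = 0;
  note_stores (insn, count_store, &n);
  return n;
}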
1998
1999  /* Like note_stores, but call FUN for each expression that is being
2000     referenced in PBODY, a pointer to the PATTERN of an insn.  We only call
2001     FUN for each expression, not any interior subexpressions.  FUN receives a
2002     pointer to the expression and the DATA passed to this function.
2003
2004     Note that this is not quite the same test as that done in reg_referenced_p
2005     since that considers something as being referenced if it is being
2006     partially set, while we do not.  */
2007
2008  void
2009  note_uses (rtx *pbody, void (*fun) (rtx *, void *), void *data)
2010  {
2011    rtx body = *pbody;
2012    int i;
2013
2014    switch (GET_CODE (body))
2015      {
2016      case COND_EXEC:
2017        (*fun) (&COND_EXEC_TEST (body), data);
2018        note_uses (&COND_EXEC_CODE (body), fun, data);
2019        return;
2020
2021      case PARALLEL:
2022        for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2023          note_uses (&XVECEXP (body, 0, i), fun, data);
2024        return;
2025
2026      case SEQUENCE:
2027        for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2028          note_uses (&PATTERN (XVECEXP (body, 0, i)), fun, data);
2029        return;
2030
2031      case USE:
2032        (*fun) (&XEXP (body, 0), data);
2033        return;
2034
2035      case ASM_OPERANDS:
2036        for (i = ASM_OPERANDS_INPUT_LENGTH (body) - 1; i >= 0; i--)
2037          (*fun) (&ASM_OPERANDS_INPUT (body, i), data);
2038        return;
2039
2040      case TRAP_IF:
2041        (*fun) (&TRAP_CONDITION (body), data);
2042        return;
2043
2044      case PREFETCH:
2045        (*fun) (&XEXP (body, 0), data);
2046        return;
2047
2048      case UNSPEC:
2049      case UNSPEC_VOLATILE:
2050        for (i = XVECLEN (body, 0) - 1; i >= 0; i--)
2051          (*fun) (&XVECEXP (body, 0, i), data);
2052        return;
2053
2054      case CLOBBER:
2055        if (MEM_P (XEXP (body, 0)))
2056          (*fun) (&XEXP (XEXP (body, 0), 0), data);
2057        return;
2058
2059      case SET:
2060        {
2061          rtx dest = SET_DEST (body);
2062
2063          /* For SETs we process everything in the source, plus registers in
2064             the memory expression of a store, and the operands of a
2065             ZERO_EXTRACT.  */
2066          (*fun) (&SET_SRC (body), data);
2067
2068          if (GET_CODE (dest) == ZERO_EXTRACT)
2069            {
2070              (*fun) (&XEXP (dest, 1), data);
2071              (*fun) (&XEXP (dest, 2), data);
2072            }
2073
2074          while (GET_CODE (dest) == SUBREG || GET_CODE (dest) == STRICT_LOW_PART)
2075            dest = XEXP (dest, 0);
2076
2077          if (MEM_P (dest))
2078            (*fun) (&XEXP (dest, 0), data);
2079        }
2080        return;
2081
2082      default:
2083        /* All the other possibilities never store.  */
2084        (*fun) (pbody, data);
2085        return;
2086      }
2087  }
2087
2088  /* Try to add a description of REG X to this object, stopping once
2089     the REF_END limit has been reached.  FLAGS is a bitmask of
2090     rtx_obj_reference flags that describe the context.  */
2091
2092  void
2093  rtx_properties::try_to_add_reg (const_rtx x, unsigned int flags)
2094  {
2095    if (REG_NREGS (x) != 1)
2096      flags |= rtx_obj_flags::IS_MULTIREG;
2097    machine_mode mode = GET_MODE (x);
2098    unsigned int start_regno = REGNO (x);
2099    unsigned int end_regno = END_REGNO (x);
2100    for (unsigned int regno = start_regno; regno < end_regno; ++regno)
2101      if (ref_iter != ref_end)
2102        *ref_iter++ = rtx_obj_reference (regno, flags, mode,
2103                                         regno - start_regno);
2104  }
2105
2106  /* Add a description of destination X to this object.  FLAGS is a bitmask
2107     of rtx_obj_reference flags that describe the context.
2108
2109     This routine accepts all rtxes that can legitimately appear in a
2110     SET_DEST.  */
2111
2112  void
2113  rtx_properties::try_to_add_dest (const_rtx x, unsigned int flags)
2114  {
2115    /* If we have a PARALLEL, SET_DEST is a list of EXPR_LIST expressions,
2116       each of whose first operand is a register.  */
2117    if (UNLIKELY (GET_CODE (x) == PARALLEL))
2118      {
2119        for (int i = XVECLEN (x, 0) - 1; i >= 0; --i)
2120          if (rtx dest = XEXP (XVECEXP (x, 0, i), 0))
2121            try_to_add_dest (dest, flags);
2122        return;
2123      }
2124
2125    unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2126    flags |= rtx_obj_flags::IS_WRITE;
2127    for (;;)
2128      if (GET_CODE (x) == ZERO_EXTRACT)
2129        {
2130          try_to_add_src (XEXP (x, 1), base_flags);
2131          try_to_add_src (XEXP (x, 2), base_flags);
2132          flags |= rtx_obj_flags::IS_READ;
2133          x = XEXP (x, 0);
2134        }
2135      else if (GET_CODE (x) == STRICT_LOW_PART)
2136        {
2137          flags |= rtx_obj_flags::IS_READ;
2138          x = XEXP (x, 0);
2139        }
2140      else if (GET_CODE (x) == SUBREG)
2141        {
2142          flags |= rtx_obj_flags::IN_SUBREG;
2143          if (read_modify_subreg_p (x))
2144            flags |= rtx_obj_flags::IS_READ;
2145          x = SUBREG_REG (x);
2146        }
2147      else
2148        break;
2149
2150    if (MEM_P (x))
2151      {
2152        if (ref_iter != ref_end)
2153          *ref_iter++ = rtx_obj_reference (MEM_REGNO, flags, GET_MODE (x));
2154
2155        unsigned int addr_flags = base_flags | rtx_obj_flags::IN_MEM_STORE;
2156        if (flags & rtx_obj_flags::IS_READ)
2157          addr_flags |= rtx_obj_flags::IN_MEM_LOAD;
2158        try_to_add_src (XEXP (x, 0), addr_flags);
2159        return;
2160      }
2161
2162    if (LIKELY (REG_P (x)))
2163      {
2164        /* We want to keep sp alive everywhere - by making all
2165           writes to sp also use sp.  */
2166        if (REGNO (x) == STACK_POINTER_REGNUM)
2167          flags |= rtx_obj_flags::IS_READ;
2168        try_to_add_reg (x, flags);
2169        return;
2170      }
2171  }
2172
2173  /* Try to add a description of source X to this object, stopping once
2174     the REF_END limit has been reached.  FLAGS is a bitmask of
2175     rtx_obj_reference flags that describe the context.
2176
2177     This routine accepts all rtxes that can legitimately appear in a SET_SRC.  */
2178
2179  void
2180  rtx_properties::try_to_add_src (const_rtx x, unsigned int flags)
2181  {
2182    unsigned int base_flags = flags & rtx_obj_flags::STICKY_FLAGS;
2183    subrtx_iterator::array_type array;
2184    FOR_EACH_SUBRTX (iter, array, x, NONCONST)
2185      {
2186        const_rtx x = *iter;
2187        rtx_code code = GET_CODE (x);
2188        if (code == REG)
2189          try_to_add_reg (x, flags | rtx_obj_flags::IS_READ);
2190        else if (code == MEM)
2191          {
2192            if (MEM_VOLATILE_P (x))
2193              has_volatile_refs = true;
2194
2195            if (!MEM_READONLY_P (x) && ref_iter != ref_end)
2196              {
2197                auto mem_flags = flags | rtx_obj_flags::IS_READ;
2198                *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags,
2199                                                 GET_MODE (x));
2200              }
2201
2202            try_to_add_src (XEXP (x, 0),
2203                            base_flags | rtx_obj_flags::IN_MEM_LOAD);
2204            iter.skip_subrtxes ();
2205          }
2206        else if (code == SUBREG)
2207          {
2208            try_to_add_src (SUBREG_REG (x), flags | rtx_obj_flags::IN_SUBREG);
2209            iter.skip_subrtxes ();
2210          }
2211        else if (code == UNSPEC_VOLATILE)
2212          has_volatile_refs = true;
2213        else if (code == ASM_INPUT || code == ASM_OPERANDS)
2214          {
2215            has_asm = true;
2216            if (MEM_VOLATILE_P (x))
2217              has_volatile_refs = true;
2218          }
2219        else if (code == PRE_INC
2220                 || code == PRE_DEC
2221                 || code == POST_INC
2222                 || code == POST_DEC
2223                 || code == PRE_MODIFY
2224                 || code == POST_MODIFY)
2225          {
2226            has_pre_post_modify = true;
2227
2228            unsigned int addr_flags = (base_flags
2229                                       | rtx_obj_flags::IS_PRE_POST_MODIFY
2230                                       | rtx_obj_flags::IS_READ);
2231            try_to_add_dest (XEXP (x, 0), addr_flags);
2232            if (code == PRE_MODIFY || code == POST_MODIFY)
2233              iter.substitute (XEXP (XEXP (x, 1), 1));
2234            else
2235              iter.skip_subrtxes ();
2236          }
2237        else if (code == CALL)
2238          has_call = true;
2239      }
2240  }
2241
2242  /* Try to add a description of instruction pattern PAT to this object,
2243     stopping once the REF_END limit has been reached.  */
2244
2245  void
2246  rtx_properties::try_to_add_pattern (const_rtx pat)
2247  {
2248    switch (GET_CODE (pat))
2249      {
2250      case COND_EXEC:
2251        try_to_add_src (COND_EXEC_TEST (pat));
2252        try_to_add_pattern (COND_EXEC_CODE (pat));
2253        break;
2254
2255      case PARALLEL:
2256        {
2257          int last = XVECLEN (pat, 0) - 1;
2258          for (int i = 0; i < last; ++i)
2259            try_to_add_pattern (XVECEXP (pat, 0, i));
2260          try_to_add_pattern (XVECEXP (pat, 0, last));
2261          break;
2262        }
2263
2264      case ASM_OPERANDS:
2265        for (int i = 0, len = ASM_OPERANDS_INPUT_LENGTH (pat); i < len; ++i)
2266          try_to_add_src (ASM_OPERANDS_INPUT (pat, i));
2267        break;
2268
2269      case CLOBBER:
2270        try_to_add_dest (XEXP (pat, 0), rtx_obj_flags::IS_CLOBBER);
2271        break;
2272
2273      case SET:
2274        try_to_add_dest (SET_DEST (pat));
2275        try_to_add_src (SET_SRC (pat));
2276        break;
2277
2278      default:
2279        /* All the other possibilities never store and can use a normal
2280           rtx walk.  This includes:
2281
2282           - USE
2283           - TRAP_IF
2284           - PREFETCH
2285           - UNSPEC
2286           - UNSPEC_VOLATILE.  */
2287        try_to_add_src (pat);
2288        break;
2289      }
2290  }
2291
2292  /* Try to add a description of INSN to this object, stopping once
2293     the REF_END limit has been reached.  INCLUDE_NOTES is true if the
2294     description should include REG_EQUAL and REG_EQUIV notes; all such
2295     references will then be marked with rtx_obj_flags::IN_NOTE.
2296
2297     For calls, this description includes all accesses in
2298     CALL_INSN_FUNCTION_USAGE.  It also includes all implicit accesses
2299     to global registers by the target function.  However, it does not
2300     include clobbers performed by the target function; callers that want
2301     this information should instead use the function_abi interface.  */
2302
2303  void
2304  rtx_properties::try_to_add_insn (const rtx_insn *insn, bool include_notes)
2305  {
2306    if (CALL_P (insn))
2307      {
2308        /* Non-const functions can read from global registers.  Impure
2309           functions can also set them.
2310
2311           Adding the global registers first removes a situation in which
2312           a fixed-form clobber of register R could come before a real set
2313           of register R.  */
2314        if (!hard_reg_set_empty_p (global_reg_set)
2315            && !RTL_CONST_CALL_P (insn))
2316          {
2317            unsigned int flags = rtx_obj_flags::IS_READ;
2318            if (!RTL_PURE_CALL_P (insn))
2319              flags |= rtx_obj_flags::IS_WRITE;
2320            for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2321              /* As a special case, the stack pointer is invariant across calls
2322                 even if it has been marked global; see the corresponding
2323                 handling in df_get_call_refs.  */
2324              if (regno != STACK_POINTER_REGNUM
2325                  && global_regs[regno]
2326                  && ref_iter != ref_end)
2327                *ref_iter++ = rtx_obj_reference (regno, flags,
2328                                                 reg_raw_mode[regno], 0);
2329          }
2330        /* Untyped calls implicitly set all function value registers.
2331           Again, we add them first in case the main pattern contains
2332           a fixed-form clobber.  */
2333        if (find_reg_note (insn, REG_UNTYPED_CALL, NULL_RTX))
2334          for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
2335            if (targetm.calls.function_value_regno_p (regno)
2336                && ref_iter != ref_end)
2337              *ref_iter++ = rtx_obj_reference (regno, rtx_obj_flags::IS_WRITE,
2338                                               reg_raw_mode[regno], 0);
2339        if (ref_iter != ref_end && !RTL_CONST_CALL_P (insn))
2340          {
2341            auto mem_flags = rtx_obj_flags::IS_READ;
2342            if (!RTL_PURE_CALL_P (insn))
2343              mem_flags |= rtx_obj_flags::IS_WRITE;
2344            *ref_iter++ = rtx_obj_reference (MEM_REGNO, mem_flags, BLKmode);
2345          }
2346        try_to_add_pattern (PATTERN (insn));
2347        for (rtx link = CALL_INSN_FUNCTION_USAGE (insn); link;
2348             link = XEXP (link, 1))
2349          {
2350            rtx x = XEXP (link, 0);
2351            if (GET_CODE (x) == CLOBBER)
2352              try_to_add_dest (XEXP (x, 0), rtx_obj_flags::IS_CLOBBER);
2353            else if (GET_CODE (x) == USE)
2354              try_to_add_src (XEXP (x, 0));
2355          }
2356      }
2357    else
2358      try_to_add_pattern (PATTERN (insn));
2359
2360    if (include_notes)
2361      for (rtx note = REG_NOTES (insn); note; note = XEXP (note, 1))
2362        if (REG_NOTE_KIND (note) == REG_EQUAL
2363            || REG_NOTE_KIND (note) == REG_EQUIV)
2364          try_to_add_note (XEXP (note, 0));
2365  }
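/* Illustrative sketch, not part of rtlanal.cc: the usual consumer of
   try_to_add_insn is the vec_rtx_properties wrapper from rtlanal.h,
   which collects every register and memory reference an insn makes.
   This sketch assumes the rtx_obj_reference layout declared there.  */
static bool
insn_writes_something_p (const rtx_insn *insn)
{
  vec_rtx_properties properties;
  properties.add_insn (insn, false);  /* false: skip REG_EQUAL notes.  */
  for (rtx_obj_reference ref : properties.refs ())
    if (ref.flags & rtx_obj_flags::IS_WRITE)
      return true;
  return false;
}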
2366
2367  /* Grow the storage by a bit while keeping the contents of the first
2368     START elements.  */
2369
2370  void
2371  vec_rtx_properties_base::grow (ptrdiff_t start)
2372  {
2373    /* The same heuristic that vec uses.  */
2374    ptrdiff_t new_elems = (ref_end - ref_begin) * 3 / 2;
2375    if (ref_begin == m_storage)
2376      {
2377        ref_begin = XNEWVEC (rtx_obj_reference, new_elems);
2378        if (start)
2379          memcpy (ref_begin, m_storage, start * sizeof (rtx_obj_reference));
2380      }
2381    else
2382      ref_begin = reinterpret_cast<rtx_obj_reference *>
2383        (xrealloc (ref_begin, new_elems * sizeof (rtx_obj_reference)));
2384    ref_iter = ref_begin + start;
2385    ref_end = ref_begin + new_elems;
2386  }
2387
2388  /* Return nonzero if X's old contents don't survive after INSN.
2389     This will be true if X is a register and either X dies in INSN or
2390     INSN entirely sets X.
2391
2392     "Entirely set" means set directly and not through a SUBREG or
2393     ZERO_EXTRACT, so no trace of the old contents remains.
2394     Likewise, REG_INC does not count.
2395
2396     REG may be a hard or pseudo reg.  Renumbering is not taken into account,
2397     but for this use that makes no difference, since regs don't overlap
2398     during their lifetimes.  Therefore, this function may be used
2399     at any time after deaths have been computed.
2400
2401     If REG is a hard reg that occupies multiple machine registers, this
2402     function will only return 1 if each of those registers will be replaced
2403     by INSN.  */
2404
2405  int
2406  dead_or_set_p (const rtx_insn *insn, const_rtx x)
2407  {
2408    unsigned int regno, end_regno;
2409    unsigned int i;
2410
2411    gcc_assert (REG_P (x));
2412
2413    regno = REGNO (x);
2414    end_regno = END_REGNO (x);
2415    for (i = regno; i < end_regno; i++)
2416      if (! dead_or_set_regno_p (insn, i))
2417        return 0;
2418
2419    return 1;
2420  }
2421
2422  /* Return TRUE iff DEST is a register or subreg of a register, is a
2423     complete rather than read-modify-write destination, and contains
2424     register TEST_REGNO.  */
2425
2426  static bool
2427  covers_regno_no_parallel_p (const_rtx dest, unsigned int test_regno)
2428  {
2429    unsigned int regno, endregno;
2430
2431    if (GET_CODE (dest) == SUBREG && !read_modify_subreg_p (dest))
2432      dest = SUBREG_REG (dest);
2433
2434    if (!REG_P (dest))
2435      return false;
2436
2437    regno = REGNO (dest);
2438    endregno = END_REGNO (dest);
2439    return (test_regno >= regno && test_regno < endregno);
2440  }
2441
2442  /* Like covers_regno_no_parallel_p, but also handles PARALLELs where
2443     any member matches the covers_regno_no_parallel_p criteria.  */
2444
2445  static bool
2446  covers_regno_p (const_rtx dest, unsigned int test_regno)
2447  {
2448    if (GET_CODE (dest) == PARALLEL)
2449      {
2450        /* Some targets place small structures in registers for return
2451           values of functions, and those registers are wrapped in
2452           PARALLELs that we may see as the destination of a SET.  */
2453        int i;
2454
2455        for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
2456          {
2457            rtx inner = XEXP (XVECEXP (dest, 0, i), 0);
2458            if (inner != NULL_RTX
2459                && covers_regno_no_parallel_p (inner, test_regno))
2460              return true;
2461          }
2462
2463        return false;
2464      }
2465    else
2466      return covers_regno_no_parallel_p (dest, test_regno);
2467  }
2468
2469  /* Utility function for dead_or_set_p to check an individual register.  */
2470
2471  int
2472  dead_or_set_regno_p (const rtx_insn *insn, unsigned int test_regno)
2473  {
2474    const_rtx pattern;
2475
2476    /* See if there is a death note for something that includes TEST_REGNO.  */
2477    if (find_regno_note (insn, REG_DEAD, test_regno))
2478      return 1;
2479
2480    if (CALL_P (insn)
2481        && find_regno_fusage (insn, CLOBBER, test_regno))
2482      return 1;
2483
2484    pattern = PATTERN (insn);
2485
2486    /* If a COND_EXEC is not executed, the value survives.  */
2487    if (GET_CODE (pattern) == COND_EXEC)
2488      return 0;
2489
2490    if (GET_CODE (pattern) == SET || GET_CODE (pattern) == CLOBBER)
2491      return covers_regno_p (SET_DEST (pattern), test_regno);
2492    else if (GET_CODE (pattern) == PARALLEL)
2493      {
2494        int i;
2495
2496        for (i = XVECLEN (pattern, 0) - 1; i >= 0; i--)
2497          {
2498            rtx body = XVECEXP (pattern, 0, i);
2499
2500            if (GET_CODE (body) == COND_EXEC)
2501              body = COND_EXEC_CODE (body);
2502
2503            if ((GET_CODE (body) == SET || GET_CODE (body) == CLOBBER)
2504                && covers_regno_p (SET_DEST (body), test_regno))
2505              return 1;
2506          }
2507      }
2508
2509    return 0;
2510  }
2511
2512  /* Return the reg-note of kind KIND in insn INSN, if there is one.
2513     If DATUM is nonzero, look for one whose datum is DATUM.  */
2514
2515  rtx
2516  find_reg_note (const_rtx insn, enum reg_note kind, const_rtx datum)
2517  {
2518    rtx link;
2519
2520    gcc_checking_assert (insn);
2521
2522    /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2523    if (! INSN_P (insn))
2524      return 0;
2525    if (datum == 0)
2526      {
2527        for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2528          if (REG_NOTE_KIND (link) == kind)
2529            return link;
2530        return 0;
2531      }
2532
2533    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2534      if (REG_NOTE_KIND (link) == kind && datum == XEXP (link, 0))
2535        return link;
2536    return 0;
2537  }
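/* Illustrative sketch, not part of rtlanal.cc: a typical reg-note query,
   fetching the constant recorded by a REG_EQUAL note if there is one
   (compare find_constant_src further down, which also checks the SET
   source itself).  */
static rtx
reg_equal_constant (const rtx_insn *insn)
{
  rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
  if (note && CONSTANT_P (XEXP (note, 0)))
    return XEXP (note, 0);
  return NULL_RTX;
}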
2538
2539  /* Return the reg-note of kind KIND in insn INSN which applies to register
2540     number REGNO, if any.  Return 0 if there is no such reg-note.  Note that
2541     the REGNO of this NOTE need not be REGNO if REGNO is a hard register;
2542     it might be the case that the note overlaps REGNO.  */
2543
2544  rtx
2545  find_regno_note (const_rtx insn, enum reg_note kind, unsigned int regno)
2546  {
2547    rtx link;
2548
2549    /* Ignore anything that is not an INSN, JUMP_INSN or CALL_INSN.  */
2550    if (! INSN_P (insn))
2551      return 0;
2552
2553    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2554      if (REG_NOTE_KIND (link) == kind
2555          /* Verify that it is a register, so that scratch and MEM won't cause a
2556             problem here.  */
2557          && REG_P (XEXP (link, 0))
2558          && REGNO (XEXP (link, 0)) <= regno
2559          && END_REGNO (XEXP (link, 0)) > regno)
2560        return link;
2561    return 0;
2562  }
2563
2564  /* Return a REG_EQUIV or REG_EQUAL note if insn has only a single set and
2565     has such a note.  */
2566
2567  rtx
2568  find_reg_equal_equiv_note (const_rtx insn)
2569  {
2570    rtx link;
2571
2572    if (!INSN_P (insn))
2573      return 0;
2574
2575    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2576      if (REG_NOTE_KIND (link) == REG_EQUAL
2577          || REG_NOTE_KIND (link) == REG_EQUIV)
2578        {
2579          /* FIXME: We should never have REG_EQUAL/REG_EQUIV notes on
2580             insns that have multiple sets.  Checking single_set to
2581             make sure of this is not the proper check, as explained
2582             in the comment in set_unique_reg_note.
2583
2584             This should be changed into an assert.  */
2585          if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
2586            return 0;
2587          return link;
2588        }
2589    return NULL;
2590  }
2591
2592  /* Check whether INSN is a single_set whose source is known to be
2593     equivalent to a constant.  Return that constant if so, otherwise
2594     return null.  */
2595
2596  rtx
2597  find_constant_src (const rtx_insn *insn)
2598  {
2599    rtx note, set, x;
2600
2601    set = single_set (insn);
2602    if (set)
2603      {
2604        x = avoid_constant_pool_reference (SET_SRC (set));
2605        if (CONSTANT_P (x))
2606          return x;
2607      }
2608
2609    note = find_reg_equal_equiv_note (insn);
2610    if (note && CONSTANT_P (XEXP (note, 0)))
2611      return XEXP (note, 0);
2612
2613    return NULL_RTX;
2614  }
2615
2616  /* Return true if DATUM, or any overlap of DATUM, of kind CODE is found
2617     in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2618
2619  int
2620  find_reg_fusage (const_rtx insn, enum rtx_code code, const_rtx datum)
2621  {
2622    /* If it's not a CALL_INSN, it can't possibly have a
2623       CALL_INSN_FUNCTION_USAGE field, so don't bother checking.  */
2624    if (!CALL_P (insn))
2625      return 0;
2626
2627    gcc_assert (datum);
2628
2629    if (!REG_P (datum))
2630      {
2631        rtx link;
2632
2633        for (link = CALL_INSN_FUNCTION_USAGE (insn);
2634             link;
2635             link = XEXP (link, 1))
2636          if (GET_CODE (XEXP (link, 0)) == code
2637              && rtx_equal_p (datum, XEXP (XEXP (link, 0), 0)))
2638            return 1;
2639      }
2640    else
2641      {
2642        unsigned int regno = REGNO (datum);
2643
2644        /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2645           to pseudo registers, so don't bother checking.  */
2646
2647        if (regno < FIRST_PSEUDO_REGISTER)
2648          {
2649            unsigned int end_regno = END_REGNO (datum);
2650            unsigned int i;
2651
2652            for (i = regno; i < end_regno; i++)
2653              if (find_regno_fusage (insn, code, i))
2654                return 1;
2655          }
2656      }
2657
2658    return 0;
2659  }
2660
2661  /* Return true if REGNO, or any overlap of REGNO, of kind CODE is found
2662     in the CALL_INSN_FUNCTION_USAGE information of INSN.  */
2663
2664  int
2665  find_regno_fusage (const_rtx insn, enum rtx_code code, unsigned int regno)
2666  {
2667    rtx link;
2668
2669    /* CALL_INSN_FUNCTION_USAGE information cannot contain references
2670       to pseudo registers, so don't bother checking.  */
2671
2672    if (regno >= FIRST_PSEUDO_REGISTER
2673        || !CALL_P (insn))
2674      return 0;
2675
2676    for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2677      {
2678        rtx op, reg;
2679
2680        if (GET_CODE (op = XEXP (link, 0)) == code
2681            && REG_P (reg = XEXP (op, 0))
2682            && REGNO (reg) <= regno
2683            && END_REGNO (reg) > regno)
2684          return 1;
2685      }
2686
2687    return 0;
2688  }
2689
2690
2691/* Return true if KIND is an integer REG_NOTE. */
2692
2693static bool
2694int_reg_note_p (enum reg_note kind)
2695{
2696 return kind == REG_BR_PROB;
2697}
2698
2699  /* Allocate a register note with kind KIND and datum DATUM.  LIST is
2700     stored as the pointer to the next register note.  */
2701
2702  rtx
2703  alloc_reg_note (enum reg_note kind, rtx datum, rtx list)
2704  {
2705    rtx note;
2706
2707    gcc_checking_assert (!int_reg_note_p (kind));
2708    switch (kind)
2709      {
2710      case REG_LABEL_TARGET:
2711      case REG_LABEL_OPERAND:
2712      case REG_TM:
2713        /* These types of register notes use an INSN_LIST rather than an
2714           EXPR_LIST, so that copying is done right and dumps look
2715           better.  */
2716        note = alloc_INSN_LIST (datum, list);
2717        PUT_REG_NOTE_KIND (note, kind);
2718        break;
2719
2720      default:
2721        note = alloc_EXPR_LIST (kind, datum, list);
2722        break;
2723      }
2724
2725    return note;
2726  }
2727
2728 /* Add register note with kind KIND and datum DATUM to INSN.  */
2729
2730 void
2731 add_reg_note (rtx insn, enum reg_note kind, rtx datum)
2732 {
2733   REG_NOTES (insn) = alloc_reg_note (kind, datum, REG_NOTES (insn));
2734 }
2735
2736 /* Add an integer register note with kind KIND and datum DATUM to INSN.  */
2737
2738 void
2739 add_int_reg_note (rtx_insn *insn, enum reg_note kind, int datum)
2740 {
2741   gcc_checking_assert (int_reg_note_p (kind));
2742   REG_NOTES (insn) = gen_rtx_INT_LIST ((machine_mode) kind,
2743                                        datum, REG_NOTES (insn));
2744 }
2745
2746 /* Add a REG_ARGS_SIZE note to INSN with value VALUE.  */
2747
2748 void
2749 add_args_size_note (rtx_insn *insn, poly_int64 value)
2750 {
2751   gcc_checking_assert (!find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX));
2752   add_reg_note (insn, REG_ARGS_SIZE, gen_int_mode (value, Pmode));
2753 }
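/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): attaching a REG_EQUAL note to record that INSN computes a
   known constant, then retrieving it with find_reg_note.  INSN and the
   constant 42 are placeholders.

     add_reg_note (insn, REG_EQUAL, GEN_INT (42));
     rtx note = find_reg_note (insn, REG_EQUAL, NULL_RTX);
     // note != NULL_RTX and XEXP (note, 0) is (const_int 42); small
     // CONST_INTs are shared, so the datum compares pointer-equal.  */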
2754
2755 /* Add a register note like NOTE to INSN.  */
2756
2757 void
2758 add_shallow_copy_of_reg_note (rtx_insn *insn, rtx note)
2759 {
2760   if (GET_CODE (note) == INT_LIST)
2761     add_int_reg_note (insn, REG_NOTE_KIND (note), XINT (note, 0));
2762   else
2763     add_reg_note (insn, REG_NOTE_KIND (note), XEXP (note, 0));
2764 }
2765
2766 /* Duplicate NOTE and return the copy.  */
2767 rtx
2768 duplicate_reg_note (rtx note)
2769 {
2770   reg_note kind = REG_NOTE_KIND (note);
2771
2772   if (GET_CODE (note) == INT_LIST)
2773     return gen_rtx_INT_LIST ((machine_mode) kind, XINT (note, 0), NULL_RTX);
2774   else if (GET_CODE (note) == EXPR_LIST)
2775     return alloc_reg_note (kind, copy_insn_1 (XEXP (note, 0)), NULL_RTX);
2776   else
2777     return alloc_reg_note (kind, XEXP (note, 0), NULL_RTX);
2778 }
2779
2780 /* Remove register note NOTE from the REG_NOTES of INSN.  */
2781
2782 void
2783 remove_note (rtx_insn *insn, const_rtx note)
2784 {
2785   rtx link;
2786
2787   if (note == NULL_RTX)
2788     return;
2789
2790   if (REG_NOTES (insn) == note)
2791     REG_NOTES (insn) = XEXP (note, 1);
2792   else
2793     for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
2794       if (XEXP (link, 1) == note)
2795         {
2796           XEXP (link, 1) = XEXP (note, 1);
2797           break;
2798         }
2799
2800   switch (REG_NOTE_KIND (note))
2801     {
2802     case REG_EQUAL:
2803     case REG_EQUIV:
2804       df_notes_rescan (insn);
2805       break;
2806     default:
2807       break;
2808     }
2809 }
2810
2811 /* Remove REG_EQUAL and/or REG_EQUIV notes if INSN has such notes.
2812    If NO_RESCAN is false and any notes were removed, call
2813    df_notes_rescan.  Return true if any note has been removed.  */
2814
2815 bool
2816 remove_reg_equal_equiv_notes (rtx_insn *insn, bool no_rescan)
2817 {
2818   rtx *loc;
2819   bool ret = false;
2820
2821   loc = &REG_NOTES (insn);
2822   while (*loc)
2823     {
2824       enum reg_note kind = REG_NOTE_KIND (*loc);
2825       if (kind == REG_EQUAL || kind == REG_EQUIV)
2826         {
2827           *loc = XEXP (*loc, 1);
2828           ret = true;
2829         }
2830       else
2831         loc = &XEXP (*loc, 1);
2832     }
2833   if (ret && !no_rescan)
2834     df_notes_rescan (insn);
2835   return ret;
2836 }
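/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): after a pass rewrites the SET_SRC of INSN, any
   REG_EQUAL/REG_EQUIV note may describe a stale value and is typically
   dropped like this, deferring the dataflow rescan to the caller:

     if (remove_reg_equal_equiv_notes (insn, true))
       df_notes_rescan (insn);   // rescan once, since NO_RESCAN was true  */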
2837
2838 /* Remove all REG_EQUAL and REG_EQUIV notes referring to REGNO.  */
2839
2840 void
2841 remove_reg_equal_equiv_notes_for_regno (unsigned int regno)
2842 {
2843   df_ref eq_use;
2844
2845   if (!df)
2846     return;
2847
2848   /* This loop is a little tricky.  We cannot just go down the chain because
2849      it is being modified by some actions in the loop.  So we just iterate
2850      over the head.  We plan to drain the list anyway.  */
2851   while ((eq_use = DF_REG_EQ_USE_CHAIN (regno)) != NULL)
2852     {
2853       rtx_insn *insn = DF_REF_INSN (eq_use);
2854       rtx note = find_reg_equal_equiv_note (insn);
2855
2856       /* This assert is generally triggered when someone deletes a REG_EQUAL
2857          or REG_EQUIV note by hacking the list manually rather than calling
2858          remove_note.  */
2859       gcc_assert (note);
2860
2861       remove_note (insn, note);
2862     }
2863 }
2864
2865 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE
2866    and return true if it is found.  A simple equality test is used to
2867    determine if NODE matches.  */
2868
2869 bool
2870 in_insn_list_p (const rtx_insn_list *listp, const rtx_insn *node)
2871 {
2872   const_rtx x;
2873
2874   for (x = listp; x; x = XEXP (x, 1))
2875     if (node == XEXP (x, 0))
2876       return true;
2877
2878   return false;
2879 }
2880
2881 /* Search LISTP (an INSN_LIST) for an entry whose first operand is NODE and
2882    remove that entry from the list if it is found.
2883
2884    A simple equality test is used to determine if NODE matches.  */
2885
2886 void
2887 remove_node_from_insn_list (const rtx_insn *node, rtx_insn_list **listp)
2888 {
2889   rtx_insn_list *temp = *listp;
2890   rtx_insn_list *prev = NULL;
2891
2892   while (temp)
2893     {
2894       if (node == temp->insn ())
2895         {
2896           /* Splice the node out of the list.  */
2897           if (prev)
2898             XEXP (prev, 1) = temp->next ();
2899           else
2900             *listp = temp->next ();
2901
2902           gcc_checking_assert (!in_insn_list_p (temp->next (), node));
2903           return;
2904         }
2905
2906       prev = temp;
2907       temp = temp->next ();
2908     }
2909 }
2910
2911 /* Nonzero if X contains any volatile instructions.  These are instructions
2912    which may cause unpredictable machine state, and thus no instructions
2913    or register uses should be moved or combined across them.
2914    This includes only volatile asms and UNSPEC_VOLATILE instructions.  */
2915
2916 int
2917 volatile_insn_p (const_rtx x)
2918 {
2919   const RTX_CODE code = GET_CODE (x);
2920   switch (code)
2921     {
2922     case LABEL_REF:
2923     case SYMBOL_REF:
2924     case CONST:
2925     CASE_CONST_ANY:
2926     case PC:
2927     case REG:
2928     case SCRATCH:
2929     case CLOBBER:
2930     case ADDR_VEC:
2931     case ADDR_DIFF_VEC:
2932     case CALL:
2933     case MEM:
2934       return 0;
2935
2936     case UNSPEC_VOLATILE:
2937       return 1;
2938
2939     case ASM_INPUT:
2940     case ASM_OPERANDS:
2941       if (MEM_VOLATILE_P (x))
2942         return 1;
2943
2944     default:
2945       break;
2946     }
2947
2948   /* Recursively scan the operands of this expression.  */
2949
2950   {
2951     const char *const fmt = GET_RTX_FORMAT (code);
2952     int i;
2953
2954     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
2955       {
2956         if (fmt[i] == 'e')
2957           {
2958             if (volatile_insn_p (XEXP (x, i)))
2959               return 1;
2960           }
2961         else if (fmt[i] == 'E')
2962           {
2963             int j;
2964             for (j = 0; j < XVECLEN (x, i); j++)
2965               if (volatile_insn_p (XVECEXP (x, i, j)))
2966                 return 1;
2967           }
2968       }
2969   }
2970   return 0;
2971 }
2972
2973 /* Nonzero if X contains any volatile memory references,
2974    UNSPEC_VOLATILE operations or volatile ASM_OPERANDS expressions.  */
2975
2976 int
2977 volatile_refs_p (const_rtx x)
2978 {
2979   const RTX_CODE code = GET_CODE (x);
2980   switch (code)
2981     {
2982     case LABEL_REF:
2983     case SYMBOL_REF:
2984     case CONST:
2985     CASE_CONST_ANY:
2986     case PC:
2987     case REG:
2988     case SCRATCH:
2989     case CLOBBER:
2990     case ADDR_VEC:
2991     case ADDR_DIFF_VEC:
2992       return 0;
2993
2994     case UNSPEC_VOLATILE:
2995       return 1;
2996
2997     case MEM:
2998     case ASM_INPUT:
2999     case ASM_OPERANDS:
3000       if (MEM_VOLATILE_P (x))
3001         return 1;
3002
3003     default:
3004       break;
3005     }
3006
3007   /* Recursively scan the operands of this expression.  */
3008
3009   {
3010     const char *const fmt = GET_RTX_FORMAT (code);
3011     int i;
3012
3013     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3014       {
3015         if (fmt[i] == 'e')
3016           {
3017             if (volatile_refs_p (XEXP (x, i)))
3018               return 1;
3019           }
3020         else if (fmt[i] == 'E')
3021           {
3022             int j;
3023             for (j = 0; j < XVECLEN (x, i); j++)
3024               if (volatile_refs_p (XVECEXP (x, i, j)))
3025                 return 1;
3026           }
3027       }
3028   }
3029   return 0;
3030 }
3031
3032 /* Similar to above, except that it also rejects register pre- and post-
3033    incrementing.  */
3034
3035 int
3036 side_effects_p (const_rtx x)
3037 {
3038   const RTX_CODE code = GET_CODE (x);
3039   switch (code)
3040     {
3041     case LABEL_REF:
3042     case SYMBOL_REF:
3043     case CONST:
3044     CASE_CONST_ANY:
3045     case PC:
3046     case REG:
3047     case SCRATCH:
3048     case ADDR_VEC:
3049     case ADDR_DIFF_VEC:
3050     case VAR_LOCATION:
3051       return 0;
3052
3053     case CLOBBER:
3054       /* Reject CLOBBER with a non-VOID mode.  These are made by combine.cc
3055          when some combination can't be done.  If we see one, don't think
3056          that we can simplify the expression.  */
3057       return (GET_MODE (x) != VOIDmode);
3058
3059     case PRE_INC:
3060     case PRE_DEC:
3061     case POST_INC:
3062     case POST_DEC:
3063     case PRE_MODIFY:
3064     case POST_MODIFY:
3065     case CALL:
3066     case UNSPEC_VOLATILE:
3067       return 1;
3068
3069     case MEM:
3070     case ASM_INPUT:
3071     case ASM_OPERANDS:
3072       if (MEM_VOLATILE_P (x))
3073         return 1;
3074
3075     default:
3076       break;
3077     }
3078
3079   /* Recursively scan the operands of this expression.  */
3080
3081   {
3082     const char *fmt = GET_RTX_FORMAT (code);
3083     int i;
3084
3085     for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3086       {
3087         if (fmt[i] == 'e')
3088           {
3089             if (side_effects_p (XEXP (x, i)))
3090               return 1;
3091           }
3092         else if (fmt[i] == 'E')
3093           {
3094             int j;
3095             for (j = 0; j < XVECLEN (x, i); j++)
3096               if (side_effects_p (XVECEXP (x, i, j)))
3097                 return 1;
3098           }
3099       }
3100   }
3101   return 0;
3102 }
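/* An illustrative contrast (an annotation on this report, not part of
   rtlanal.cc): a plain auto-increment address has a side effect without
   being volatile, so side_effects_p and volatile_refs_p can disagree on
   the same rtx.  Pmode and hard register 0 are placeholders.

     rtx r = gen_rtx_REG (Pmode, 0);
     rtx a = gen_rtx_POST_INC (Pmode, r);
     // side_effects_p (a) != 0  -- POST_INC modifies R
     // volatile_refs_p (a) == 0 -- no volatile MEM, asm or UNSPEC_VOLATILE  */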
3103
3104 /* Return nonzero if evaluating rtx X might cause a trap.
3105    FLAGS controls how to consider MEMs.  A nonzero value means the context
3106    of the access may have changed from the original, such that the
3107    address may have become invalid.  */
3108
3109 int
3110 may_trap_p_1 (const_rtx x, unsigned flags)
3111 {
3112   int i;
3113   enum rtx_code code;
3114   const char *fmt;
3115
3116   /* We make no distinction currently, but this function is part of
3117      the internal target-hooks ABI so we keep the parameter as
3118      "unsigned flags".  */
3119   bool code_changed = flags != 0;
3120
3121   if (x == 0)
3122     return 0;
3123   code = GET_CODE (x);
3124   switch (code)
3125     {
3126       /* Handle these cases quickly.  */
3127     CASE_CONST_ANY:
3128     case SYMBOL_REF:
3129     case LABEL_REF:
3130     case CONST:
3131     case PC:
3132     case REG:
3133     case SCRATCH:
3134       return 0;
3135
3136     case UNSPEC:
3137       return targetm.unspec_may_trap_p (x, flags);
3138
3139     case UNSPEC_VOLATILE:
3140     case ASM_INPUT:
3141     case TRAP_IF:
3142       return 1;
3143
3144     case ASM_OPERANDS:
3145       return MEM_VOLATILE_P (x);
3146
3147       /* Memory ref can trap unless it's a static var or a stack slot.  */
3148     case MEM:
3149       /* Recognize specific pattern of stack checking probes.  */
3150       if (flag_stack_check
3151           && MEM_VOLATILE_P (x)
3152           && XEXP (x, 0) == stack_pointer_rtx)
3153         return 1;
3154       if (/* MEM_NOTRAP_P only relates to the actual position of the memory
3155              reference; moving it out of context such as when moving code
3156              when optimizing, might cause its address to become invalid.  */
3157           code_changed
3158           || !MEM_NOTRAP_P (x))
3159         {
3160           poly_int64 size = MEM_SIZE_KNOWN_P (x) ? MEM_SIZE (x) : -1;
3161           return rtx_addr_can_trap_p_1 (XEXP (x, 0), 0, size,
3162                                         GET_MODE (x), code_changed);
3163         }
3164
3165       return 0;
3166
3167       /* Division by a non-constant might trap.  */
3168     case DIV:
3169     case MOD:
3170     case UDIV:
3171     case UMOD:
3172       if (HONOR_SNANS (x))
3173         return 1;
3174       if (FLOAT_MODE_P (GET_MODE (x)))
3175         return flag_trapping_math;
3176       if (!CONSTANT_P (XEXP (x, 1)) || (XEXP (x, 1) == const0_rtx))
3177         return 1;
3178       if (GET_CODE (XEXP (x, 1)) == CONST_VECTOR)
3179         {
3180           /* For CONST_VECTOR, return 1 if any element is or might be zero.  */
3181           unsigned int n_elts;
3182           rtx op = XEXP (x, 1);
3183           if (!GET_MODE_NUNITS (GET_MODE (op)).is_constant (&n_elts))
3184             {
3185               if (!CONST_VECTOR_DUPLICATE_P (op))
3186                 return 1;
3187               for (unsigned i = 0; i < (unsigned int) XVECLEN (op, 0); i++)
3188                 if (CONST_VECTOR_ENCODED_ELT (op, i) == const0_rtx)
3189                   return 1;
3190             }
3191           else
3192             for (unsigned i = 0; i < n_elts; i++)
3193               if (CONST_VECTOR_ELT (op, i) == const0_rtx)
3194                 return 1;
3195         }
3196       break;
3197
3198     case EXPR_LIST:
3199       /* An EXPR_LIST is used to represent a function call.  This
3200          certainly may trap.  */
3201       return 1;
3202
3203     case GE:
3204     case GT:
3205     case LE:
3206     case LT:
3207     case LTGT:
3208     case COMPARE:
3209       /* Some floating point comparisons may trap.  */
3210       if (!flag_trapping_math)
3211         break;
3212       /* ??? There is no machine independent way to check for tests that trap
3213          when COMPARE is used, though many targets do make this distinction.
3214          For instance, sparc uses CCFPE for compares which generate exceptions
3215          and CCFP for compares which do not generate exceptions.  */
3216       if (HONOR_NANS (x))
3217         return 1;
3218       /* But often the compare has some CC mode, so check operand
3219          modes as well.  */
3220       if (HONOR_NANS (XEXP (x, 0))
3221           || HONOR_NANS (XEXP (x, 1)))
3222         return 1;
3223       break;
3224
3225     case EQ:
3226     case NE:
3227       if (HONOR_SNANS (x))
3228         return 1;
3229       /* Often comparison is CC mode, so check operand modes.  */
3230       if (HONOR_SNANS (XEXP (x, 0))
3231           || HONOR_SNANS (XEXP (x, 1)))
3232         return 1;
3233       break;
3234
3235     case FIX:
3236     case UNSIGNED_FIX:
3237       /* Conversion of floating point might trap.  */
3238       if (flag_trapping_math && HONOR_NANS (XEXP (x, 0)))
3239         return 1;
3240       break;
3241
3242     case NEG:
3243     case ABS:
3244     case SUBREG:
3245     case VEC_MERGE:
3246     case VEC_SELECT:
3247     case VEC_CONCAT:
3248     case VEC_DUPLICATE:
3249       /* These operations don't trap even with floating point.  */
3250       break;
3251
3252     default:
3253       /* Any floating arithmetic may trap.  */
3254       if (FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
3255         return 1;
3256     }
3257
3258   fmt = GET_RTX_FORMAT (code);
3259   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3260     {
3261       if (fmt[i] == 'e')
3262         {
3263           if (may_trap_p_1 (XEXP (x, i), flags))
3264             return 1;
3265         }
3266       else if (fmt[i] == 'E')
3267         {
3268           int j;
3269           for (j = 0; j < XVECLEN (x, i); j++)
3270             if (may_trap_p_1 (XVECEXP (x, i, j), flags))
3271               return 1;
3272         }
3273     }
3274   return 0;
3275 }
3276
3277 /* Return nonzero if evaluating rtx X might cause a trap.  */
3278
3279 int
3280 may_trap_p (const_rtx x)
3281 {
3282   return may_trap_p_1 (x, 0);
3283 }
3284
3285 /* Same as above, but additionally return nonzero if evaluating rtx X might
3286    cause a fault.  We define a fault for the purpose of this function as an
3287    erroneous execution condition that cannot be encountered during the normal
3288    execution of a valid program; the typical example is an unaligned memory
3289    access on a strict alignment machine.  The compiler guarantees that it
3290    doesn't generate code that will fault from a valid program, but this
3291    guarantee doesn't mean anything for individual instructions.  Consider
3292    the following example:
3293
3294    struct S { int d; union { char *cp; int *ip; }; };
3295
3296    int foo(struct S *s)
3297    {
3298      if (s->d == 1)
3299        return *s->ip;
3300      else
3301        return *s->cp;
3302    }
3303
3304    on a strict alignment machine.  In a valid program, foo will never be
3305    invoked on a structure for which d is equal to 1 and the underlying
3306    unique field of the union not aligned on a 4-byte boundary, but the
3307    expression *s->ip might cause a fault if considered individually.
3308
3309    At the RTL level, potentially problematic expressions will almost always
3310    satisfy may_trap_p; for example, the above dereference can be emitted as
3311    (mem:SI (reg:P)) and this expression is may_trap_p for a generic register.
3312    However, suppose that foo is inlined in a caller that causes s->cp to
3313    point to a local character variable and guarantees that s->d is not set
3314    to 1; foo may have been effectively translated into pseudo-RTL as:
3315
3316    if ((reg:SI) == 1)
3317      (set (reg:SI) (mem:SI (%fp - 7)))
3318    else
3319      (set (reg:QI) (mem:QI (%fp - 7)))
3320
3321    Now (mem:SI (%fp - 7)) is considered as not may_trap_p since it is a
3322    memory reference to a stack slot, but it will certainly cause a fault
3323    on a strict alignment machine.  */
3324
3325 int
3326 may_trap_or_fault_p (const_rtx x)
3327 {
3328   return may_trap_p_1 (x, 1);
3329 }
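/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): integer division may trap when the divisor is a register
   (it might be zero) but not when it is a nonzero constant, per the DIV
   case above.  SImode and pseudo 100 are placeholders.

     rtx r = gen_rtx_REG (SImode, 100);
     rtx d1 = gen_rtx_DIV (SImode, r, r);           // may_trap_p (d1) != 0
     rtx d2 = gen_rtx_DIV (SImode, r, GEN_INT (4)); // may_trap_p (d2) == 0  */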
3330
3331 /* Replace any occurrence of FROM in X with TO.  The function does
3332    not enter into CONST_DOUBLE for the replace.
3333
3334    Note that copying is not done so X must not be shared unless all copies
3335    are to be modified.
3336
3337    ALL_REGS is true if we want to replace all REGs equal to FROM, not just
3338    those pointer-equal ones.  */
3339
3340 rtx
3341 replace_rtx (rtx x, rtx from, rtx to, bool all_regs)
3342 {
3343   int i, j;
3344   const char *fmt;
3345
3346   if (x == from)
3347     return to;
3348
3349   /* Allow this function to make replacements in EXPR_LISTs.  */
3350   if (x == 0)
3351     return 0;
3352
3353   if (all_regs
3354       && REG_P (x)
3355       && REG_P (from)
3356       && REGNO (x) == REGNO (from))
3357     {
3358       gcc_assert (GET_MODE (x) == GET_MODE (from));
3359       return to;
3360     }
3361   else if (GET_CODE (x) == SUBREG)
3362     {
3363       rtx new_rtx = replace_rtx (SUBREG_REG (x), from, to, all_regs);
3364
3365       if (CONST_SCALAR_INT_P (new_rtx))
3366         {
3367           x = simplify_subreg (GET_MODE (x), new_rtx,
3368                                GET_MODE (SUBREG_REG (x)),
3369                                SUBREG_BYTE (x));
3370           gcc_assert (x);
3371         }
3372       else
3373         SUBREG_REG (x) = new_rtx;
3374
3375       return x;
3376     }
3377   else if (GET_CODE (x) == ZERO_EXTEND)
3378     {
3379       rtx new_rtx = replace_rtx (XEXP (x, 0), from, to, all_regs);
3380
3381       if (CONST_SCALAR_INT_P (new_rtx))
3382         {
3383           x = simplify_unary_operation (ZERO_EXTEND, GET_MODE (x),
3384                                         new_rtx, GET_MODE (XEXP (x, 0)));
3385           gcc_assert (x);
3386         }
3387       else
3388         XEXP (x, 0) = new_rtx;
3389
3390       return x;
3391     }
3392
3393   fmt = GET_RTX_FORMAT (GET_CODE (x));
3394   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3395     {
3396       if (fmt[i] == 'e')
3397         XEXP (x, i) = replace_rtx (XEXP (x, i), from, to, all_regs);
3398       else if (fmt[i] == 'E')
3399         for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3400           XVECEXP (x, i, j) = replace_rtx (XVECEXP (x, i, j),
3401                                            from, to, all_regs);
3402     }
3403
3404   return x;
3405 }
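/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): because replace_rtx rewrites X in place, substitute into
   a copy whenever the original pattern might be shared.  PAT, OLD_REG
   and NEW_REG are placeholders.

     rtx pat_copy = copy_rtx (pat);
     pat_copy = replace_rtx (pat_copy, old_reg, new_reg, false);  */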
3406
3407 /* Replace occurrences of the OLD_LABEL in *LOC with NEW_LABEL.  Also track
3408    the change in LABEL_NUSES if UPDATE_LABEL_NUSES.  */
3409
3410 void
3411 replace_label (rtx *loc, rtx old_label, rtx new_label, bool update_label_nuses)
3412 {
3413   /* Handle jump tables specially, since ADDR_{DIFF_,}VECs can be long.  */
3414   rtx x = *loc;
3415   if (JUMP_TABLE_DATA_P (x))
3416     {
3417       x = PATTERN (x);
3418       rtvec vec = XVEC (x, GET_CODE (x) == ADDR_DIFF_VEC);
3419       int len = GET_NUM_ELEM (vec);
3420       for (int i = 0; i < len; ++i)
3421         {
3422           rtx ref = RTVEC_ELT (vec, i);
3423           if (XEXP (ref, 0) == old_label)
3424             {
3425               XEXP (ref, 0) = new_label;
3426               if (update_label_nuses)
3427                 {
3428                   ++LABEL_NUSES (new_label);
3429                   --LABEL_NUSES (old_label);
3430                 }
3431             }
3432         }
3433       return;
3434     }
3435
3436   /* If this is a JUMP_INSN, then we also need to fix the JUMP_LABEL
3437      field.  This is not handled by the iterator because it doesn't
3438      handle unprinted ('0') fields.  */
3439   if (JUMP_P (x) && JUMP_LABEL (x) == old_label)
3440     JUMP_LABEL (x) = new_label;
3441
3442   subrtx_ptr_iterator::array_type array;
3443   FOR_EACH_SUBRTX_PTR (iter, array, loc, ALL)
3444     {
3445       rtx *loc = *iter;
3446       if (rtx x = *loc)
3447         {
3448           if (GET_CODE (x) == SYMBOL_REF
3449               && CONSTANT_POOL_ADDRESS_P (x))
3450             {
3451               rtx c = get_pool_constant (x);
3452               if (rtx_referenced_p (old_label, c))
3453                 {
3454                   /* Create a copy of constant C; replace the label inside
3455                      but do not update LABEL_NUSES because uses in constant pool
3456                      are not counted.  */
3457                   rtx new_c = copy_rtx (c);
3458                   replace_label (&new_c, old_label, new_label, false);
3459
3460                   /* Add the new constant NEW_C to constant pool and replace
3461                      the old reference to constant by new reference.  */
3462                   rtx new_mem = force_const_mem (get_pool_mode (x), new_c);
3463                   *loc = replace_rtx (x, x, XEXP (new_mem, 0));
3464                 }
3465             }
3466
3467           if ((GET_CODE (x) == LABEL_REF
3468                || GET_CODE (x) == INSN_LIST)
3469               && XEXP (x, 0) == old_label)
3470             {
3471               XEXP (x, 0) = new_label;
3472               if (update_label_nuses)
3473                 {
3474                   ++LABEL_NUSES (new_label);
3475                   --LABEL_NUSES (old_label);
3476                 }
3477             }
3478         }
3479     }
3480 }
3481
3482 void
3483 replace_label_in_insn (rtx_insn *insn, rtx_insn *old_label,
3484                        rtx_insn *new_label, bool update_label_nuses)
3485 {
3486   rtx insn_as_rtx = insn;
3487   replace_label (&insn_as_rtx, old_label, new_label, update_label_nuses);
3488   gcc_checking_assert (insn_as_rtx == insn);
3489 }
3490
3491 /* Return true if X is referenced in BODY.  */
3492
3493 bool
3494 rtx_referenced_p (const_rtx x, const_rtx body)
3495 {
3496   subrtx_iterator::array_type array;
3497   FOR_EACH_SUBRTX (iter, array, body, ALL)
3498     if (const_rtx y = *iter)
3499       {
3500         /* Check if a label_ref Y refers to label X.  */
3501         if (GET_CODE (y) == LABEL_REF
3502             && LABEL_P (x)
3503             && label_ref_label (y) == x)
3504           return true;
3505
3506         if (rtx_equal_p (x, y))
3507           return true;
3508
3509         /* If Y is a reference to pool constant traverse the constant.  */
3510         if (GET_CODE (y) == SYMBOL_REF
3511             && CONSTANT_POOL_ADDRESS_P (y))
3512           iter.substitute (get_pool_constant (y));
3513       }
3514   return false;
3515 }
3516
3517 /* If INSN is a tablejump return true and store the label (before jump table)
3518    to *LABELP and the jump table to *TABLEP.  LABELP and TABLEP may be NULL.  */
3519
3520 bool
3521 tablejump_p (const rtx_insn *insn, rtx_insn **labelp,
3522              rtx_jump_table_data **tablep)
3523 {
3524   if (!JUMP_P (insn))
3525     return false;
3526
3527   rtx target = JUMP_LABEL (insn);
3528   if (target == NULL_RTX || ANY_RETURN_P (target))
3529     return false;
3530
3531   rtx_insn *label = as_a<rtx_insn *> (target);
3532   rtx_insn *table = next_insn (label);
3533   if (table == NULL_RTX || !JUMP_TABLE_DATA_P (table))
3534     return false;
3535
3536   if (labelp)
3537     *labelp = label;
3538   if (tablep)
3539     *tablep = as_a <rtx_jump_table_data *> (table);
3540   return true;
3541 }
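/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): walking the label references of a jump table once INSN
   has been recognized.  INSN is a placeholder.

     rtx_jump_table_data *table;
     if (tablejump_p (insn, NULL, &table))
       {
         rtvec labels = table->get_labels ();
         for (int i = 0; i < GET_NUM_ELEM (labels); i++)
           ... RTVEC_ELT (labels, i) ...;  // each element is a LABEL_REF
       }  */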
3542
3543 /* For INSN known to satisfy tablejump_p, determine if it actually is a
3544    CASESI.  Return the insn pattern if so, NULL_RTX otherwise.  */
3545
3546 rtx
3547 tablejump_casesi_pattern (const rtx_insn *insn)
3548 {
3549   rtx tmp;
3550
3551   if ((tmp = single_set (insn)) != NULL
3552       && SET_DEST (tmp) == pc_rtx
3553       && GET_CODE (SET_SRC (tmp)) == IF_THEN_ELSE
3554       && GET_CODE (XEXP (SET_SRC (tmp), 2)) == LABEL_REF)
3555     return tmp;
3556
3557   return NULL_RTX;
3558 }
3559
3560 /* A subroutine of computed_jump_p, return 1 if X contains a REG or MEM or
3561    constant that is not in the constant pool and not in the condition
3562    of an IF_THEN_ELSE.  */
3563
3564 static int
3565 computed_jump_p_1 (const_rtx x)
3566 {
3567   const enum rtx_code code = GET_CODE (x);
3568   int i, j;
3569   const char *fmt;
3570
3571   switch (code)
3572     {
3573     case LABEL_REF:
3574     case PC:
3575       return 0;
3576
3577     case CONST:
3578     CASE_CONST_ANY:
3579     case SYMBOL_REF:
3580     case REG:
3581       return 1;
3582
3583     case MEM:
3584       return ! (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
3585                 && CONSTANT_POOL_ADDRESS_P (XEXP (x, 0)));
3586
3587     case IF_THEN_ELSE:
3588       return (computed_jump_p_1 (XEXP (x, 1))
3589               || computed_jump_p_1 (XEXP (x, 2)));
3590
3591     default:
3592       break;
3593     }
3594
3595   fmt = GET_RTX_FORMAT (code);
3596   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3597     {
3598       if (fmt[i] == 'e'
3599           && computed_jump_p_1 (XEXP (x, i)))
3600         return 1;
3601
3602       else if (fmt[i] == 'E')
3603         for (j = 0; j < XVECLEN (x, i); j++)
3604           if (computed_jump_p_1 (XVECEXP (x, i, j)))
3605             return 1;
3606     }
3607
3608   return 0;
3609 }
3610
3611 /* Return nonzero if INSN is an indirect jump (aka computed jump).
3612
3613    Tablejumps and casesi insns are not considered indirect jumps;
3614    we can recognize them by a (use (label_ref)).  */
3615
3616 int
3617 computed_jump_p (const rtx_insn *insn)
3618 {
3619   int i;
3620   if (JUMP_P (insn))
3621     {
3622       rtx pat = PATTERN (insn);
3623
3624       /* If we have a JUMP_LABEL set, we're not a computed jump.  */
3625       if (JUMP_LABEL (insn) != NULL)
3626         return 0;
3627
3628       if (GET_CODE (pat) == PARALLEL)
3629         {
3630           int len = XVECLEN (pat, 0);
3631           int has_use_labelref = 0;
3632
3633           for (i = len - 1; i >= 0; i--)
3634             if (GET_CODE (XVECEXP (pat, 0, i)) == USE
3635                 && (GET_CODE (XEXP (XVECEXP (pat, 0, i), 0))
3636                     == LABEL_REF))
3637               {
3638                 has_use_labelref = 1;
3639                 break;
3640               }
3641
3642           if (! has_use_labelref)
3643             for (i = len - 1; i >= 0; i--)
3644               if (GET_CODE (XVECEXP (pat, 0, i)) == SET
3645                   && SET_DEST (XVECEXP (pat, 0, i)) == pc_rtx
3646                   && computed_jump_p_1 (SET_SRC (XVECEXP (pat, 0, i))))
3647                 return 1;
3648         }
3649       else if (GET_CODE (pat) == SET
3650                && SET_DEST (pat) == pc_rtx
3651                && computed_jump_p_1 (SET_SRC (pat)))
3652         return 1;
3653     }
3654   return 0;
3655 }
3656
3657
3658
3659 /* MEM has a PRE/POST-INC/DEC/MODIFY address X.  Extract the operands of
3660    the equivalent add insn and pass the result to FN, using DATA as the
3661    final argument.  */
3662
3663 static int
3664 for_each_inc_dec_find_inc_dec (rtx mem, for_each_inc_dec_fn fn, void *data)
3665 {
3666   rtx x = XEXP (mem, 0);
3667   switch (GET_CODE (x))
3668     {
3669     case PRE_INC:
3670     case POST_INC:
3671       {
3672         poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3673         rtx r1 = XEXP (x, 0);
3674         rtx c = gen_int_mode (size, GET_MODE (r1));
3675         return fn (mem, x, r1, r1, c, data);
3676       }
3677
3678     case PRE_DEC:
3679     case POST_DEC:
3680       {
3681         poly_int64 size = GET_MODE_SIZE (GET_MODE (mem));
3682         rtx r1 = XEXP (x, 0);
3683         rtx c = gen_int_mode (-size, GET_MODE (r1));
3684         return fn (mem, x, r1, r1, c, data);
3685       }
3686
3687     case PRE_MODIFY:
3688     case POST_MODIFY:
3689       {
3690         rtx r1 = XEXP (x, 0);
3691         rtx add = XEXP (x, 1);
3692         return fn (mem, x, r1, add, NULL, data);
3693       }
3694
3695     default:
3696       gcc_unreachable ();
3697     }
3698 }
3699
3700 /* Traverse X looking for MEMs that have autoinc addresses.
3701    For each such autoinc operation found, call FN, passing it
3702    the innermost enclosing MEM, the operation itself, the RTX modified
3703    by the operation, two RTXs (the second may be NULL) that, once
3704    added, represent the value to be held by the modified RTX
3705    afterwards, and DATA.  FN is to return 0 to continue the
3706    traversal or any other value to have it returned to the caller of
3707    for_each_inc_dec.  */
3708
3709 int
3710 for_each_inc_dec (rtx x,
3711                   for_each_inc_dec_fn fn,
3712                   void *data)
3713 {
3714   subrtx_var_iterator::array_type array;
3715   FOR_EACH_SUBRTX_VAR (iter, array, x, NONCONST)
3716     {
3717       rtx mem = *iter;
3718       if (mem
3719           && MEM_P (mem)
3720           && GET_RTX_CLASS (GET_CODE (XEXP (mem, 0))) == RTX_AUTOINC)
3721         {
3722           int res = for_each_inc_dec_find_inc_dec (mem, fn, data);
3723           if (res != 0)
3724             return res;
3725           iter.skip_subrtxes ();
3726         }
3727     }
3728   return 0;
3729 }
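/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): a callback that counts the auto-increments in a pattern.
   The callback matches the for_each_inc_dec_fn type from rtl.h; INSN is
   a placeholder.

     static int
     count_inc_dec (rtx, rtx, rtx, rtx, rtx, void *arg)
     {
       ++*(int *) arg;
       return 0;                // 0 means continue the traversal
     }

     int n = 0;
     for_each_inc_dec (PATTERN (insn), count_inc_dec, &n);  */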
3730
3731
3732 /* Searches X for any reference to REGNO, returning the rtx of the
3733    reference found if any.  Otherwise, returns NULL_RTX.  */
3734
3735 rtx
3736 regno_use_in (unsigned int regno, rtx x)
3737 {
3738   const char *fmt;
3739   int i, j;
3740   rtx tem;
3741
3742   if (REG_P (x) && REGNO (x) == regno)
3743     return x;
3744
3745   fmt = GET_RTX_FORMAT (GET_CODE (x));
3746   for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
3747     {
3748       if (fmt[i] == 'e')
3749         {
3750           if ((tem = regno_use_in (regno, XEXP (x, i))))
3751             return tem;
3752         }
3753       else if (fmt[i] == 'E')
3754         for (j = XVECLEN (x, i) - 1; j >= 0; j--)
3755           if ((tem = regno_use_in (regno, XVECEXP (x, i, j))))
3756             return tem;
3757     }
3758
3759   return NULL_RTX;
3760 }
3761
3762 /* Return a value indicating whether OP, an operand of a commutative
3763    operation, is preferred as the first or second operand.  The more
3764    positive the value, the stronger the preference for being the first
3765    operand.  */
3766
3767 int
3768 commutative_operand_precedence (rtx op)
3769 {
3770   enum rtx_code code = GET_CODE (op);
3771
3772   /* Constants always become the second operand.  Prefer "nice" constants.  */
3773   if (code == CONST_INT)
3774     return -10;
3775   if (code == CONST_WIDE_INT)
3776     return -9;
3777   if (code == CONST_POLY_INT)
3778     return -8;
3779   if (code == CONST_DOUBLE)
3780     return -8;
3781   if (code == CONST_FIXED)
3782     return -8;
3783   op = avoid_constant_pool_reference (op);
3784   code = GET_CODE (op);
3785
3786   switch (GET_RTX_CLASS (code))
3787     {
3788     case RTX_CONST_OBJ:
3789       if (code == CONST_INT)
3790         return -7;
3791       if (code == CONST_WIDE_INT)
3792         return -6;
3793       if (code == CONST_POLY_INT)
3794         return -5;
3795       if (code == CONST_DOUBLE)
3796         return -5;
3797       if (code == CONST_FIXED)
3798         return -5;
3799       return -4;
3800
3801     case RTX_EXTRA:
3802       /* SUBREGs of objects should come second.  */
3803       if (code == SUBREG && OBJECT_P (SUBREG_REG (op)))
3804         return -3;
3805       return 0;
3806
3807     case RTX_OBJ:
3808       /* Complex expressions should be the first, so decrease priority
3809          of objects.  Prefer pointer objects over non pointer objects.  */
3810       if ((REG_P (op) && REG_POINTER (op))
3811           || (MEM_P (op) && MEM_POINTER (op)))
3812         return -1;
3813       return -2;
3814
3815     case RTX_COMM_ARITH:
3816       /* Prefer operands that are themselves commutative to be first.
3817          This helps to make things linear.  In particular,
3818          (and (and (reg) (reg)) (not (reg))) is canonical.  */
3819       return 4;
3820
3821     case RTX_BIN_ARITH:
3822       /* If only one operand is a binary expression, it will be the first
3823          operand.  In particular, (plus (minus (reg) (reg)) (neg (reg)))
3824          is canonical, although it will usually be further simplified.  */
3825       return 2;
3826
3827     case RTX_UNARY:
3828       /* Then prefer NEG and NOT.  */
3829       if (code == NEG || code == NOT)
3830         return 1;
3831       /* FALLTHRU */
3832
3833     default:
3834       return 0;
3835     }
3836 }
3837
3838 /* Return true iff it is necessary to swap operands of commutative operation
3839    in order to canonicalize expression.  */
3840
3841 bool
3842 swap_commutative_operands_p (rtx x, rtx y)
3843 {
3844   return (commutative_operand_precedence (x)
3845           < commutative_operand_precedence (y));
3846 }
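/* An illustrative sketch (an annotation on this report, not part of
   rtlanal.cc): the usual canonicalization step for a commutative
   operation, as performed by callers of this predicate.  OP0 and OP1
   are placeholders.

     if (swap_commutative_operands_p (op0, op1))
       std::swap (op0, op1);    // constants and other low-precedence
                                // operands end up second  */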
3847
3848 /* Return 1 if X is an autoincrement side effect and the register is
3849    not the stack pointer.  */
3850 int
3851 auto_inc_p (const_rtx x)
3852 {
3853   switch (GET_CODE (x))
3854     {
3855     case PRE_INC:
3856     case POST_INC:
3857     case PRE_DEC:
3858     case POST_DEC:
3859     case PRE_MODIFY:
3860     case POST_MODIFY:
3861       /* There are no REG_INC notes for SP.  */
3862       if (XEXP (x, 0) != stack_pointer_rtx)
3863         return 1;
3864     default:
3865       break;
3866     }
3867   return 0;
3868 }
3869
3870 /* Return nonzero if IN contains a piece of rtl that has the address LOC.  */
3871 int
3872 loc_mentioned_in_p (rtx *loc, const_rtx in)
3873 {
3874   enum rtx_code code;
3875   const char *fmt;
3876   int i, j;
3877
3878   if (!in)
3879     return 0;
3880
3881   code = GET_CODE (in);
3882   fmt = GET_RTX_FORMAT (code);
3883   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
3884     {
3885       if (fmt[i] == 'e')
3886         {
3887           if (loc == &XEXP (in, i) || loc_mentioned_in_p (loc, XEXP (in, i)))
3888             return 1;
3889         }
3890       else if (fmt[i] == 'E')
3891         for (j = XVECLEN (in, i) - 1; j >= 0; j--)
3892           if (loc == &XVECEXP (in, i, j)
3893               || loc_mentioned_in_p (loc, XVECEXP (in, i, j)))
3894             return 1;
3895     }
3896   return 0;
3897 }
3898
3899 /* Reinterpret a subreg as a bit extraction from an integer and return
3900    the position of the least significant bit of the extracted value.
3901    In other words, if the extraction were performed as a shift right
3902    and mask, return the number of bits to shift right.
3903
3904    The outer value of the subreg has OUTER_BYTES bytes and starts at
3905    byte offset SUBREG_BYTE within an inner value of INNER_BYTES bytes.  */
3906
3907 poly_uint64
3908 subreg_size_lsb (poly_uint64 outer_bytes,
3909                  poly_uint64 inner_bytes,
3910                  poly_uint64 subreg_byte)
3911 {
3912   poly_uint64 subreg_end, trailing_bytes, byte_pos;
3913
3914   /* A paradoxical subreg begins at bit position 0.  */
3915   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3916   if (maybe_gt (outer_bytes, inner_bytes))
3917     {
3918       gcc_checking_assert (known_eq (subreg_byte, 0U));
3919       return 0;
3920     }
3921
3922   subreg_end = subreg_byte + outer_bytes;
3923   trailing_bytes = inner_bytes - subreg_end;
3924   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3925     byte_pos = trailing_bytes;
3926   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3927     byte_pos = subreg_byte;
3928   else
3929     {
3930       /* When bytes and words have opposite endianness, we must be able
3931          to split offsets into words and bytes at compile time.  */
3932       poly_uint64 leading_word_part
3933         = force_align_down (subreg_byte, UNITS_PER_WORD);
3934       poly_uint64 trailing_word_part
3935         = force_align_down (trailing_bytes, UNITS_PER_WORD);
3936       /* If the subreg crosses a word boundary ensure that
3937          it also begins and ends on a word boundary.  */
3938       gcc_assert (known_le (subreg_end - leading_word_part,
3939                             (unsigned int) UNITS_PER_WORD)
3940                   || (known_eq (leading_word_part, subreg_byte)
3941                       && known_eq (trailing_word_part, trailing_bytes)));
3942       if (WORDS_BIG_ENDIAN)
3943         byte_pos = trailing_word_part + (subreg_byte - leading_word_part);
3944       else
3945         byte_pos = leading_word_part + (trailing_bytes - trailing_word_part);
3946     }
3947
3948   return byte_pos * BITS_PER_UNIT;
3949 }
3950
3951 /* Given a subreg X, return the bit offset where the subreg begins
3952    (counting from the least significant bit of the reg).  */
3953
3954 poly_uint64
3955 subreg_lsb (const_rtx x)
3956 {
3957   return subreg_lsb_1 (GET_MODE (x), GET_MODE (SUBREG_REG (x)),
3958                        SUBREG_BYTE (x));
3959 }
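/* A worked example (an annotation on this report, not part of
   rtlanal.cc): with little-endian bytes and words, (subreg:HI (reg:SI) 2)
   has OUTER_BYTES = 2, INNER_BYTES = 4 and SUBREG_BYTE = 2, so
   subreg_size_lsb returns byte_pos * BITS_PER_UNIT = 2 * 8 = 16: the
   extraction behaves like a shift right by 16 followed by a mask.  With
   big-endian bytes and words the same subreg has trailing_bytes =
   4 - (2 + 2) = 0 and the result is 0.  */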
3960
3961 /* Return the subreg byte offset for a subreg whose outer value has
3962    OUTER_BYTES bytes, whose inner value has INNER_BYTES bytes, and where
3963    there are LSB_SHIFT *bits* between the lsb of the outer value and the
3964    lsb of the inner value.  This is the inverse of the calculation
3965    performed by subreg_lsb_1 (which converts byte offsets to bit shifts).  */
3966
3967 poly_uint64
3968 subreg_size_offset_from_lsb (poly_uint64 outer_bytes, poly_uint64 inner_bytes,
3969                              poly_uint64 lsb_shift)
3970 {
3971   /* A paradoxical subreg begins at bit position 0.  */
3972   gcc_checking_assert (ordered_p (outer_bytes, inner_bytes));
3973   if (maybe_gt (outer_bytes, inner_bytes))
3974     {
3975       gcc_checking_assert (known_eq (lsb_shift, 0U));
3976       return 0;
3977     }
3978
3979   poly_uint64 lower_bytes = exact_div (lsb_shift, BITS_PER_UNIT);
3980   poly_uint64 upper_bytes = inner_bytes - (lower_bytes + outer_bytes);
3981   if (WORDS_BIG_ENDIAN && BYTES_BIG_ENDIAN)
3982     return upper_bytes;
3983   else if (!WORDS_BIG_ENDIAN && !BYTES_BIG_ENDIAN)
3984     return lower_bytes;
3985   else
3986     {
3987       /* When bytes and words have opposite endianness, we must be able
3988          to split offsets into words and bytes at compile time.  */
3989       poly_uint64 lower_word_part = force_align_down (lower_bytes,
3990                                                       UNITS_PER_WORD);
3991       poly_uint64 upper_word_part = force_align_down (upper_bytes,
3992                                                       UNITS_PER_WORD);
3993       if (WORDS_BIG_ENDIAN)
3994         return upper_word_part + (lower_bytes - lower_word_part);
3995       else
3996         return lower_word_part + (upper_bytes - upper_word_part);
3997     }
3998 }
3999
4000/* Fill in information about a subreg of a hard register.
4001 xregno - A regno of an inner hard subreg_reg (or what will become one).
4002 xmode - The mode of xregno.
4003 offset - The byte offset.
4004 ymode - The mode of a top level SUBREG (or what may become one).
4005 info - Pointer to structure to fill in.
4006
4007 Rather than considering one particular inner register (and thus one
4008 particular "outer" register) in isolation, this function really uses
4009 XREGNO as a model for a sequence of isomorphic hard registers. Thus the
4010 function does not check whether adding INFO->offset to XREGNO gives
4011 a valid hard register; even if INFO->offset + XREGNO is out of range,
4012 there might be another register of the same type that is in range.
4013 Likewise it doesn't check whether targetm.hard_regno_mode_ok accepts
4014 the new register, since that can depend on things like whether the final
4015 register number is even or odd. Callers that want to check whether
4016 this particular subreg can be replaced by a simple (reg ...) should
4017 use simplify_subreg_regno. */
4018
4019void
4020subreg_get_info (unsigned int xregno, machine_mode xmode,
4021 poly_uint64 offset, machine_mode ymode,
4022 struct subreg_info *info)
4023{
4024 unsigned int nregs_xmode, nregs_ymode;
4025
4026 gcc_assert (xregno < FIRST_PSEUDO_REGISTER)((void)(!(xregno < 76) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4026, __FUNCTION__), 0 : 0))
;
4027
4028 poly_uint64 xsize = GET_MODE_SIZE (xmode);
4029 poly_uint64 ysize = GET_MODE_SIZE (ymode);
4030
4031 bool rknown = false;
4032
4033 /* If the register representation of a non-scalar mode has holes in it,
4034 we expect the scalar units to be concatenated together, with the holes
4035 distributed evenly among the scalar units. Each scalar unit must occupy
4036 at least one register. */
4037 if (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)(((global_options.x_target_flags & (1U << 0)) != 0)
&& !((global_options.x_ix86_isa_flags & (1UL <<
1)) != 0) && ((((unsigned long) ((xregno)) - (unsigned
long) (0) <= (unsigned long) (7) - (unsigned long) (0))) ||
((unsigned long) ((xregno)) - (unsigned long) (36) <= (unsigned
long) (43) - (unsigned long) (36))) && ((xmode) == (
scalar_float_mode ((scalar_float_mode::from_int) E_XFmode)) ||
(xmode) == (complex_mode ((complex_mode::from_int) E_XCmode)
)))
)
4038 {
4039 /* As a consequence, we must be dealing with a constant number of
4040 scalars, and thus a constant offset and number of units. */
4041 HOST_WIDE_INTlong coffset = offset.to_constant ();
4042 HOST_WIDE_INTlong cysize = ysize.to_constant ();
4043 nregs_xmode = HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode)((xmode) == (scalar_float_mode ((scalar_float_mode::from_int)
E_XFmode)) ? 4 : 8)
;
4044 unsigned int nunits = GET_MODE_NUNITS (xmode).to_constant ();
4045 scalar_mode xmode_unit = GET_MODE_INNER (xmode)(mode_to_inner (xmode));
4046 gcc_assert (HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode_unit))((void)(!((((global_options.x_target_flags & (1U <<
0)) != 0) && !((global_options.x_ix86_isa_flags &
(1UL << 1)) != 0) && ((((unsigned long) ((xregno
)) - (unsigned long) (0) <= (unsigned long) (7) - (unsigned
long) (0))) || ((unsigned long) ((xregno)) - (unsigned long)
(36) <= (unsigned long) (43) - (unsigned long) (36))) &&
((xmode_unit) == (scalar_float_mode ((scalar_float_mode::from_int
) E_XFmode)) || (xmode_unit) == (complex_mode ((complex_mode::
from_int) E_XCmode))))) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4046, __FUNCTION__), 0 : 0))
;
4047 gcc_assert (nregs_xmode((void)(!(nregs_xmode == (nunits * ((xmode_unit) == (scalar_float_mode
((scalar_float_mode::from_int) E_XFmode)) ? 4 : 8))) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4049, __FUNCTION__), 0 : 0))
4048 == (nunits((void)(!(nregs_xmode == (nunits * ((xmode_unit) == (scalar_float_mode
((scalar_float_mode::from_int) E_XFmode)) ? 4 : 8))) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4049, __FUNCTION__), 0 : 0))
4049 * HARD_REGNO_NREGS_WITH_PADDING (xregno, xmode_unit)))((void)(!(nregs_xmode == (nunits * ((xmode_unit) == (scalar_float_mode
((scalar_float_mode::from_int) E_XFmode)) ? 4 : 8))) ? fancy_abort
("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4049, __FUNCTION__), 0 : 0))
;
4050 gcc_assert (hard_regno_nregs (xregno, xmode)((void)(!(hard_regno_nregs (xregno, xmode) == hard_regno_nregs
(xregno, xmode_unit) * nunits) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4051, __FUNCTION__), 0 : 0))
4051 == hard_regno_nregs (xregno, xmode_unit) * nunits)((void)(!(hard_regno_nregs (xregno, xmode) == hard_regno_nregs
(xregno, xmode_unit) * nunits) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/rtlanal.cc"
, 4051, __FUNCTION__), 0 : 0))
;
4052
4053 /* You can only ask for a SUBREG of a value with holes in the middle
4054 if you don't cross the holes. (Such a SUBREG should be done by
4055 picking a different register class, or doing it in memory if
4056 necessary.) An example of a value with holes is XCmode on 32-bit
4057 x86 with -m128bit-long-double; it's represented in 6 32-bit registers,
4058 3 for each part, but in memory it's two 128-bit parts.
4059 Padding is assumed to be at the end (not necessarily the 'high part')
4060 of each unit. */
4061 if ((coffset / GET_MODE_SIZE (xmode_unit) + 1 < nunits)
4062 && (coffset / GET_MODE_SIZE (xmode_unit)
4063 != ((coffset + cysize - 1) / GET_MODE_SIZE (xmode_unit))))
4064 {
4065 info->representable_p = false;
4066 rknown = true;
4067 }
4068 }
4069 else
4070 nregs_xmode = hard_regno_nregs (xregno, xmode);
4071
4072 nregs_ymode = hard_regno_nregs (xregno, ymode);
4073
4074 /* Subreg sizes must be ordered, so that we can tell whether they are
4075 partial, paradoxical or complete. */
4076 gcc_checking_assert (ordered_p (xsize, ysize));
4077
4078 /* Paradoxical subregs are otherwise valid.  */
4079 if (!rknown && known_eq (offset, 0U) && maybe_gt (ysize, xsize))
4080 {
4081 info->representable_p = true;
4082 /* If this is a big endian paradoxical subreg, which uses more
4083 actual hard registers than the original register, we must
4084 return a negative offset so that we find the proper highpart
4085 of the register.
4086
4087 We assume that the ordering of registers within a multi-register
4088 value has a consistent endianness: if bytes and register words
4089 have different endianness, the hard registers that make up a
4090 multi-register value must be at least word-sized. */
4091 if (REG_WORDS_BIG_ENDIAN)
4092 info->offset = (int) nregs_xmode - (int) nregs_ymode;
4093 else
4094 info->offset = 0;
4095 info->nregs = nregs_ymode;
4096 return;
4097 }
4098
4099 /* If registers store different numbers of bits in the different
4100 modes, we cannot generally form this subreg. */
4101 poly_uint64 regsize_xmode, regsize_ymode;
4102 if (!HARD_REGNO_NREGS_HAS_PADDING (xregno, xmode)
4103 && !HARD_REGNO_NREGS_HAS_PADDING (xregno, ymode)
4104 && multiple_p (xsize, nregs_xmode, &regsize_xmode)
4105 && multiple_p (ysize, nregs_ymode, &regsize_ymode))
4106 {
4107 if (!rknown
4108 && ((nregs_ymode > 1 && maybe_gt (regsize_xmode, regsize_ymode))
4109 || (nregs_xmode > 1 && maybe_gt (regsize_ymode, regsize_xmode))))
4110 {
4111 info->representable_p = false;
4112 if (!can_div_away_from_zero_p (ysize, regsize_xmode, &info->nregs)
4113 || !can_div_trunc_p (offset, regsize_xmode, &info->offset))
4114 /* Checked by validate_subreg. We must know at compile time
4115 which inner registers are being accessed. */
4116 gcc_unreachable ();
4117 return;
4118 }
4119 /* It's not valid to extract a subreg of mode YMODE at OFFSET that
4120 would go outside of XMODE.  */
4121 if (!rknown && maybe_gt (ysize + offset, xsize))
4122 {
4123 info->representable_p = false;
4124 info->nregs = nregs_ymode;
4125 if (!can_div_trunc_p (offset, regsize_xmode, &info->offset))
4126 /* Checked by validate_subreg. We must know at compile time
4127 which inner registers are being accessed. */
4128 gcc_unreachable ();
4129 return;
4130 }
4131 /* Quick exit for the simple and common case of extracting whole
4132 subregisters from a multiregister value. */
4133 /* ??? It would be better to integrate this into the code below,
4134 if we can generalize the concept enough and figure out how
4135 odd-sized modes can coexist with the other weird cases we support. */
4136 HOST_WIDE_INT count;
4137 if (!rknown
4138 && WORDS_BIG_ENDIAN == REG_WORDS_BIG_ENDIAN
4139 && known_eq (regsize_xmode, regsize_ymode)
4140 && constant_multiple_p (offset, regsize_ymode, &count))
4141 {
4142 info->representable_p = true;
4143 info->nregs = nregs_ymode;
4144 info->offset = count;
4145 gcc_assert (info->offset + info->nregs <= (int) nregs_xmode);
4146 return;
4147 }
4148 }
4149
4150 /* Lowpart subregs are otherwise valid. */
4151 if (!rknown && known_eq (offset, subreg_lowpart_offset (ymode, xmode)))
4152 {
4153 info->representable_p = true;
4154 rknown = true;
4155
4156 if (known_eq (offset, 0U) || nregs_xmode == nregs_ymode)
4157 {
4158 info->offset = 0;
4159 info->nregs = nregs_ymode;
4160 return;
4161 }
4162 }
4163
4164 /* Set NUM_BLOCKS to the number of independently-representable YMODE
4165 values there are in (reg:XMODE XREGNO). We can view the register
4166 as consisting of this number of independent "blocks", where each
4167 block occupies NREGS_YMODE registers and contains exactly one
4168 representable YMODE value. */
4169 gcc_assert ((nregs_xmode % nregs_ymode) == 0);
4170 unsigned int num_blocks = nregs_xmode / nregs_ymode;
4171
4172 /* Calculate the number of bytes in each block. This must always
4173 be exact, otherwise we don't know how to verify the constraint.
4174 These conditions may be relaxed but subreg_regno_offset would
4175 need to be redesigned. */
4176 poly_uint64 bytes_per_block = exact_div (xsize, num_blocks);
4177
4178 /* Get the number of the first block that contains the subreg and the byte
4179 offset of the subreg from the start of that block. */
4180 unsigned int block_number;
4181 poly_uint64 subblock_offset;
4182 if (!can_div_trunc_p (offset, bytes_per_block, &block_number,
4183 &subblock_offset))
4184 /* Checked by validate_subreg. We must know at compile time which
4185 inner registers are being accessed. */
4186 gcc_unreachable ();
4187
4188 if (!rknown)
4189 {
4190 /* Only the lowpart of each block is representable. */
4191 info->representable_p
4192 = known_eq (subblock_offset,
4193 subreg_size_lowpart_offset (ysize, bytes_per_block));
4194 rknown = true;
4195 }
4196
4197 /* We assume that the ordering of registers within a multi-register
4198 value has a consistent endianness: if bytes and register words
4199 have different endianness, the hard registers that make up a
4200 multi-register value must be at least word-sized. */
4201 if (WORDS_BIG_ENDIAN != REG_WORDS_BIG_ENDIAN)
4202 /* The block number we calculated above followed memory endianness.
4203 Convert it to register endianness by counting back from the end.
4204 (Note that, because of the assumption above, each block must be
4205 at least word-sized.) */
4206 info->offset = (num_blocks - block_number - 1) * nregs_ymode;
4207 else
4208 info->offset = block_number * nregs_ymode;
4209 info->nregs = nregs_ymode;
4210}
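
/* [Editor's note: the sketch below is illustrative and is NOT part of
   rtlanal.cc.]  Once every size above is a compile-time constant, the
   block decomposition reduces to plain integer arithmetic.  This
   standalone model uses unsigned ints in place of poly_uint64 and a
   hypothetical block_info struct in place of subreg_info; it covers only
   the little-endian case, where the lowpart offset within a block is 0.  */
#include <cassert>

struct block_info { unsigned offset, nregs; bool representable; };

/* XMODE occupies NREGS_X hard registers and XSIZE bytes; each YMODE
   value occupies NREGS_Y registers.  Mirrors source lines 4169-4208.  */
static block_info
model_subreg_get_info (unsigned nregs_x, unsigned nregs_y,
                       unsigned xsize, unsigned offset)
{
  assert (nregs_x % nregs_y == 0);                /* cf. line 4169 */
  unsigned num_blocks = nregs_x / nregs_y;        /* cf. line 4170 */
  unsigned bytes_per_block = xsize / num_blocks;  /* cf. line 4176 */
  unsigned block_number = offset / bytes_per_block;
  unsigned subblock_offset = offset % bytes_per_block;
  /* Only the lowpart of each block is representable (line 4190).  */
  return { block_number * nregs_y, nregs_y, subblock_offset == 0 };
}
/* Example: a 16-byte value held in two 8-byte registers, viewed in an
   8-byte mode at byte offset 8, gives block_number 1, hence offset 1,
   nregs 1, representable.  [End editor's sketch.]  */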
4211
4212/* This function returns the regno offset of a subreg expression.
4213 xregno - A regno of an inner hard subreg_reg (or what will become one).
4214 xmode - The mode of xregno.
4215 offset - The byte offset.
4216 ymode - The mode of a top level SUBREG (or what may become one).
4217 RETURN - The regno offset which would be used. */
4218unsigned int
4219subreg_regno_offset (unsigned int xregno, machine_mode xmode,
4220 poly_uint64 offset, machine_mode ymode)
4221{
4222 struct subreg_info info;
4223 subreg_get_info (xregno, xmode, offset, ymode, &info);
4224 return info.offset;
4225}
4226
4227/* This function returns true when the offset is representable via
4228 subreg_offset in the given regno.
4229 xregno - A regno of an inner hard subreg_reg (or what will become one).
4230 xmode - The mode of xregno.
4231 offset - The byte offset.
4232 ymode - The mode of a top level SUBREG (or what may become one).
4233 RETURN - Whether the offset is representable. */
4234bool
4235subreg_offset_representable_p (unsigned int xregno, machine_mode xmode,
4236 poly_uint64 offset, machine_mode ymode)
4237{
4238 struct subreg_info info;
4239 subreg_get_info (xregno, xmode, offset, ymode, &info);
4240 return info.representable_p;
4241}
4242
4243/* Return the number of a YMODE register to which
4244
4245 (subreg:YMODE (reg:XMODE XREGNO) OFFSET)
4246
4247 can be simplified. Return -1 if the subreg can't be simplified.
4248
4249 XREGNO is a hard register number. */
4250
4251int
4252simplify_subreg_regno (unsigned int xregno, machine_mode xmode,
4253 poly_uint64 offset, machine_mode ymode)
4254{
4255 struct subreg_info info;
4256 unsigned int yregno;
4257
4258 /* Give the backend a chance to disallow the mode change. */
4259 if (GET_MODE_CLASS (xmode) != MODE_COMPLEX_INT
4260 && GET_MODE_CLASS (xmode) != MODE_COMPLEX_FLOAT
4261 && !REG_CAN_CHANGE_MODE_P (xregno, xmode, ymode))
4262 return -1;
4263
4264 /* We shouldn't simplify stack-related registers. */
4265 if ((!reload_completed || frame_pointer_needed)
4266 && xregno == FRAME_POINTER_REGNUM)
4267 return -1;
4268
4269 if (FRAME_POINTER_REGNUM != ARG_POINTER_REGNUM
4270 && xregno == ARG_POINTER_REGNUM)
4271 return -1;
4272
4273 if (xregno == STACK_POINTER_REGNUM
4274 /* We should convert hard stack register in LRA if it is
4275 possible. */
4276 && ! lra_in_progress)
4277 return -1;
4278
4279 /* Try to get the register offset. */
4280 subreg_get_info (xregno, xmode, offset, ymode, &info);
4281 if (!info.representable_p)
4282 return -1;
4283
4284 /* Make sure that the offsetted register value is in range. */
4285 yregno = xregno + info.offset;
4286 if (!HARD_REGISTER_NUM_P (yregno))
4287 return -1;
4288
4289 /* See whether (reg:YMODE YREGNO) is valid.
4290
4291 ??? We allow invalid registers if (reg:XMODE XREGNO) is also invalid.
4292 This is a kludge to work around how complex FP arguments are passed
4293 on IA-64 and should be fixed. See PR target/49226. */
4294 if (!targetm.hard_regno_mode_ok (yregno, ymode)
4295 && targetm.hard_regno_mode_ok (xregno, xmode))
4296 return -1;
4297
4298 return (int) yregno;
4299}
4300
4301/* A wrapper around simplify_subreg_regno that uses subreg_lowpart_offset
4302 (xmode, ymode) as the offset. */
4303
4304int
4305lowpart_subreg_regno (unsigned int regno, machine_mode xmode,
4306 machine_mode ymode)
4307{
4308 poly_uint64 offset = subreg_lowpart_offset (xmode, ymode);
4309 return simplify_subreg_regno (regno, xmode, offset, ymode);
4310}
4311
4312/* Return the final regno that a subreg expression refers to. */
4313unsigned int
4314subreg_regno (const_rtx x)
4315{
4316 unsigned int ret;
4317 rtx subreg = SUBREG_REG (x);
4318 int regno = REGNO (subreg);
4319
4320 ret = regno + subreg_regno_offset (regno,
4321 GET_MODE (subreg),
4322 SUBREG_BYTE (x),
4323 GET_MODE (x));
4324 return ret;
4325
4326}
4327
4328/* Return the number of registers that a subreg expression refers
4329 to. */
4330unsigned int
4331subreg_nregs (const_rtx x)
4332{
4333 return subreg_nregs_with_regno (REGNO (SUBREG_REG (x)), x);
4334}
4335
4336/* Return the number of registers that a subreg REG with REGNO
4337 expression refers to.  This is a copy of rtlanal.cc:subreg_nregs,
4338 changed so that the regno can be passed in.  */
4339
4340unsigned int
4341subreg_nregs_with_regno (unsigned int regno, const_rtx x)
4342{
4343 struct subreg_info info;
4344 rtx subreg = SUBREG_REG (x);
4345
4346 subreg_get_info (regno, GET_MODE (subreg), SUBREG_BYTE (x), GET_MODE (x),
4347 &info);
4348 return info.nregs;
4349}
4350
4351struct parms_set_data
4352{
4353 int nregs;
4354 HARD_REG_SET regs;
4355};
4356
4357/* Helper function for noticing stores to parameter registers. */
4358static void
4359parms_set (rtx x, const_rtx pat ATTRIBUTE_UNUSED, void *data)
4360{
4361 struct parms_set_data *const d = (struct parms_set_data *) data;
4362 if (REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
4363 && TEST_HARD_REG_BIT (d->regs, REGNO (x)))
4364 {
4365 CLEAR_HARD_REG_BIT (d->regs, REGNO (x));
4366 d->nregs--;
4367 }
4368}
4369
4370/* Look backward for first parameter to be loaded.
4371 Note that loads of all parameters will not necessarily be
4372 found if CSE has eliminated some of them (e.g., an argument
4373 to the outer function is passed down as a parameter).
4374 Do not skip BOUNDARY. */
4375rtx_insn *
4376find_first_parameter_load (rtx_insn *call_insn, rtx_insn *boundary)
4377{
4378 struct parms_set_data parm;
4379 rtx p;
4380 rtx_insn *before, *first_set;
4381
4382 /* Since different machines initialize their parameter registers
4383 in different orders, assume nothing. Collect the set of all
4384 parameter registers. */
4385 CLEAR_HARD_REG_SET (parm.regs);
4386 parm.nregs = 0;
4387 for (p = CALL_INSN_FUNCTION_USAGE (call_insn); p; p = XEXP (p, 1))
4388 if (GET_CODE (XEXP (p, 0)) == USE
4389 && REG_P (XEXP (XEXP (p, 0), 0))
4390 && !STATIC_CHAIN_REG_P (XEXP (XEXP (p, 0), 0)))
4391 {
4392 gcc_assert (REGNO (XEXP (XEXP (p, 0), 0)) < FIRST_PSEUDO_REGISTER);
4393
4394 /* We only care about registers which can hold function
4395 arguments.  */
4396 if (!FUNCTION_ARG_REGNO_P (REGNO (XEXP (XEXP (p, 0), 0))))
4397 continue;
4398
4399 SET_HARD_REG_BIT (parm.regs, REGNO (XEXP (XEXP (p, 0), 0)));
4400 parm.nregs++;
4401 }
4402 before = call_insn;
4403 first_set = call_insn;
4404
4405 /* Search backward for the first set of a register in this set. */
4406 while (parm.nregs && before != boundary)
4407 {
4408 before = PREV_INSN (before);
4409
4410 /* It is possible that some loads got CSEed from one call to
4411 another. Stop in that case. */
4412 if (CALL_P (before))
4413 break;
4414
4415 /* Our caller must either ensure that we will find all sets
4416 (in case the code has not been optimized yet), or take care of
4417 possible labels by setting BOUNDARY to the preceding
4418 CODE_LABEL.  */
4419 if (LABEL_P (before))
4420 {
4421 gcc_assert (before == boundary);
4422 break;
4423 }
4424
4425 if (INSN_P (before))
4426 {
4427 int nregs_old = parm.nregs;
4428 note_stores (before, parms_set, &parm);
4429 /* If we found something that did not set a parameter reg,
4430 we're done. Do not keep going, as that might result
4431 in hoisting an insn before the setting of a pseudo
4432 that is used by the hoisted insn. */
4433 if (nregs_old != parm.nregs)
4434 first_set = before;
4435 else
4436 break;
4437 }
4438 }
4439 return first_set;
4440}
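
/* [Editor's note: illustrative sketch, NOT part of rtlanal.cc.]  The
   scan above is a simple backward walk: collect the set of parameter
   registers used by the call, then step backwards, removing registers as
   their setters are found, and stop at the first insn that sets none of
   the remaining ones.  A toy model over plain bitmasks:  */
#include <vector>

/* Each element of SETTERS is the mask of registers set by one insn,
   ordered from the insn just before the call, walking backwards.
   Returns the index of the earliest parameter load found, or -1 if the
   scan stopped before finding any.  */
static int
model_find_first_parameter_load (unsigned parm_mask,
                                 const std::vector<unsigned> &setters)
{
  int first_set = -1;
  for (unsigned i = 0; i < setters.size () && parm_mask; ++i)
    {
      if ((setters[i] & parm_mask) == 0)
        break;                  /* cf. lines 4429-4436: stop early */
      parm_mask &= ~setters[i]; /* cf. parms_set clearing d->regs */
      first_set = (int) i;
    }
  return first_set;
}
/* [End editor's sketch.]  */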
4441
4442/* Return true if we should avoid inserting code between INSN and preceding
4443 call instruction. */
4444
4445bool
4446keep_with_call_p (const rtx_insn *insn)
4447{
4448 rtx set;
4449
4450 if (INSN_P (insn) && (set = single_set (insn)) != NULL)
4451 {
4452 if (REG_P (SET_DEST (set))
4453 && REGNO (SET_DEST (set)) < FIRST_PSEUDO_REGISTER
4454 && fixed_regs[REGNO (SET_DEST (set))]
4455 && general_operand (SET_SRC (set), VOIDmode))
4456 return true;
4457 if (REG_P (SET_SRC (set))
4458 && targetm.calls.function_value_regno_p (REGNO (SET_SRC (set)))
4459 && REG_P (SET_DEST (set))
4460 && REGNO (SET_DEST (set)) >= FIRST_PSEUDO_REGISTER)
4461 return true;
4462 /* There may be a stack pop just after the call and before the store
4463 of the return register. Search for the actual store when deciding
4464 if we can break or not. */
4465 if (SET_DEST (set) == stack_pointer_rtx)
4466 {
4467 /* This CONST_CAST is okay because next_nonnote_insn just
4468 returns its argument and we assign it to a const_rtx
4469 variable. */
4470 const rtx_insn *i2
4471 = next_nonnote_insn (const_cast<rtx_insn *> (insn));
4472 if (i2 && keep_with_call_p (i2))
4473 return true;
4474 }
4475 }
4476 return false;
4477}
4478
4479/* Return true if LABEL is a target of JUMP_INSN. This applies only
4480 to non-complex jumps. That is, direct unconditional, conditional,
4481 and tablejumps, but not computed jumps or returns. It also does
4482 not apply to the fallthru case of a conditional jump. */
4483
4484bool
4485label_is_jump_target_p (const_rtx label, const rtx_insn *jump_insn)
4486{
4487 rtx tmp = JUMP_LABEL (jump_insn);
4488 rtx_jump_table_data *table;
4489
4490 if (label == tmp)
4491 return true;
4492
4493 if (tablejump_p (jump_insn, NULL, &table))
4494 {
4495 rtvec vec = table->get_labels ();
4496 int i, veclen = GET_NUM_ELEM (vec);
4497
4498 for (i = 0; i < veclen; ++i)
4499 if (XEXP (RTVEC_ELT (vec, i), 0) == label)
4500 return true;
4501 }
4502
4503 if (find_reg_note (jump_insn, REG_LABEL_TARGET, label))
4504 return true;
4505
4506 return false;
4507}
4508
4509
4510/* Return an estimate of the cost of computing rtx X.
4511 One use is in cse, to decide which expression to keep in the hash table.
4512 Another is in rtl generation, to pick the cheapest way to multiply.
4513 Other uses like the latter are expected in the future.
4514
4515 X appears as operand OPNO in an expression with code OUTER_CODE.
4516 SPEED specifies whether costs optimized for speed or size should
4517 be returned. */
4518
4519int
4520rtx_cost (rtx x, machine_mode mode, enum rtx_code outer_code,
4521 int opno, bool speed)
4522{
4523 int i, j;
4524 enum rtx_code code;
4525 const char *fmt;
4526 int total;
4527 int factor;
4528 unsigned mode_size;
4529
4530 if (x == 0)
4531 return 0;
4532
4533 if (GET_CODE (x) == SET)
4534 /* A SET doesn't have a mode, so let's look at the SET_DEST to get
4535 the mode for the factor.  */
4536 mode = GET_MODE (SET_DEST (x));
4537 else if (GET_MODE (x) != VOIDmode)
4538 mode = GET_MODE (x);
4539
4540 mode_size = estimated_poly_value (GET_MODE_SIZE (mode));
4541
4542 /* A size N times larger than UNITS_PER_WORD likely needs N times as
4543 many insns, taking N times as long. */
4544 factor = mode_size > UNITS_PER_WORD ? mode_size / UNITS_PER_WORD : 1;
4545
4546 /* Compute the default costs of certain things.
4547 Note that targetm.rtx_costs can override the defaults. */
4548
4549 code = GET_CODE (x);
4550 switch (code)
4551 {
4552 case MULT:
4553 case FMA:
4554 case SS_MULT:
4555 case US_MULT:
4556 case SMUL_HIGHPART:
4557 case UMUL_HIGHPART:
4558 /* Multiplication has time-complexity O(N*N), where N is the
4559 number of units (translated from digits) when using
4560 schoolbook long multiplication. */
4561 total = factor * factor * COSTS_N_INSNS (5);
4562 break;
4563 case DIV:
4564 case UDIV:
4565 case MOD:
4566 case UMOD:
4567 case SS_DIV:
4568 case US_DIV:
4569 /* Similarly, complexity for schoolbook long division. */
4570 total = factor * factor * COSTS_N_INSNS (7);
4571 break;
4572 case USE:
4573 /* Used in combine.cc as a marker. */
4574 total = 0;
4575 break;
4576 default:
4577 total = factor * COSTS_N_INSNS (1);
4578 }
4579
4580 switch (code)
4581 {
4582 case REG:
4583 return 0;
4584
4585 case SUBREG:
4586 total = 0;
4587 /* If we can't tie these modes, make this expensive. The larger
4588 the mode, the more expensive it is. */
4589 if (!targetm.modes_tieable_p (mode, GET_MODE (SUBREG_REG (x))))
4590 return COSTS_N_INSNS (2 + factor);
4591 break;
4592
4593 case TRUNCATE:
4594 if (targetm.modes_tieable_p (mode, GET_MODE (XEXP (x, 0))))
4595 {
4596 total = 0;
4597 break;
4598 }
4599 /* FALLTHRU */
4600 default:
4601 if (targetm.rtx_costs (x, mode, outer_code, opno, &total, speed))
4602 return total;
4603 break;
4604 }
4605
4606 /* Sum the costs of the sub-rtx's, plus cost of this operation,
4607 which is already in total. */
4608
4609 fmt = GET_RTX_FORMAT (code);
4610 for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4611 if (fmt[i] == 'e')
4612 total += rtx_cost (XEXP (x, i), mode, code, i, speed);
4613 else if (fmt[i] == 'E')
4614 for (j = 0; j < XVECLEN (x, i); j++)
4615 total += rtx_cost (XVECEXP (x, i, j), mode, code, i, speed);
4616
4617 return total;
4618}
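
/* [Editor's note: illustrative sketch, NOT part of rtlanal.cc.]  The
   default cost table above scales with a size factor: a value N times
   wider than a word is charged N word-sized insns, or N*N for the
   schoolbook multiply/divide cases.  A standalone model of just those
   defaults, with COSTS_N_INSNS (n) expanded to n * 4 as in this build:  */
static int
model_default_rtx_cost (bool is_mult, bool is_div,
                        unsigned mode_size, unsigned units_per_word)
{
  unsigned factor
    = mode_size > units_per_word ? mode_size / units_per_word : 1;
  if (is_mult)
    return factor * factor * (5 * 4);   /* cf. line 4561 */
  if (is_div)
    return factor * factor * (7 * 4);   /* cf. line 4570 */
  return factor * (1 * 4);              /* cf. line 4577 */
}
/* Example: a 16-byte multiply with 8-byte words has factor 2 and costs
   2 * 2 * 20 = 80, versus 20 for a word-sized multiply.
   [End editor's sketch.]  */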
4619
4620/* Fill in the structure C with information about both speed and size rtx
4621 costs for X, which is operand OPNO in an expression with code OUTER. */
4622
4623void
4624get_full_rtx_cost (rtx x, machine_mode mode, enum rtx_code outer, int opno,
4625 struct full_rtx_costs *c)
4626{
4627 c->speed = rtx_cost (x, mode, outer, opno, true);
4628 c->size = rtx_cost (x, mode, outer, opno, false);
4629}
4630
4631
4632/* Return cost of address expression X.
4633 Expect that X is properly formed address reference.
4634
4635 SPEED parameter specify whether costs optimized for speed or size should
4636 be returned. */
4637
4638int
4639address_cost (rtx x, machine_mode mode, addr_space_t as, bool speed)
4640{
4641 /* We may be asked for the cost of various unusual addresses, such as
4642 operands of a push instruction.  It is not worthwhile to complicate
4643 the target hook with such cases.  */
4644
4645 if (!memory_address_addr_space_p (mode, x, as))
4646 return 1000;
4647
4648 return targetm.address_cost (x, mode, as, speed);
4649}
4650
4651/* If the target doesn't override, compute the cost as with arithmetic. */
4652
4653int
4654default_address_cost (rtx x, machine_mode, addr_space_t, bool speed)
4655{
4656 return rtx_cost (x, Pmode, MEM, 0, speed);
4657}
4658
4659
4660unsigned HOST_WIDE_INT
4661nonzero_bits (const_rtx x, machine_mode mode)
4662{
4663 if (mode == VOIDmode)
4664 mode = GET_MODE (x);
4665 scalar_int_mode int_mode;
4666 if (!is_a <scalar_int_mode> (mode, &int_mode))
4667 return GET_MODE_MASK (mode);
4668 return cached_nonzero_bits (x, int_mode, NULL_RTX, VOIDmode, 0);
4669}
4670
4671unsigned int
4672num_sign_bit_copies (const_rtx x, machine_mode mode)
4673{
4674 if (mode == VOIDmode)
4675 mode = GET_MODE (x);
4676 scalar_int_mode int_mode;
4677 if (!is_a <scalar_int_mode> (mode, &int_mode))
4678 return 1;
4679 return cached_num_sign_bit_copies (x, int_mode, NULL_RTX, VOIDmode, 0);
4680}
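
/* [Editor's note: worked example, NOT part of rtlanal.cc.]  For a
   CONST_INT, num_sign_bit_copies1 below (lines 5335-5345) complements
   negative values and counts how far the highest nonzero bit is from the
   top.  In an 8-bit mode:
     x = 0x03: nonzero = 0x03, result = 8 - floor_log2 (0x03) - 1 = 6
               (the top six bits of 00000011 equal the sign bit);
     x = 0xfe: sign bit set, so nonzero = ~0xfe & 0xff = 0x01 and
               result = 8 - 0 - 1 = 7 (11111110 has seven sign copies).  */
static_assert (8 - 1 - 1 == 6 && 8 - 0 - 1 == 7,
               "worked-example arithmetic");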
4681
4682/* Return true if nonzero_bits1 might recurse into both operands
4683 of X. */
4684
4685static inline bool
4686nonzero_bits_binary_arith_p (const_rtx x)
4687{
4688 if (!ARITHMETIC_P (x))
4689 return false;
4690 switch (GET_CODE (x))
4691 {
4692 case AND:
4693 case XOR:
4694 case IOR:
4695 case UMIN:
4696 case UMAX:
4697 case SMIN:
4698 case SMAX:
4699 case PLUS:
4700 case MINUS:
4701 case MULT:
4702 case DIV:
4703 case UDIV:
4704 case MOD:
4705 case UMOD:
4706 return true;
4707 default:
4708 return false;
4709 }
4710}
4711
4712/* The function cached_nonzero_bits is a wrapper around nonzero_bits1.
4713 It avoids exponential behavior in nonzero_bits1 when X has
4714 identical subexpressions on the first or the second level. */
4715
4716static unsigned HOST_WIDE_INT
4717cached_nonzero_bits (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4718 machine_mode known_mode,
4719 unsigned HOST_WIDE_INT known_ret)
4720{
4721 if (x == known_x && mode == known_mode)
4722 return known_ret;
4723
4724 /* Try to find identical subexpressions. If found call
4725 nonzero_bits1 on X with the subexpressions as KNOWN_X and the
4726 precomputed value for the subexpression as KNOWN_RET. */
4727
4728 if (nonzero_bits_binary_arith_p (x))
4729 {
4730 rtx x0 = XEXP (x, 0);
4731 rtx x1 = XEXP (x, 1);
4732
4733 /* Check the first level. */
4734 if (x0 == x1)
4735 return nonzero_bits1 (x, mode, x0, mode,
4736 cached_nonzero_bits (x0, mode, known_x,
4737 known_mode, known_ret));
4738
4739 /* Check the second level. */
4740 if (nonzero_bits_binary_arith_p (x0)
4741 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
4742 return nonzero_bits1 (x, mode, x1, mode,
4743 cached_nonzero_bits (x1, mode, known_x,
4744 known_mode, known_ret));
4745
4746 if (nonzero_bits_binary_arith_p (x1)
4747 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
4748 return nonzero_bits1 (x, mode, x0, mode,
4749 cached_nonzero_bits (x0, mode, known_x,
4750 known_mode, known_ret));
4751 }
4752
4753 return nonzero_bits1 (x, mode, known_x, known_mode, known_ret);
4754}
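
/* [Editor's note: illustrative sketch, NOT part of rtlanal.cc.]  The
   wrapper above defeats exponential blow-up on shared subexpressions:
   for (plus X X) it evaluates X once and then passes the pair
   (KNOWN_X, KNOWN_RET) down, so the second traversal of X is answered
   from that one-entry cache.  A toy version over a hypothetical binary
   tree (not an rtx), combining children with IOR for simplicity:  */
struct toy_node { const toy_node *op0, *op1; unsigned leaf_bits; };

static unsigned
toy_nonzero_bits (const toy_node *x,
                  const toy_node *known_x, unsigned known_ret)
{
  if (x == known_x)
    return known_ret;           /* the one-entry cache hit */
  if (!x->op0)
    return x->leaf_bits;        /* leaf */
  if (x->op0 == x->op1)         /* shared subexpression */
    {
      unsigned r = toy_nonzero_bits (x->op0, known_x, known_ret);
      /* The second operand is the same node; it resolves to R.  */
      return r | toy_nonzero_bits (x->op1, x->op0, r);
    }
  return toy_nonzero_bits (x->op0, known_x, known_ret)
         | toy_nonzero_bits (x->op1, known_x, known_ret);
}
/* [End editor's sketch.]  */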
4755
4756/* We let num_sign_bit_copies recur into nonzero_bits as that is useful.
4757 We don't let nonzero_bits recur into num_sign_bit_copies, because that
4758 is less useful. We can't allow both, because that results in exponential
4759 run time recursion. There is a nullstone testcase that triggered
4760 this. This macro avoids accidental uses of num_sign_bit_copies. */
4761#define cached_num_sign_bit_copies sorry_i_am_preventing_exponential_behavior
4762
4763/* Given an expression, X, compute which bits in X can be nonzero.
4764 We don't care about bits outside of those defined in MODE.
4765
4766 For most X this is simply GET_MODE_MASK (GET_MODE (X)), but if X is
4767 an arithmetic operation, we can do better. */
4768
4769static unsigned HOST_WIDE_INT
4770nonzero_bits1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
4771 machine_mode known_mode,
4772 unsigned HOST_WIDE_INT known_ret)
4773{
4774 unsigned HOST_WIDE_INT nonzero = GET_MODE_MASK (mode);
4775 unsigned HOST_WIDE_INT inner_nz;
4776 enum rtx_code code = GET_CODE (x);
4777 machine_mode inner_mode;
4778 unsigned int inner_width;
4779 scalar_int_mode xmode;
4780
4781 unsigned int mode_width = GET_MODE_PRECISION (mode);
4782
4783 if (CONST_INT_P (x))
4784 {
4785 if (SHORT_IMMEDIATES_SIGN_EXTEND
4786 && INTVAL (x) > 0
4787 && mode_width < BITS_PER_WORD
4788 && (UINTVAL (x) & (HOST_WIDE_INT_1U << (mode_width - 1))) != 0)
4789 return UINTVAL (x) | (HOST_WIDE_INT_M1U << mode_width);
4790
4791 return UINTVAL (x);
4792 }
4793
4794 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
4795 return nonzero;
4796 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
4797
4798 /* If X is wider than MODE, use its mode instead. */
4799 if (xmode_width > mode_width)
4800 {
4801 mode = xmode;
4802 nonzero = GET_MODE_MASK (mode);
4803 mode_width = xmode_width;
4804 }
4805
4806 if (mode_width > HOST_BITS_PER_WIDE_INT)
4807 /* Our only callers in this case look for single bit values. So
4808 just return the mode mask. Those tests will then be false. */
4809 return nonzero;
4810
4811 /* If MODE is wider than X, but both are a single word for both the host
4812 and target machines, we can compute this from which bits of the object
4813 might be nonzero in its own mode, taking into account the fact that, on
4814 CISC machines, accessing an object in a wider mode generally causes the
4815 high-order bits to become undefined, so they are not known to be zero.
4816 We extend this reasoning to RISC machines for operations that might not
4817 operate on the full registers. */
4818 if (mode_width > xmode_width
4819 && xmode_width <= BITS_PER_WORD
4820 && xmode_width <= HOST_BITS_PER_WIDE_INT
4821 && !(WORD_REGISTER_OPERATIONS && word_register_operation_p (x)))
4822 {
4823 nonzero &= cached_nonzero_bits (x, xmode,
4824 known_x, known_mode, known_ret);
4825 nonzero |= GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode);
4826 return nonzero;
4827 }
4828
4829 /* Please keep nonzero_bits_binary_arith_p above in sync with
4830 the code in the switch below. */
4831 switch (code)
4832 {
4833 case REG:
4834#if defined(POINTERS_EXTEND_UNSIGNED)
4835 /* If pointers extend unsigned and this is a pointer in Pmode, say that
4836 all the bits above ptr_mode are known to be zero. */
4837 /* As we do not know which address space the pointer is referring to,
4838 we can do this only if the target does not support different pointer
4839 or address modes depending on the address space. */
4840 if (target_default_pointer_address_modes_p ()
4841 && POINTERS_EXTEND_UNSIGNED
4842 && xmode == Pmode
4843 && REG_POINTER (x)
4844 && !targetm.have_ptr_extend ())
4845 nonzero &= GET_MODE_MASK (ptr_mode);
4846#endif
4847
4848 /* Include declared information about alignment of pointers. */
4849 /* ??? We don't properly preserve REG_POINTER changes across
4850 pointer-to-integer casts, so we can't trust it except for
4851 things that we know must be pointers. See execute/960116-1.c. */
4852 if ((x == stack_pointer_rtx
4853 || x == frame_pointer_rtx
4854 || x == arg_pointer_rtx)
4855 && REGNO_POINTER_ALIGN (REGNO (x)))
4856 {
4857 unsigned HOST_WIDE_INT alignment
4858 = REGNO_POINTER_ALIGN (REGNO (x)) / BITS_PER_UNIT;
4859
4860#ifdef PUSH_ROUNDING
4861 /* If PUSH_ROUNDING is defined, it is possible for the
4862 stack to be momentarily aligned only to that amount,
4863 so we pick the least alignment. */
4864 if (x == stack_pointer_rtx && targetm.calls.push_argument (0))
4865 {
4866 poly_uint64 rounded_1 = PUSH_ROUNDING (poly_int64 (1));
4867 alignment = MIN (known_alignment (rounded_1), alignment);
4868 }
4869#endif
4870
4871 nonzero &= ~(alignment - 1);
4872 }
4873
4874 {
4875 unsigned HOST_WIDE_INT nonzero_for_hook = nonzero;
4876 rtx new_rtx = rtl_hooks.reg_nonzero_bits (x, xmode, mode,
4877 &nonzero_for_hook);
4878
4879 if (new_rtx)
4880 nonzero_for_hook &= cached_nonzero_bits (new_rtx, mode, known_x,
4881 known_mode, known_ret);
4882
4883 return nonzero_for_hook;
4884 }
4885
4886 case MEM:
4887 /* In many, if not most, RISC machines, reading a byte from memory
4888 zeros the rest of the register. Noticing that fact saves a lot
4889 of extra zero-extends. */
4890 if (load_extend_op (xmode) == ZERO_EXTEND)
4891 nonzero &= GET_MODE_MASK (xmode);
4892 break;
4893
4894 case EQ: case NE:
4895 case UNEQ: case LTGT:
4896 case GT: case GTU: case UNGT:
4897 case LT: case LTU: case UNLT:
4898 case GE: case GEU: case UNGE:
4899 case LE: case LEU: case UNLE:
4900 case UNORDERED: case ORDERED:
4901 /* If this produces an integer result, we know which bits are set.
4902 Code here used to clear bits outside the mode of X, but that is
4903 now done above. */
4904 /* Mind that MODE is the mode the caller wants to look at this
4905 operation in, and not the actual operation mode. We can wind
4906 up with (subreg:DI (gt:V4HI x y)), and we don't have anything
4907 that describes the results of a vector compare. */
4908 if (GET_MODE_CLASS (xmode) == MODE_INT
4909 && mode_width <= HOST_BITS_PER_WIDE_INT)
4910 nonzero = STORE_FLAG_VALUE;
4911 break;
4912
4913 case NEG:
4914#if 0
4915 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4916 and num_sign_bit_copies. */
4917 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4918 nonzero = 1;
4919#endif
4920
4921 if (xmode_width < mode_width)
4922 nonzero |= (GET_MODE_MASK (mode) & ~GET_MODE_MASK (xmode));
4923 break;
4924
4925 case ABS:
4926#if 0
4927 /* Disabled to avoid exponential mutual recursion between nonzero_bits
4928 and num_sign_bit_copies. */
4929 if (num_sign_bit_copies (XEXP (x, 0), xmode) == xmode_width)
4930 nonzero = 1;
4931#endif
4932 break;
4933
4934 case TRUNCATE:
4935 nonzero &= (cached_nonzero_bits (XEXP (x, 0), mode,
4936 known_x, known_mode, known_ret)
4937 & GET_MODE_MASK (mode));
4938 break;
4939
4940 case ZERO_EXTEND:
4941 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4942 known_x, known_mode, known_ret);
4943 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4944 nonzero &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4945 break;
4946
4947 case SIGN_EXTEND:
4948 /* If the sign bit is known clear, this is the same as ZERO_EXTEND.
4949 Otherwise, show all the bits in the outer mode but not the inner
4950 may be nonzero. */
4951 inner_nz = cached_nonzero_bits (XEXP (x, 0), mode,
4952 known_x, known_mode, known_ret);
4953 if (GET_MODE (XEXP (x, 0)) != VOIDmode)
4954 {
4955 inner_nz &= GET_MODE_MASK (GET_MODE (XEXP (x, 0)));
4956 if (val_signbit_known_set_p (GET_MODE (XEXP (x, 0)), inner_nz))
4957 inner_nz |= (GET_MODE_MASK (mode)
4958 & ~GET_MODE_MASK (GET_MODE (XEXP (x, 0))));
4959 }
4960
4961 nonzero &= inner_nz;
4962 break;
4963
4964 case AND:
4965 nonzero &= cached_nonzero_bits (XEXP (x, 0), mode,
4966 known_x, known_mode, known_ret)
4967 & cached_nonzero_bits (XEXP (x, 1), mode,
4968 known_x, known_mode, known_ret);
4969 break;
4970
4971 case XOR: case IOR:
4972 case UMIN: case UMAX: case SMIN: case SMAX:
4973 {
4974 unsigned HOST_WIDE_INT nonzero0
4975 = cached_nonzero_bits (XEXP (x, 0), mode,
4976 known_x, known_mode, known_ret);
4977
4978 /* Don't call nonzero_bits for the second time if it cannot change
4979 anything. */
4980 if ((nonzero & nonzero0) != nonzero)
4981 nonzero &= nonzero0
4982 | cached_nonzero_bits (XEXP (x, 1), mode,
4983 known_x, known_mode, known_ret);
4984 }
4985 break;
4986
4987 case PLUS: case MINUS:
4988 case MULT:
4989 case DIV: case UDIV:
4990 case MOD: case UMOD:
4991 /* We can apply the rules of arithmetic to compute the number of
4992 high- and low-order zero bits of these operations. We start by
4993 computing the width (position of the highest-order nonzero bit)
4994 and the number of low-order zero bits for each value. */
4995 {
4996 unsigned HOST_WIDE_INT nz0
4997 = cached_nonzero_bits (XEXP (x, 0), mode,
4998 known_x, known_mode, known_ret);
4999 unsigned HOST_WIDE_INT nz1
5000 = cached_nonzero_bits (XEXP (x, 1), mode,
5001 known_x, known_mode, known_ret);
5002 int sign_index = xmode_width - 1;
5003 int width0 = floor_log2 (nz0) + 1;
5004 int width1 = floor_log2 (nz1) + 1;
5005 int low0 = ctz_or_zero (nz0);
5006 int low1 = ctz_or_zero (nz1);
5007 unsigned HOST_WIDE_INT op0_maybe_minusp
5008 = nz0 & (HOST_WIDE_INT_1U << sign_index);
5009 unsigned HOST_WIDE_INT op1_maybe_minusp
5010 = nz1 & (HOST_WIDE_INT_1U << sign_index);
5011 unsigned int result_width = mode_width;
5012 int result_low = 0;
5013
5014 switch (code)
5015 {
5016 case PLUS:
5017 result_width = MAX (width0, width1) + 1;
5018 result_low = MIN (low0, low1);
5019 break;
5020 case MINUS:
5021 result_low = MIN (low0, low1);
5022 break;
5023 case MULT:
5024 result_width = width0 + width1;
5025 result_low = low0 + low1;
5026 break;
5027 case DIV:
5028 if (width1 == 0)
5029 break;
5030 if (!op0_maybe_minusp && !op1_maybe_minusp)
5031 result_width = width0;
5032 break;
5033 case UDIV:
5034 if (width1 == 0)
5035 break;
5036 result_width = width0;
5037 break;
5038 case MOD:
5039 if (width1 == 0)
5040 break;
5041 if (!op0_maybe_minusp && !op1_maybe_minusp)
5042 result_width = MIN (width0, width1);
5043 result_low = MIN (low0, low1);
5044 break;
5045 case UMOD:
5046 if (width1 == 0)
5047 break;
5048 result_width = MIN (width0, width1);
5049 result_low = MIN (low0, low1);
5050 break;
5051 default:
5052 gcc_unreachable ();
5053 }
5054
5055 /* Note that mode_width <= HOST_BITS_PER_WIDE_INT, see above. */
5056 if (result_width < mode_width)
5057 nonzero &= (HOST_WIDE_INT_1U << result_width) - 1;
5058
5059 if (result_low > 0)
5060 {
5061 if (result_low < HOST_BITS_PER_WIDE_INT)
5062 nonzero &= ~((HOST_WIDE_INT_1U << result_low) - 1);
5063 else
5064 nonzero = 0;
5065 }
5066 }
5067 break;
5068
5069 case ZERO_EXTRACT:
5070 if (CONST_INT_P (XEXP (x, 1))
5071 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT)
5072 nonzero &= (HOST_WIDE_INT_1U << INTVAL (XEXP (x, 1))) - 1;
5073 break;
5074
5075 case SUBREG:
5076 /* If this is a SUBREG formed for a promoted variable that has
5077 been zero-extended, we know that at least the high-order bits
5078 are zero, though others might be too. */
5079 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_UNSIGNED_P (x))
5080 nonzero = GET_MODE_MASK (xmode)
5081 & cached_nonzero_bits (SUBREG_REG (x), xmode,
5082 known_x, known_mode, known_ret);
5083
5084 /* If the inner mode is a single word for both the host and target
5085 machines, we can compute this from which bits of the inner
5086 object might be nonzero. */
5087 inner_mode = GET_MODE (SUBREG_REG (x));
5088 if (GET_MODE_PRECISION (inner_mode).is_constant (&inner_width)
5089 && inner_width <= BITS_PER_WORD
5090 && inner_width <= HOST_BITS_PER_WIDE_INT)
5091 {
5092 nonzero &= cached_nonzero_bits (SUBREG_REG (x), mode,
5093 known_x, known_mode, known_ret);
5094
5095 /* On a typical CISC machine, accessing an object in a wider mode
5096 causes the high-order bits to become undefined. So they are
5097 not known to be zero.
5098
5099 On a typical RISC machine, we only have to worry about the way
5100 loads are extended. Otherwise, if we get a reload for the inner
5101 part, it may be loaded from the stack, and then we may lose all
5102 the zero bits that existed before the store to the stack. */
5103 rtx_code extend_op;
5104 if ((!WORD_REGISTER_OPERATIONS
5105 || ((extend_op = load_extend_op (inner_mode)) == SIGN_EXTEND
5106 ? val_signbit_known_set_p (inner_mode, nonzero)
5107 : extend_op != ZERO_EXTEND)
5108 || !MEM_P (SUBREG_REG (x)))
5109 && xmode_width > inner_width)
5110 nonzero
5111 |= (GET_MODE_MASK (GET_MODE (x)) & ~GET_MODE_MASK (inner_mode));
5112 }
5113 break;
5114
5115 case ASHIFT:
5116 case ASHIFTRT:
5117 case LSHIFTRT:
5118 case ROTATE:
5119 case ROTATERT:
5120 /* The nonzero bits are in two classes: any bits within MODE
5121 that aren't in xmode are always significant. The rest of the
5122 nonzero bits are those that are significant in the operand of
5123 the shift when shifted the appropriate number of bits. This
5124 shows that high-order bits are cleared by the right shift and
5125 low-order bits by left shifts. */
5126 if (CONST_INT_P (XEXP (x, 1))
5127 && INTVAL (XEXP (x, 1)) >= 0
5128 && INTVAL (XEXP (x, 1)) < HOST_BITS_PER_WIDE_INT
5129 && INTVAL (XEXP (x, 1)) < xmode_width)
5130 {
5131 int count = INTVAL (XEXP (x, 1));
5132 unsigned HOST_WIDE_INT mode_mask = GET_MODE_MASK (xmode);
5133 unsigned HOST_WIDE_INT op_nonzero
5134 = cached_nonzero_bits (XEXP (x, 0), mode,
5135 known_x, known_mode, known_ret);
5136 unsigned HOST_WIDE_INT inner = op_nonzero & mode_mask;
5137 unsigned HOST_WIDE_INT outer = 0;
5138
5139 if (mode_width > xmode_width)
5140 outer = (op_nonzero & nonzero & ~mode_mask);
5141
5142 switch (code)
5143 {
5144 case ASHIFT:
5145 inner <<= count;
5146 break;
5147
5148 case LSHIFTRT:
5149 inner >>= count;
5150 break;
5151
5152 case ASHIFTRT:
5153 inner >>= count;
5154
5155 /* If the sign bit may have been nonzero before the shift, we
5156 need to mark all the places it could have been copied to
5157 by the shift as possibly nonzero. */
5158 if (inner & (HOST_WIDE_INT_1U << (xmode_width - 1 - count)))
5159 inner |= (((HOST_WIDE_INT_1U << count) - 1)
5160 << (xmode_width - count));
5161 break;
5162
5163 case ROTATE:
5164 inner = (inner << (count % xmode_width)
5165 | (inner >> (xmode_width - (count % xmode_width))))
5166 & mode_mask;
5167 break;
5168
5169 case ROTATERT:
5170 inner = (inner >> (count % xmode_width)
5171 | (inner << (xmode_width - (count % xmode_width))))
5172 & mode_mask;
5173 break;
5174
5175 default:
5176 gcc_unreachable ();
5177 }
5178
5179 nonzero &= (outer | inner);
5180 }
5181 break;
5182
5183 case FFS:
5184 case POPCOUNT:
5185 /* This is at most the number of bits in the mode. */
5186 nonzero = ((unsigned HOST_WIDE_INT) 2 << (floor_log2 (mode_width))) - 1;
5187 break;
5188
5189 case CLZ:
5190 /* If CLZ has a known value at zero, then the nonzero bits are
5191 that value, plus the number of bits in the mode minus one. */
5192 if (CLZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5193 nonzero
5194 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5195 else
5196 nonzero = -1;
5197 break;
5198
5199 case CTZ:
5200 /* If CTZ has a known value at zero, then the nonzero bits are
5201 that value, plus the number of bits in the mode minus one. */
5202 if (CTZ_DEFINED_VALUE_AT_ZERO (mode, nonzero))
5203 nonzero
5204 |= (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5205 else
5206 nonzero = -1;
5207 break;
5208
5209 case CLRSB:
5210 /* This is at most the number of bits in the mode minus 1. */
5211 nonzero = (HOST_WIDE_INT_1U << (floor_log2 (mode_width))) - 1;
5212 break;
5213
5214 case PARITY:
5215 nonzero = 1;
5216 break;
5217
5218 case IF_THEN_ELSE:
5219 {
5220 unsigned HOST_WIDE_INT nonzero_true
5221 = cached_nonzero_bits (XEXP (x, 1), mode,
5222 known_x, known_mode, known_ret);
5223
5224 /* Don't call nonzero_bits for the second time if it cannot change
5225 anything. */
5226 if ((nonzero & nonzero_true) != nonzero)
5227 nonzero &= nonzero_true
5228 | cached_nonzero_bits (XEXP (x, 2), mode,
5229 known_x, known_mode, known_ret);
5230 }
5231 break;
5232
5233 default:
5234 break;
5235 }
5236
5237 return nonzero;
5238}
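
/* [Editor's note: worked example, NOT part of rtlanal.cc.]  For the
   arithmetic cases above (lines 4991-5050) the tracked facts are each
   operand's width (highest possibly-nonzero bit position + 1) and its
   count of low-order zero bits.  With nz0 = 0x0c (width 4, 2 low zeros)
   and nz1 = 0x30 (width 6, 4 low zeros):
     PLUS: result_width = max (4, 6) + 1 = 7, result_low = min (2, 4) = 2,
           so nonzero becomes 0x7f & ~0x3 = 0x7c;
     MULT: result_width = 4 + 6 = 10, result_low = 2 + 4 = 6, so in an
           8-bit mode nonzero becomes 0xff & ~0x3f = 0xc0.
   Both bounds can be spot-checked on the extreme operand values:  */
static_assert (0x0cu + 0x30u < (1u << 7), "PLUS width bound");
static_assert ((0x0cu * 0x30u) % (1u << 6) == 0, "MULT low-zero bound");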
5239
5240/* See the macro definition above. */
5241#undef cached_num_sign_bit_copies
5242
5243
5244/* Return true if num_sign_bit_copies1 might recurse into both operands
5245 of X. */
5246
5247static inline bool
5248num_sign_bit_copies_binary_arith_p (const_rtx x)
5249{
5250 if (!ARITHMETIC_P (x))
5251 return false;
5252 switch (GET_CODE (x))
5253 {
5254 case IOR:
5255 case AND:
5256 case XOR:
5257 case SMIN:
5258 case SMAX:
5259 case UMIN:
5260 case UMAX:
5261 case PLUS:
5262 case MINUS:
5263 case MULT:
5264 return true;
5265 default:
5266 return false;
5267 }
5268}
5269
5270/* The function cached_num_sign_bit_copies is a wrapper around
5271 num_sign_bit_copies1. It avoids exponential behavior in
5272 num_sign_bit_copies1 when X has identical subexpressions on the
5273 first or the second level. */
5274
5275static unsigned int
5276cached_num_sign_bit_copies (const_rtx x, scalar_int_mode mode,
5277 const_rtx known_x, machine_mode known_mode,
5278 unsigned int known_ret)
5279{
5280 if (x == known_x && mode == known_mode)
5281 return known_ret;
5282
5283 /* Try to find identical subexpressions. If found call
5284 num_sign_bit_copies1 on X with the subexpressions as KNOWN_X and
5285 the precomputed value for the subexpression as KNOWN_RET. */
5286
5287 if (num_sign_bit_copies_binary_arith_p (x))
5288 {
5289 rtx x0 = XEXP (x, 0);
5290 rtx x1 = XEXP (x, 1);
5291
5292 /* Check the first level. */
5293 if (x0 == x1)
5294 return
5295 num_sign_bit_copies1 (x, mode, x0, mode,
5296 cached_num_sign_bit_copies (x0, mode, known_x,
5297 known_mode,
5298 known_ret));
5299
5300 /* Check the second level. */
5301 if (num_sign_bit_copies_binary_arith_p (x0)
5302 && (x1 == XEXP (x0, 0) || x1 == XEXP (x0, 1)))
5303 return
5304 num_sign_bit_copies1 (x, mode, x1, mode,
5305 cached_num_sign_bit_copies (x1, mode, known_x,
5306 known_mode,
5307 known_ret));
5308
5309 if (num_sign_bit_copies_binary_arith_p (x1)
5310 && (x0 == XEXP (x1, 0) || x0 == XEXP (x1, 1)))
5311 return
5312 num_sign_bit_copies1 (x, mode, x0, mode,
5313 cached_num_sign_bit_copies (x0, mode, known_x,
5314 known_mode,
5315 known_ret));
5316 }
5317
5318 return num_sign_bit_copies1 (x, mode, known_x, known_mode, known_ret);
5319}
5320
5321/* Return the number of bits at the high-order end of X that are known to
5322 be equal to the sign bit. X will be used in mode MODE. The returned
5323 value will always be between 1 and the number of bits in MODE. */
5324
5325static unsigned int
5326num_sign_bit_copies1 (const_rtx x, scalar_int_mode mode, const_rtx known_x,
5327 machine_mode known_mode,
5328 unsigned int known_ret)
5329{
5330 enum rtx_code code = GET_CODE (x)((enum rtx_code) (x)->code);
5331 unsigned int bitwidth = GET_MODE_PRECISION (mode);
5332 int num0, num1, result;
5333 unsigned HOST_WIDE_INT nonzero;
5334
5335 if (CONST_INT_P (x))
5336 {
5337 /* If the constant is negative, take its 1's complement and remask.
5338 Then see how many zero bits we have. */
5339 nonzero = UINTVAL (x) & GET_MODE_MASK (mode);
5340 if (bitwidth <= HOST_BITS_PER_WIDE_INT
5341 && (nonzero & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5342 nonzero = (~nonzero) & GET_MODE_MASK (mode);
5343
5344 return (nonzero == 0 ? bitwidth : bitwidth - floor_log2 (nonzero) - 1);
5345 }
5346
5347 scalar_int_mode xmode, inner_mode;
5348 if (!is_a <scalar_int_mode> (GET_MODE (x), &xmode))
5349 return 1;
5350
5351 unsigned int xmode_width = GET_MODE_PRECISION (xmode);
5352
5353 /* For a smaller mode, just ignore the high bits. */
5354 if (bitwidth < xmode_width)
5355 {
5356 num0 = cached_num_sign_bit_copies (x, xmode,
5357 known_x, known_mode, known_ret);
5358 return MAX (1, num0 - (int) (xmode_width - bitwidth));
5359 }
5360
5361 if (bitwidth > xmode_width)
5362 {
5363 /* If this machine does not do all register operations on the entire
5364 register and MODE is wider than the mode of X, we can say nothing
5365 at all about the high-order bits. We extend this reasoning to RISC
5366 machines for operations that might not operate on full registers. */
5367 if (!(WORD_REGISTER_OPERATIONS0 && word_register_operation_p (x)))
5368 return 1;
5369
5370 /* Likewise on machines that do, if the mode of the object is smaller
5371 than a word and loads of that size don't sign extend, we can say
5372 nothing about the high order bits. */
5373 if (xmode_width < BITS_PER_WORD
5374 && load_extend_op (xmode) != SIGN_EXTEND)
5375 return 1;
5376 }
5377
5378 /* Please keep num_sign_bit_copies_binary_arith_p above in sync with
5379 the code in the switch below. */
5380 switch (code)
5381 {
5382 case REG:
5383
5384#if defined(POINTERS_EXTEND_UNSIGNED)
5385 /* If pointers extend signed and this is a pointer in Pmode, say that
5386 all the bits above ptr_mode are known to be sign bit copies. */
5387 /* As we do not know which address space the pointer is referring to,
5388 we can do this only if the target does not support different pointer
5389 or address modes depending on the address space. */
5390 if (target_default_pointer_address_modes_p ()
5391 && ! POINTERS_EXTEND_UNSIGNED && xmode == Pmode
5392 && mode == Pmode && REG_POINTER (x)
5393 && !targetm.have_ptr_extend ())
5394 return GET_MODE_PRECISION (Pmode) - GET_MODE_PRECISION (ptr_mode) + 1;
5395#endif
5396
5397 {
5398 unsigned int copies_for_hook = 1, copies = 1;
5399 rtx new_rtx = rtl_hooks.reg_num_sign_bit_copies (x, xmode, mode,
5400 &copies_for_hook);
5401
5402 if (new_rtx)
5403 copies = cached_num_sign_bit_copies (new_rtx, mode, known_x,
5404 known_mode, known_ret);
5405
5406 if (copies > 1 || copies_for_hook > 1)
5407 return MAX (copies, copies_for_hook);
5408
5409 /* Else, use nonzero_bits to guess num_sign_bit_copies (see below). */
5410 }
5411 break;
5412
5413 case MEM:
5414 /* Some RISC machines sign-extend all loads of smaller than a word. */
5415 if (load_extend_op (xmode) == SIGN_EXTEND)
5416 return MAX (1, ((int) bitwidth - (int) xmode_width + 1));
5417 break;
5418
5419 case SUBREG:
5420 /* If this is a SUBREG for a promoted object that is sign-extended
5421 and we are looking at it in a wider mode, we know that at least the
5422 high-order bits are known to be sign bit copies. */
5423
5424 if (SUBREG_PROMOTED_VAR_P (x) && SUBREG_PROMOTED_SIGNED_P (x))
5425 {
5426 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5427 known_x, known_mode, known_ret);
5428 return MAX ((int) bitwidth - (int) xmode_width + 1, num0);
5429 }
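
Worked case: for an 8-bit subreg of a sign-promoted register examined at bitwidth 32, at least 32 - 8 + 1 = 25 high-order bits are sign copies; the recursive count num0 on the full register can only raise that bound, hence the MAX.
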
5430
5431 if (is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), &inner_mode))
5432 {
5433 /* For a smaller object, just ignore the high bits. */
5434 if (bitwidth <= GET_MODE_PRECISION (inner_mode))
5435 {
5436 num0 = cached_num_sign_bit_copies (SUBREG_REG (x), inner_mode,
5437 known_x, known_mode,
5438 known_ret);
5439 return MAX (1, num0 - (int) (GET_MODE_PRECISION (inner_mode)
5440 - bitwidth));
5441 }
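
Worked case: with a 32-bit inner mode, num0 = 20 known copies, and bitwidth 16, discarding the top 32 - 16 = 16 bits leaves MAX (1, 20 - 16) = 4 copies in the narrower view.
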
5442
5443 /* For paradoxical SUBREGs on machines where all register operations
5444 affect the entire register, just look inside. Note that we are
5445 passing MODE to the recursive call, so the number of sign bit
5446 copies will remain relative to that mode, not the inner mode.
5447
5448 This works only if loads sign extend. Otherwise, if we get a
5449 reload for the inner part, it may be loaded from the stack, and
5450 then we lose all sign bit copies that existed before the store
5451 to the stack. */
5452 if (WORD_REGISTER_OPERATIONS
5453 && load_extend_op (inner_mode) == SIGN_EXTEND
5454 && paradoxical_subreg_p (x)
5455 && MEM_P (SUBREG_REG (x)))
5456 return cached_num_sign_bit_copies (SUBREG_REG (x), mode,
5457 known_x, known_mode, known_ret);
5458 }
5459 break;
5460
5461 case SIGN_EXTRACT:
5462 if (CONST_INT_P (XEXP (x, 1)))
5463 return MAX (1, (int) bitwidth - INTVAL (XEXP (x, 1)));
5464 break;
5465
5466 case SIGN_EXTEND:
5467 if (is_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)), &inner_mode))
5468 return (bitwidth - GET_MODE_PRECISION (inner_mode)
5469 + cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5470 known_x, known_mode, known_ret));
5471 break;
5472
5473 case TRUNCATE:
5474 /* For a smaller object, just ignore the high bits. */
5475 inner_mode = as_a <scalar_int_mode> (GET_MODE (XEXP (x, 0)));
5476 num0 = cached_num_sign_bit_copies (XEXP (x, 0), inner_mode,
5477 known_x, known_mode, known_ret);
5478 return MAX (1, (num0 - (int) (GET_MODE_PRECISION (inner_mode)
5479 - bitwidth)));
5480
5481 case NOT:
5482 return cached_num_sign_bit_copies (XEXP (x, 0), mode,
5483 known_x, known_mode, known_ret);
5484
5485 case ROTATE: case ROTATERT:
5486 /* If we are rotating left by a number of bits less than the number
5487 of sign bit copies, we can just subtract that amount from the
5488 number. */
5489 if (CONST_INT_P (XEXP (x, 1))
5490 && INTVAL (XEXP (x, 1)) >= 0
5491 && INTVAL (XEXP (x, 1)) < (int) bitwidth)
5492 {
5493 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5494 known_x, known_mode, known_ret);
5495 return MAX (1, num0 - (code == ROTATE ? INTVAL (XEXP (x, 1))
5496 : (int) bitwidth - INTVAL (XEXP (x, 1))));
5497 }
5498 break;
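
Worked case in 8 bits: 0xf8 has 5 sign-bit copies, and rotating it left by 2 gives 0xe3 = 11100011, which has MAX (1, 5 - 2) = 3. A right rotation by r is handled as a left rotation by bitwidth - r, which is the second arm of the MAX.
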
5499
5500 case NEG:
5501 /* In general, this subtracts one sign bit copy. But if the value
5502 is known to be positive, the number of sign bit copies is the
5503 same as that of the input. Finally, if the input has just one bit
5504 that might be nonzero, all the bits are copies of the sign bit. */
5505 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5506 known_x, known_mode, known_ret);
5507 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5508 return num0 > 1 ? num0 - 1 : 1;
5509
5510 nonzero = nonzero_bits (XEXP (x, 0), mode);
5511 if (nonzero == 1)
5512 return bitwidth;
5513
5514 if (num0 > 1
5515 && ((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero))
5516 num0--;
5517
5518 return num0;
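
Worked case in 8 bits: -16 = 0xf0 has 4 sign-bit copies while its negation 16 = 0x10 has only 3, the one-copy loss the general case allows for. When nonzero == 1 the operand can only be 0 or 1, so the negation is 0 or -1 and all bitwidth bits are sign copies.
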
5519
5520 case IOR: case AND: case XOR:
5521 case SMIN: case SMAX: case UMIN: case UMAX:
5522 /* Logical operations will preserve the number of sign-bit copies.
5523 MIN and MAX operations always return one of the operands. */
5524 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5525 known_x, known_mode, known_ret);
5526 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5527 known_x, known_mode, known_ret);
5528
5529 /* If num1 is clearing some of the top bits then regardless of
5530 the other term, we are guaranteed to have at least that many
5531 high-order zero bits. */
5532 if (code == AND
5533 && num1 > 1
5534 && bitwidth <= HOST_BITS_PER_WIDE_INT
5535 && CONST_INT_P (XEXP (x, 1))
5536 && (UINTVAL (XEXP (x, 1))
5537 & (HOST_WIDE_INT_1U << (bitwidth - 1))) == 0)
5538 return num1;
5539
5540 /* Similarly for IOR when setting high-order bits. */
5541 if (code == IOR
5542 && num1 > 1
5543 && bitwidth <= HOST_BITS_PER_WIDE_INT
5544 && CONST_INT_P (XEXP (x, 1))
5545 && (UINTVAL (XEXP (x, 1))
5546 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5547 return num1;
5548
5549 return MIN (num0, num1);
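
Worked AND case at bitwidth 32: for x & 0x0000ffff the constant has num1 = 16 copies of a zero sign bit, so the result's top 16 bits are zero whatever x is and num1 is returned directly; the IOR shortcut is the mirror image for constants whose high-order bits are set.
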
5550
5551 case PLUS: case MINUS:
5552 /* For addition and subtraction, we can have a 1-bit carry. However,
5553 if we are subtracting 1 from a positive number, there will not
5554 be such a carry. Furthermore, if the positive number is known to
5555 be 0 or 1, we know the result is either -1 or 0. */
5556
5557 if (code == PLUS && XEXP (x, 1) == constm1_rtx
5558 && bitwidth <= HOST_BITS_PER_WIDE_INT)
5559 {
5560 nonzero = nonzero_bits (XEXP (x, 0), mode);
5561 if (((HOST_WIDE_INT_1U << (bitwidth - 1)) & nonzero) == 0)
5562 return (nonzero == 1 || nonzero == 0 ? bitwidth
5563 : bitwidth - floor_log2 (nonzero) - 1);
5564 }
5565
5566 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5567 known_x, known_mode, known_ret);
5568 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5569 known_x, known_mode, known_ret);
5570 result = MAX (1, MIN (num0, num1) - 1);
5571
5572 return result;
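
Worked case in 8 bits: 0x3f has 2 sign-bit copies and 0x01 has 7, but 0x3f + 0x01 = 0x40 has only MAX (1, MIN (2, 7) - 1) = 1, the carry having rippled into the copied bits. The PLUS/constm1_rtx shortcut above covers x - 1: when nonzero_bits proves x is 0 or 1, the result is -1 or 0 and all bitwidth bits are copies.
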
5573
5574 case MULT:
5575 /* The number of bits of the product is the sum of the number of
5576 bits of both terms. However, unless one of the terms is known
5577 to be positive, we must allow for an additional bit since negating
5578 a negative number can remove one sign bit copy. */
5579
5580 num0 = cached_num_sign_bit_copies (XEXP (x, 0), mode,
5581 known_x, known_mode, known_ret);
5582 num1 = cached_num_sign_bit_copies (XEXP (x, 1), mode,
5583 known_x, known_mode, known_ret);
5584
5585 result = bitwidth - (bitwidth - num0) - (bitwidth - num1);
5586 if (result > 0
5587 && (bitwidth > HOST_BITS_PER_WIDE_INT
5588 || (((nonzero_bits (XEXP (x, 0), mode)
5589 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5590 && ((nonzero_bits (XEXP (x, 1), mode)
5591 & (HOST_WIDE_INT_1U << (bitwidth - 1)))
5592 != 0))))
5593 result--;
5594
5595 return MAX (1, result);
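
Worked case at bitwidth 32: operands with num0 = num1 = 20 copies each fit in 32 - 20 + 1 = 13 significant bits, so result = 32 - 12 - 12 = 8; if neither operand is provably nonnegative, multiplying the two most negative magnitudes can cost one further bit, leaving 7.
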
5596
5597 case UDIV:
5598 /* The result must be <= the first operand. If the first operand
5599 has the high bit set, we know nothing about the number of sign
5600 bit copies. */
5601 if (bitwidth > HOST_BITS_PER_WIDE_INT)
5602 return 1;
5603 else if ((nonzero_bits (XEXP (x, 0), mode)
5604 & (HOST_WIDE_INT_1U << (bitwidth - 1))) != 0)
5605 return 1;
5606 else