File: build/gcc/expr.cc
Warning: line 3338, column 5: 2nd function call argument is an uninitialized value
/* Convert tree expression to rtl instructions, for GNU compiler.
   Copyright (C) 1988-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "ssa.h"
#include "optabs.h"
#include "expmed.h"
#include "regs.h"
#include "emit-rtl.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "attribs.h"
#include "varasm.h"
#include "except.h"
#include "insn-attr.h"
#include "dojump.h"
#include "explow.h"
#include "calls.h"
#include "stmt.h"
/* Include expr.h after insn-config.h so we get HAVE_conditional_move.  */
#include "expr.h"
#include "optabs-tree.h"
#include "libfuncs.h"
#include "reload.h"
#include "langhooks.h"
#include "common/common-target.h"
#include "tree-dfa.h"
#include "tree-ssa-live.h"
#include "tree-outof-ssa.h"
#include "tree-ssa-address.h"
#include "builtins.h"
#include "ccmp.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "rtx-vector-builder.h"
#include "tree-pretty-print.h"
#include "flags.h"


/* If this is nonzero, we do not bother generating VOLATILE
   around volatile memory references, and we are willing to
   output indirect addresses.  If cse is to follow, we reject
   indirect addresses so a useful potential cse is generated;
   if it is used only once, instruction combination will produce
   the same indirect address eventually.  */
int cse_not_expected;

static bool block_move_libcall_safe_for_call_parm (void);
static bool emit_block_move_via_pattern (rtx, rtx, rtx, unsigned, unsigned,
                                         HOST_WIDE_INT, unsigned HOST_WIDE_INT,
                                         unsigned HOST_WIDE_INT,
                                         unsigned HOST_WIDE_INT, bool);
static void emit_block_move_via_loop (rtx, rtx, rtx, unsigned);
static void clear_by_pieces (rtx, unsigned HOST_WIDE_INT, unsigned int);
static rtx_insn *compress_float_constant (rtx, rtx);
static rtx get_subtarget (rtx);
static rtx store_field (rtx, poly_int64, poly_int64, poly_uint64, poly_uint64,
                        machine_mode, tree, alias_set_type, bool, bool);

static unsigned HOST_WIDE_INT highest_pow2_factor_for_target (const_tree, const_tree);

static int is_aligning_offset (const_tree, const_tree);
static rtx reduce_to_bit_field_precision (rtx, rtx, tree);
static rtx do_store_flag (sepops, rtx, machine_mode);
#ifdef PUSH_ROUNDING
static void emit_single_push_insn (machine_mode, rtx, tree);
#endif
static void do_tablejump (rtx, machine_mode, rtx, rtx, rtx,
                          profile_probability);
static rtx const_vector_from_tree (tree);
static tree tree_expr_size (const_tree);
static void convert_mode_scalar (rtx, rtx, int);

/* This is run to set up which modes can be used
   directly in memory and to initialize the block move optab.  It is run
   at the beginning of compilation and when the target is reinitialized.  */

void
init_expr_target (void)
{
  rtx pat;
  int num_clobbers;
  rtx mem, mem1;
  rtx reg;

  /* Try indexing by frame ptr and try by stack ptr.
     It is known that on the Convex the stack ptr isn't a valid index.
     With luck, one or the other is valid on any machine.  */
  mem = gen_rtx_MEM (word_mode, stack_pointer_rtx);
  mem1 = gen_rtx_MEM (word_mode, frame_pointer_rtx);

  /* A scratch register we can modify in-place below to avoid
     useless RTL allocations.  */
  reg = gen_rtx_REG (word_mode, LAST_VIRTUAL_REGISTER + 1);

  rtx_insn *insn = as_a<rtx_insn *> (rtx_alloc (INSN));
  pat = gen_rtx_SET (NULL_RTX, NULL_RTX);
  PATTERN (insn) = pat;

  for (machine_mode mode = VOIDmode; (int) mode < NUM_MACHINE_MODES;
       mode = (machine_mode) ((int) mode + 1))
    {
      int regno;

      direct_load[(int) mode] = direct_store[(int) mode] = 0;
      PUT_MODE (mem, mode);
      PUT_MODE (mem1, mode);

      /* See if there is some register that can be used in this mode and
         directly loaded or stored from memory.  */

      if (mode != VOIDmode && mode != BLKmode)
        for (regno = 0; regno < FIRST_PSEUDO_REGISTER
             && (direct_load[(int) mode] == 0 || direct_store[(int) mode] == 0);
             regno++)
          {
            if (!targetm.hard_regno_mode_ok (regno, mode))
              continue;

            set_mode_and_regno (reg, mode, regno);

            SET_SRC (pat) = mem;
            SET_DEST (pat) = reg;
            if (recog (pat, insn, &num_clobbers) >= 0)
              direct_load[(int) mode] = 1;

            SET_SRC (pat) = mem1;
            SET_DEST (pat) = reg;
            if (recog (pat, insn, &num_clobbers) >= 0)
              direct_load[(int) mode] = 1;

            SET_SRC (pat) = reg;
            SET_DEST (pat) = mem;
            if (recog (pat, insn, &num_clobbers) >= 0)
              direct_store[(int) mode] = 1;

            SET_SRC (pat) = reg;
            SET_DEST (pat) = mem1;
            if (recog (pat, insn, &num_clobbers) >= 0)
              direct_store[(int) mode] = 1;
          }
    }

  mem = gen_rtx_MEM (VOIDmode, gen_raw_REG (Pmode, LAST_VIRTUAL_REGISTER + 1));

  opt_scalar_float_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_FLOAT)
    {
      scalar_float_mode mode = mode_iter.require ();
      scalar_float_mode srcmode;
      FOR_EACH_MODE_UNTIL (srcmode, mode)
        {
          enum insn_code ic;

          ic = can_extend_p (mode, srcmode, 0);
          if (ic == CODE_FOR_nothing)
            continue;

          PUT_MODE (mem, srcmode);

          if (insn_operand_matches (ic, 1, mem))
            float_extend_from_mem[mode][srcmode] = true;
        }
    }
}

/* This is run at the start of compiling a function.  */

void
init_expr (void)
{
  memset (&crtl->expr, 0, sizeof (crtl->expr));
}

/* Copy data from FROM to TO, where the machine modes are not the same.
   Both modes may be integer, or both may be floating, or both may be
   fixed-point.
   UNSIGNEDP should be nonzero if FROM is an unsigned type.
   This causes zero-extension instead of sign-extension.  */
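/* For example, with a QImode source and an SImode destination, a nonzero
   UNSIGNEDP typically results in a zero_extend insn and a zero UNSIGNEDP
   in a sign_extend insn, assuming the target provides the extend pattern;
   otherwise the helpers below fall back to wider or multiword sequences.  */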

void
convert_move (rtx to, rtx from, int unsignedp)
{
  machine_mode to_mode = GET_MODE (to);
  machine_mode from_mode = GET_MODE (from);

  gcc_assert (to_mode != BLKmode);
  gcc_assert (from_mode != BLKmode);

  /* If the source and destination are already the same, then there's
     nothing to do.  */
  if (to == from)
    return;

  /* If FROM is a SUBREG that indicates that we have already done at least
     the required extension, strip it.  We don't handle such SUBREGs as
     TO here.  */

  scalar_int_mode to_int_mode;
  if (GET_CODE (from) == SUBREG
      && SUBREG_PROMOTED_VAR_P (from)
      && is_a <scalar_int_mode> (to_mode, &to_int_mode)
      && (GET_MODE_PRECISION (subreg_promoted_mode (from))
          >= GET_MODE_PRECISION (to_int_mode))
      && SUBREG_CHECK_PROMOTED_SIGN (from, unsignedp))
    {
      scalar_int_mode int_orig_mode;
      scalar_int_mode int_inner_mode;
      machine_mode orig_mode = GET_MODE (from);

      from = gen_lowpart (to_int_mode, SUBREG_REG (from));
      from_mode = to_int_mode;

      /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than
         the original mode, but narrower than the inner mode.  */
      if (GET_CODE (from) == SUBREG
          && is_a <scalar_int_mode> (orig_mode, &int_orig_mode)
          && GET_MODE_PRECISION (to_int_mode)
             > GET_MODE_PRECISION (int_orig_mode)
          && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (from)),
                                     &int_inner_mode)
          && GET_MODE_PRECISION (int_inner_mode)
             > GET_MODE_PRECISION (to_int_mode))
        {
          SUBREG_PROMOTED_VAR_P (from) = 1;
          SUBREG_PROMOTED_SET (from, unsignedp);
        }
    }

  gcc_assert (GET_CODE (to) != SUBREG || !SUBREG_PROMOTED_VAR_P (to));

  if (to_mode == from_mode
      || (from_mode == VOIDmode && CONSTANT_P (from)))
    {
      emit_move_insn (to, from);
      return;
    }

  if (VECTOR_MODE_P (to_mode) || VECTOR_MODE_P (from_mode))
    {
      if (GET_MODE_UNIT_PRECISION (to_mode)
          > GET_MODE_UNIT_PRECISION (from_mode))
        {
          optab op = unsignedp ? zext_optab : sext_optab;
          insn_code icode = convert_optab_handler (op, to_mode, from_mode);
          if (icode != CODE_FOR_nothing)
            {
              emit_unop_insn (icode, to, from,
                              unsignedp ? ZERO_EXTEND : SIGN_EXTEND);
              return;
            }
        }

      if (GET_MODE_UNIT_PRECISION (to_mode)
          < GET_MODE_UNIT_PRECISION (from_mode))
        {
          insn_code icode = convert_optab_handler (trunc_optab,
                                                   to_mode, from_mode);
          if (icode != CODE_FOR_nothing)
            {
              emit_unop_insn (icode, to, from, TRUNCATE);
              return;
            }
        }

      gcc_assert (known_eq (GET_MODE_BITSIZE (from_mode),
                            GET_MODE_BITSIZE (to_mode)));

      if (VECTOR_MODE_P (to_mode))
        from = simplify_gen_subreg (to_mode, from, GET_MODE (from), 0);
      else
        to = simplify_gen_subreg (from_mode, to, GET_MODE (to), 0);

      emit_move_insn (to, from);
      return;
    }

  if (GET_CODE (to) == CONCAT && GET_CODE (from) == CONCAT)
    {
      convert_move (XEXP (to, 0), XEXP (from, 0), unsignedp);
      convert_move (XEXP (to, 1), XEXP (from, 1), unsignedp);
      return;
    }

  convert_mode_scalar (to, from, unsignedp);
}

/* Like convert_move, but deals only with scalar modes.  */

static void
convert_mode_scalar (rtx to, rtx from, int unsignedp)
{
  /* Both modes should be scalar types.  */
  scalar_mode from_mode = as_a <scalar_mode> (GET_MODE (from));
  scalar_mode to_mode = as_a <scalar_mode> (GET_MODE (to));
  bool to_real = SCALAR_FLOAT_MODE_P (to_mode);
  bool from_real = SCALAR_FLOAT_MODE_P (from_mode);
  enum insn_code code;
  rtx libcall;

  gcc_assert (to_real == from_real);

  /* rtx code for making an equivalent value.  */
  enum rtx_code equiv_code = (unsignedp < 0 ? UNKNOWN
                              : (unsignedp ? ZERO_EXTEND : SIGN_EXTEND));

  if (to_real)
    {
      rtx value;
      rtx_insn *insns;
      convert_optab tab;

      gcc_assert ((GET_MODE_PRECISION (from_mode)
                   != GET_MODE_PRECISION (to_mode))
                  || (DECIMAL_FLOAT_MODE_P (from_mode)
                      != DECIMAL_FLOAT_MODE_P (to_mode))
                  || (REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
                      && REAL_MODE_FORMAT (to_mode) == &ieee_half_format)
                  || (REAL_MODE_FORMAT (to_mode) == &arm_bfloat_half_format
                      && REAL_MODE_FORMAT (from_mode) == &ieee_half_format));

      if (GET_MODE_PRECISION (from_mode) == GET_MODE_PRECISION (to_mode))
        /* Conversion between decimal float and binary float, same size.  */
        tab = DECIMAL_FLOAT_MODE_P (from_mode) ? trunc_optab : sext_optab;
      else if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode))
        tab = sext_optab;
      else
        tab = trunc_optab;

      /* Try converting directly if the insn is supported.  */

      code = convert_optab_handler (tab, to_mode, from_mode);
      if (code != CODE_FOR_nothing)
        {
          emit_unop_insn (code, to, from,
                          tab == sext_optab ? FLOAT_EXTEND : FLOAT_TRUNCATE);
          return;
        }

#ifdef HAVE_SFmode
      if (REAL_MODE_FORMAT (from_mode) == &arm_bfloat_half_format
          && REAL_MODE_FORMAT (SFmode) == &ieee_single_format)
        {
          if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (SFmode))
            {
              /* To cut down on libgcc size, implement
                 BFmode -> {DF,XF,TF}mode conversions by
                 BFmode -> SFmode -> {DF,XF,TF}mode conversions.  */
              rtx temp = gen_reg_rtx (SFmode);
              convert_mode_scalar (temp, from, unsignedp);
              convert_mode_scalar (to, temp, unsignedp);
              return;
            }
          if (REAL_MODE_FORMAT (to_mode) == &ieee_half_format)
            {
              /* Similarly, implement BFmode -> HFmode as a
                 BFmode -> SFmode -> HFmode conversion, where SFmode
                 has a superset of BFmode values.  We don't need
                 to handle sNaNs by raising an exception and turning
                 them into qNaNs though, as that can be done in the
                 SFmode -> HFmode conversion too.  */
              rtx temp = gen_reg_rtx (SFmode);
              int save_flag_finite_math_only = flag_finite_math_only;
              flag_finite_math_only = true;
              convert_mode_scalar (temp, from, unsignedp);
              flag_finite_math_only = save_flag_finite_math_only;
              convert_mode_scalar (to, temp, unsignedp);
              return;
            }
          if (to_mode == SFmode
              && !HONOR_NANS (from_mode)
              && !HONOR_NANS (to_mode)
              && optimize_insn_for_speed_p ())
            {
              /* If we don't expect sNaNs, for BFmode -> SFmode we can just
                 shift the bits up.  */
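              /* (A BFmode value is the upper 16 bits of the corresponding
                 SFmode bit pattern, so widening the bit pattern with a
                 16-bit left shift reproduces the SFmode value exactly.)  */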
              machine_mode fromi_mode, toi_mode;
              if (int_mode_for_size (GET_MODE_BITSIZE (from_mode),
                                     0).exists (&fromi_mode)
                  && int_mode_for_size (GET_MODE_BITSIZE (to_mode),
                                        0).exists (&toi_mode))
                {
                  start_sequence ();
                  rtx fromi = lowpart_subreg (fromi_mode, from, from_mode);
                  rtx tof = NULL_RTX;
                  if (fromi)
                    {
                      rtx toi;
                      if (GET_MODE (fromi) == VOIDmode)
                        toi = simplify_unary_operation (ZERO_EXTEND, toi_mode,
                                                        fromi, fromi_mode);
                      else
                        {
                          toi = gen_reg_rtx (toi_mode);
                          convert_mode_scalar (toi, fromi, 1);
                        }
                      toi
                        = maybe_expand_shift (LSHIFT_EXPR, toi_mode, toi,
                                              GET_MODE_PRECISION (to_mode)
                                              - GET_MODE_PRECISION (from_mode),
                                              NULL_RTX, 1);
                      if (toi)
                        {
                          tof = lowpart_subreg (to_mode, toi, toi_mode);
                          if (tof)
                            emit_move_insn (to, tof);
                        }
                    }
                  insns = get_insns ();
                  end_sequence ();
                  if (tof)
                    {
                      emit_insn (insns);
                      return;
                    }
                }
            }
        }
      if (REAL_MODE_FORMAT (from_mode) == &ieee_single_format
          && REAL_MODE_FORMAT (to_mode) == &arm_bfloat_half_format
          && !HONOR_NANS (from_mode)
          && !HONOR_NANS (to_mode)
          && !flag_rounding_math
          && optimize_insn_for_speed_p ())
        {
          /* If we don't expect qNaNs nor sNaNs and can assume rounding
             to nearest, we can expand the conversion inline as
             (fromi + 0x7fff + ((fromi >> 16) & 1)) >> 16.  */
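          /* In terms of the statements below (with shift == 16 for
             SFmode -> BFmode): temp1 is fromi >> 16, temp2 is its low bit
             (the round-to-even adjustment), temp3 is fromi + 0x7fff,
             temp4 = temp3 + temp2 is the rounded value, and temp5 shifts
             it down into the bfloat position.  */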
          machine_mode fromi_mode, toi_mode;
          if (int_mode_for_size (GET_MODE_BITSIZE (from_mode),
                                 0).exists (&fromi_mode)
              && int_mode_for_size (GET_MODE_BITSIZE (to_mode),
                                    0).exists (&toi_mode))
            {
              start_sequence ();
              rtx fromi = lowpart_subreg (fromi_mode, from, from_mode);
              rtx tof = NULL_RTX;
              do
                {
                  if (!fromi)
                    break;
                  int shift = (GET_MODE_PRECISION (from_mode)
                               - GET_MODE_PRECISION (to_mode));
                  rtx temp1
                    = maybe_expand_shift (RSHIFT_EXPR, fromi_mode, fromi,
                                          shift, NULL_RTX, 1);
                  if (!temp1)
                    break;
                  rtx temp2
                    = expand_binop (fromi_mode, and_optab, temp1, const1_rtx,
                                    NULL_RTX, 1, OPTAB_DIRECT);
                  if (!temp2)
                    break;
                  rtx temp3
                    = expand_binop (fromi_mode, add_optab, fromi,
                                    gen_int_mode ((HOST_WIDE_INT_1U
                                                   << (shift - 1)) - 1,
                                                  fromi_mode), NULL_RTX,
                                    1, OPTAB_DIRECT);
                  if (!temp3)
                    break;
                  rtx temp4
                    = expand_binop (fromi_mode, add_optab, temp3, temp2,
                                    NULL_RTX, 1, OPTAB_DIRECT);
                  if (!temp4)
                    break;
                  rtx temp5 = maybe_expand_shift (RSHIFT_EXPR, fromi_mode,
                                                  temp4, shift, NULL_RTX, 1);
                  if (!temp5)
                    break;
                  rtx temp6 = lowpart_subreg (toi_mode, temp5, fromi_mode);
                  if (!temp6)
                    break;
                  tof = lowpart_subreg (to_mode, force_reg (toi_mode, temp6),
                                        toi_mode);
                  if (tof)
                    emit_move_insn (to, tof);
                }
              while (0);
              insns = get_insns ();
              end_sequence ();
              if (tof)
                {
                  emit_insn (insns);
                  return;
                }
            }
        }
#endif

      /* Otherwise use a libcall.  */
      libcall = convert_optab_libfunc (tab, to_mode, from_mode);

      /* Is this conversion implemented yet?  */
      gcc_assert (libcall);

      start_sequence ();
      value = emit_library_call_value (libcall, NULL_RTX, LCT_CONST, to_mode,
                                       from, from_mode);
      insns = get_insns ();
      end_sequence ();
      emit_libcall_block (insns, to, value,
                          tab == trunc_optab ? gen_rtx_FLOAT_TRUNCATE (to_mode,
                                                                       from)
                          : gen_rtx_FLOAT_EXTEND (to_mode, from));
      return;
    }

  /* Handle pointer conversion.  */  /* SPEE 900220.  */
  /* If the target has a converter from FROM_MODE to TO_MODE, use it.  */
  {
    convert_optab ctab;

    if (GET_MODE_PRECISION (from_mode) > GET_MODE_PRECISION (to_mode))
      ctab = trunc_optab;
    else if (unsignedp)
      ctab = zext_optab;
    else
      ctab = sext_optab;

    if (convert_optab_handler (ctab, to_mode, from_mode)
        != CODE_FOR_nothing)
      {
        emit_unop_insn (convert_optab_handler (ctab, to_mode, from_mode),
                        to, from, UNKNOWN);
        return;
      }
  }

  /* Targets are expected to provide conversion insns between PxImode and
     xImode for all MODE_PARTIAL_INT modes they use, but no others.  */
  if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT)
    {
      scalar_int_mode full_mode
        = smallest_int_mode_for_size (GET_MODE_BITSIZE (to_mode));

      gcc_assert (convert_optab_handler (trunc_optab, to_mode, full_mode)
                  != CODE_FOR_nothing);

      if (full_mode != from_mode)
        from = convert_to_mode (full_mode, from, unsignedp);
      emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, full_mode),
                      to, from, UNKNOWN);
      return;
    }
  if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT)
    {
      rtx new_from;
      scalar_int_mode full_mode
        = smallest_int_mode_for_size (GET_MODE_BITSIZE (from_mode));
      convert_optab ctab = unsignedp ? zext_optab : sext_optab;
      enum insn_code icode;

      icode = convert_optab_handler (ctab, full_mode, from_mode);
      gcc_assert (icode != CODE_FOR_nothing);

      if (to_mode == full_mode)
        {
          emit_unop_insn (icode, to, from, UNKNOWN);
          return;
        }

      new_from = gen_reg_rtx (full_mode);
      emit_unop_insn (icode, new_from, from, UNKNOWN);

      /* else proceed to integer conversions below.  */
      from_mode = full_mode;
      from = new_from;
    }

  /* Make sure both are fixed-point modes or both are not.  */
  gcc_assert (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode) ==
              ALL_SCALAR_FIXED_POINT_MODE_P (to_mode));
  if (ALL_SCALAR_FIXED_POINT_MODE_P (from_mode))
    {
      /* If we widen from_mode to to_mode and they are in the same class,
         we won't saturate the result.
         Otherwise, always saturate the result to play safe.  */
      if (GET_MODE_CLASS (from_mode) == GET_MODE_CLASS (to_mode)
          && GET_MODE_SIZE (from_mode) < GET_MODE_SIZE (to_mode))
        expand_fixed_convert (to, from, 0, 0);
      else
        expand_fixed_convert (to, from, 0, 1);
      return;
    }

  /* Now both modes are integers.  */

  /* Handle expanding beyond a word.  */
  if (GET_MODE_PRECISION (from_mode) < GET_MODE_PRECISION (to_mode)
      && GET_MODE_PRECISION (to_mode) > BITS_PER_WORD)
    {
      rtx_insn *insns;
      rtx lowpart;
      rtx fill_value;
      rtx lowfrom;
      int i;
      scalar_mode lowpart_mode;
      int nwords = CEIL (GET_MODE_SIZE (to_mode), UNITS_PER_WORD);

      /* Try converting directly if the insn is supported.  */
      if ((code = can_extend_p (to_mode, from_mode, unsignedp))
          != CODE_FOR_nothing)
        {
          /* If FROM is a SUBREG, put it into a register.  Do this
             so that we always generate the same set of insns for
             better cse'ing; if an intermediate assignment occurred,
             we won't be doing the operation directly on the SUBREG.  */
          if (optimize > 0 && GET_CODE (from) == SUBREG)
            from = force_reg (from_mode, from);
          emit_unop_insn (code, to, from, equiv_code);
          return;
        }
      /* Next, try converting via full word.  */
      else if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD
               && ((code = can_extend_p (to_mode, word_mode, unsignedp))
                   != CODE_FOR_nothing))
        {
          rtx word_to = gen_reg_rtx (word_mode);
          if (REG_P (to))
            {
              if (reg_overlap_mentioned_p (to, from))
                from = force_reg (from_mode, from);
              emit_clobber (to);
            }
          convert_move (word_to, from, unsignedp);
          emit_unop_insn (code, to, word_to, equiv_code);
          return;
        }

      /* No special multiword conversion insn; do it by hand.  */
      start_sequence ();

      /* Since we will turn this into a no conflict block, we must ensure
         the source does not overlap the target, so we force it into an
         isolated register when it might.  Likewise for any MEM input, since
         the conversion sequence might require several references to it and
         we must ensure we're getting the same value every time.  */

      if (MEM_P (from) || reg_overlap_mentioned_p (to, from))
        from = force_reg (from_mode, from);

      /* Get a copy of FROM widened to a word, if necessary.  */
      if (GET_MODE_PRECISION (from_mode) < BITS_PER_WORD)
        lowpart_mode = word_mode;
      else
        lowpart_mode = from_mode;

      lowfrom = convert_to_mode (lowpart_mode, from, unsignedp);

      lowpart = gen_lowpart (lowpart_mode, to);
      emit_move_insn (lowpart, lowfrom);

      /* Compute the value to put in each remaining word.  */
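      /* (For the signed case, emit_store_flag_force with NORMALIZEP == -1
         materializes lowfrom < 0 ? -1 : 0, i.e. a word consisting entirely
         of copies of the sign bit.)  */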
686 | if (unsignedp) | |||
687 | fill_value = const0_rtx(const_int_rtx[64]); | |||
688 | else | |||
689 | fill_value = emit_store_flag_force (gen_reg_rtx (word_mode), | |||
690 | LT, lowfrom, const0_rtx(const_int_rtx[64]), | |||
691 | lowpart_mode, 0, -1); | |||
692 | ||||
693 | /* Fill the remaining words. */ | |||
694 | for (i = GET_MODE_SIZE (lowpart_mode) / UNITS_PER_WORD(((global_options.x_ix86_isa_flags & (1UL << 1)) != 0) ? 8 : 4); i < nwords; i++) | |||
695 | { | |||
696 | int index = (WORDS_BIG_ENDIAN0 ? nwords - i - 1 : i); | |||
697 | rtx subword = operand_subword (to, index, 1, to_mode); | |||
698 | ||||
699 | gcc_assert (subword)((void)(!(subword) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/expr.cc" , 699, __FUNCTION__), 0 : 0)); | |||
700 | ||||
701 | if (fill_value != subword) | |||
702 | emit_move_insn (subword, fill_value); | |||
703 | } | |||
704 | ||||
705 | insns = get_insns (); | |||
706 | end_sequence (); | |||
707 | ||||
708 | emit_insn (insns); | |||
709 | return; | |||
710 | } | |||
711 | ||||
712 | /* Truncating multi-word to a word or less. */ | |||
713 | if (GET_MODE_PRECISION (from_mode) > BITS_PER_WORD | |||
714 | && GET_MODE_PRECISION (to_mode) <= BITS_PER_WORD) | |||
715 | { | |||
716 | if (!((MEM_P (from) | |||
717 | && ! MEM_VOLATILE_P (from) | |||
718 | && direct_load[(int) to_mode] | |||
719 | && ! mode_dependent_address_p (XEXP (from, 0), | |||
720 | MEM_ADDR_SPACE (from))) | |||
721 | || REG_P (from) | |||
722 | || GET_CODE (from) == SUBREG)) | |||
723 | from = force_reg (from_mode, from); | |||
724 | convert_move (to, gen_lowpart (word_mode, from), 0); | |||
725 | return; | |||
726 | } | |||
727 | ||||
728 | /* Now follow all the conversions between integers | |||
729 | no more than a word long. */ | |||
730 | ||||
731 | /* For truncation, usually we can just refer to FROM in a narrower mode. */ | |||
732 | if (GET_MODE_BITSIZE (to_mode) < GET_MODE_BITSIZE (from_mode) | |||
733 | && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, from_mode)) | |||
734 | { | |||
735 | if (!((MEM_P (from) | |||
736 | && ! MEM_VOLATILE_P (from) | |||
737 | && direct_load[(int) to_mode] | |||
738 | && ! mode_dependent_address_p (XEXP (from, 0), | |||
739 | MEM_ADDR_SPACE (from))) | |||
740 | || REG_P (from) | |||
741 | || GET_CODE (from) == SUBREG)) | |||
742 | from = force_reg (from_mode, from); | |||
743 | if (REG_P (from) && REGNO (from) < FIRST_PSEUDO_REGISTER | |||
744 | && !targetm.hard_regno_mode_ok (REGNO (from), to_mode)) | |||
745 | from = copy_to_reg (from); | |||
746 | emit_move_insn (to, gen_lowpart (to_mode, from)); | |||
747 | return; | |||
748 | } | |||
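/* Sketch of the common case, assuming a little-endian target where
   the truncation is a no-op: the move degenerates to a subreg copy,

     (set (reg:SI to) (subreg:SI (reg:DI from) 0))

   with FROM first forced into a register when it is a volatile or
   mode-dependent MEM that gen_lowpart could not reference in
   place.  */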
749 | ||||
750 | /* Handle extension. */ | |||
751 | if (GET_MODE_PRECISION (to_mode) > GET_MODE_PRECISION (from_mode)) | |||
752 | { | |||
753 | /* Convert directly if that works. */ | |||
754 | if ((code = can_extend_p (to_mode, from_mode, unsignedp)) | |||
755 | != CODE_FOR_nothing) | |||
756 | { | |||
757 | emit_unop_insn (code, to, from, equiv_code); | |||
758 | return; | |||
759 | } | |||
760 | else | |||
761 | { | |||
762 | rtx tmp; | |||
763 | int shift_amount; | |||
764 | ||||
765 | /* Search for a mode to convert via. */ | |||
766 | opt_scalar_mode intermediate_iter; | |||
767 | FOR_EACH_MODE_FROM (intermediate_iter, from_mode) | |||
768 | { | |||
769 | scalar_mode intermediate = intermediate_iter.require (); | |||
770 | if (((can_extend_p (to_mode, intermediate, unsignedp) | |||
771 | != CODE_FOR_nothing) | |||
772 | || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate) | |||
773 | && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, | |||
774 | intermediate))) | |||
775 | && (can_extend_p (intermediate, from_mode, unsignedp) | |||
776 | != CODE_FOR_nothing)) | |||
777 | { | |||
778 | convert_move (to, convert_to_mode (intermediate, from, | |||
779 | unsignedp), unsignedp); | |||
780 | return; | |||
781 | } | |||
782 | } | |||
783 | ||||
784 | /* No suitable intermediate mode. | |||
785 | Generate what we need with shifts. */ | |||
786 | shift_amount = (GET_MODE_PRECISION (to_mode) | |||
787 | - GET_MODE_PRECISION (from_mode)); | |||
788 | from = gen_lowpart (to_mode, force_reg (from_mode, from)); | |||
789 | tmp = expand_shift (LSHIFT_EXPR, to_mode, from, shift_amount, | |||
790 | to, unsignedp); | |||
791 | tmp = expand_shift (RSHIFT_EXPR, to_mode, tmp, shift_amount, | |||
792 | to, unsignedp); | |||
793 | if (tmp != to) | |||
794 | emit_move_insn (to, tmp); | |||
795 | return; | |||
796 | } | |||
797 | } | |||
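/* The shift fallback implements the usual identity (a sketch, with
   S = precision (to_mode) - precision (from_mode)):

     sign extension:  to = ((to_mode) from << S) >> S   -- arithmetic >>
     zero extension:  to = ((to_mode) from << S) >> S   -- logical >>

   UNSIGNEDP makes expand_shift pick the logical or the arithmetic
   right shift, which is what distinguishes the two extensions.  */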
798 | ||||
799 | /* Support special truncate insns for certain modes. */ | |||
800 | if (convert_optab_handler (trunc_optab, to_mode, | |||
801 | from_mode) != CODE_FOR_nothing) | |||
802 | { | |||
803 | emit_unop_insn (convert_optab_handler (trunc_optab, to_mode, from_mode), | |||
804 | to, from, UNKNOWN); | |||
805 | return; | |||
806 | } | |||
807 | ||||
808 | /* Handle truncation of volatile memrefs, and so on; | |||
809 | the things that couldn't be truncated directly, | |||
810 | and for which there was no special instruction. | |||
811 | ||||
812 | ??? Code above formerly short-circuited this, for most integer | |||
813 | mode pairs, with a force_reg in from_mode followed by a recursive | |||
814 | call to this routine. Appears always to have been wrong. */ | |||
815 | if (GET_MODE_PRECISION (to_mode) < GET_MODE_PRECISION (from_mode)) | |||
816 | { | |||
817 | rtx temp = force_reg (to_mode, gen_lowpartrtl_hooks.gen_lowpart (to_mode, from)); | |||
818 | emit_move_insn (to, temp); | |||
819 | return; | |||
820 | } | |||
821 | ||||
822 | /* Mode combination is not recognized. */ | |||
823 | gcc_unreachable (); | |||
824 | } | |||
825 | ||||
826 | /* Return an rtx for a value that would result | |||
827 | from converting X to mode MODE. | |||
828 | Both X and MODE may be floating, or both integer. | |||
829 | UNSIGNEDP is nonzero if X is an unsigned value. | |||
830 | This can be done by referring to a part of X in place | |||
831 | or by copying to a new temporary with conversion. */ | |||
832 | ||||
833 | rtx | |||
834 | convert_to_mode (machine_mode mode, rtx x, int unsignedp) | |||
835 | { | |||
836 | return convert_modes (mode, VOIDmode, x, unsignedp); | |||
837 | } | |||
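/* Hedged usage sketch: a caller that needs an unsigned DImode copy
   of a value X already carrying SImode would write

     rtx wide = convert_to_mode (DImode, x, 1);

   relying on X having a nonvoid mode, since VOIDmode is passed as
   OLDMODE here.  */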
838 | ||||
839 | /* Return an rtx for a value that would result | |||
840 | from converting X from mode OLDMODE to mode MODE. | |||
841 | Both modes may be floating, or both integer. | |||
842 | UNSIGNEDP is nonzero if X is an unsigned value. | |||
843 | ||||
844 | This can be done by referring to a part of X in place | |||
845 | or by copying to a new temporary with conversion. | |||
846 | ||||
847 | You can give VOIDmode for OLDMODE, if you are sure X has a nonvoid mode. */ | |||
848 | ||||
849 | rtx | |||
850 | convert_modes (machine_mode mode, machine_mode oldmode, rtx x, int unsignedp) | |||
851 | { | |||
852 | rtx temp; | |||
853 | scalar_int_mode int_mode; | |||
854 | ||||
855 | /* If FROM is a SUBREG that indicates that we have already done at least | |||
856 | the required extension, strip it. */ | |||
857 | ||||
858 | if (GET_CODE (x) == SUBREG | |||
859 | && SUBREG_PROMOTED_VAR_P (x) | |||
860 | && is_a <scalar_int_mode> (mode, &int_mode) | |||
861 | && (GET_MODE_PRECISION (subreg_promoted_mode (x)) | |||
862 | >= GET_MODE_PRECISION (int_mode)) | |||
863 | && SUBREG_CHECK_PROMOTED_SIGN (x, unsignedp)) | |||
864 | { | |||
865 | scalar_int_mode int_orig_mode; | |||
866 | scalar_int_mode int_inner_mode; | |||
867 | machine_mode orig_mode = GET_MODE (x); | |||
868 | x = gen_lowpart (int_mode, SUBREG_REG (x)); | |||
869 | ||||
870 | /* Preserve SUBREG_PROMOTED_VAR_P if the new mode is wider than | |||
871 | the original mode, but narrower than the inner mode. */ | |||
872 | if (GET_CODE (x) == SUBREG | |||
873 | && is_a <scalar_int_mode> (orig_mode, &int_orig_mode) | |||
874 | && GET_MODE_PRECISION (int_mode) | |||
875 | > GET_MODE_PRECISION (int_orig_mode) | |||
876 | && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (x)), | |||
877 | &int_inner_mode) | |||
878 | && GET_MODE_PRECISION (int_inner_mode) | |||
879 | > GET_MODE_PRECISION (int_mode)) | |||
880 | { | |||
881 | SUBREG_PROMOTED_VAR_P (x) = 1; | |||
882 | SUBREG_PROMOTED_SET (x, unsignedp); | |||
883 | } | |||
884 | } | |||
885 | ||||
886 | if (GET_MODE (x) != VOIDmode) | |||
887 | oldmode = GET_MODE (x); | |||
888 | ||||
889 | if (mode == oldmode) | |||
890 | return x; | |||
891 | ||||
892 | if (CONST_SCALAR_INT_P (x) | |||
893 | && is_a <scalar_int_mode> (mode, &int_mode)) | |||
894 | { | |||
895 | /* If the caller did not tell us the old mode, then there is not | |||
896 | much to do with respect to canonicalization. We have to | |||
897 | assume that all the bits are significant. */ | |||
898 | if (!is_a <scalar_int_mode> (oldmode)) | |||
899 | oldmode = MAX_MODE_INT; | |||
900 | wide_int w = wide_int::from (rtx_mode_t (x, oldmode), | |||
901 | GET_MODE_PRECISION (int_mode), | |||
902 | unsignedp ? UNSIGNED : SIGNED); | |||
903 | return immed_wide_int_const (w, int_mode); | |||
904 | } | |||
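/* Worked example of the constant branch: converting (const_int -1)
   viewed as QImode to SImode yields

     unsignedp == 1:  (const_int 255)   -- zero-extend the 8 bits
     unsignedp == 0:  (const_int -1)    -- sign-extend the 8 bits

   because wide_int::from re-canonicalizes the value at the new
   precision before immed_wide_int_const rebuilds the constant.  */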
905 | ||||
906 | /* We can do this with a gen_lowpart if both desired and current modes | |||
907 | are integer, and this is either a constant integer, a register, or a | |||
908 | non-volatile MEM. */ | |||
909 | scalar_int_mode int_oldmode; | |||
910 | if (is_int_mode (mode, &int_mode) | |||
911 | && is_int_mode (oldmode, &int_oldmode) | |||
912 | && GET_MODE_PRECISION (int_mode) <= GET_MODE_PRECISION (int_oldmode) | |||
913 | && ((MEM_P (x) && !MEM_VOLATILE_P (x) && direct_load[(int) int_mode]) | |||
914 | || CONST_POLY_INT_P (x) | |||
915 | || (REG_P (x) | |||
916 | && (!HARD_REGISTER_P (x) | |||
917 | || targetm.hard_regno_mode_ok (REGNO (x), int_mode)) | |||
918 | && TRULY_NOOP_TRUNCATION_MODES_P (int_mode, GET_MODE (x))))) | |||
919 | return gen_lowpart (int_mode, x); | |||
920 | ||||
921 | /* Converting from an integer constant into a vector mode is always | |||
922 | equivalent to a subreg operation. */ | |||
923 | if (VECTOR_MODE_P (mode) && GET_MODE (x) == VOIDmode) | |||
924 | { | |||
925 | gcc_assert (known_eq (GET_MODE_BITSIZE (mode), | |||
926 | GET_MODE_BITSIZE (oldmode))); | |||
927 | return simplify_gen_subreg (mode, x, oldmode, 0); | |||
928 | } | |||
929 | ||||
930 | temp = gen_reg_rtx (mode); | |||
931 | convert_move (temp, x, unsignedp); | |||
932 | return temp; | |||
933 | } | |||
934 | ||||
935 | /* Variant of convert_modes for ABI parameter passing/return. | |||
936 | Return an rtx for a value that would result from converting X from | |||
937 | a floating point mode FMODE to wider integer mode MODE. */ | |||
938 | ||||
939 | rtx | |||
940 | convert_float_to_wider_int (machine_mode mode, machine_mode fmode, rtx x) | |||
941 | { | |||
942 | gcc_assert (SCALAR_INT_MODE_P (mode) && SCALAR_FLOAT_MODE_P (fmode)); | |||
943 | scalar_int_mode tmp_mode = int_mode_for_mode (fmode).require (); | |||
944 | rtx tmp = force_reg (tmp_mode, gen_lowpart (tmp_mode, x)); | |||
945 | return convert_modes (mode, tmp_mode, tmp, 1); | |||
946 | } | |||
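/* Sketch of the intended ABI use (an assumed example): returning an
   SFmode value in a DImode integer register would go through

     rtx bits = convert_float_to_wider_int (DImode, SFmode, x);

   which reinterprets the 32 float bits as SImode via gen_lowpart and
   then zero-extends them, per the final unsignedp == 1 argument.  */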
947 | ||||
948 | /* Variant of convert_modes for ABI parameter passing/return. | |||
949 | Return an rtx for a value that would result from converting X from | |||
950 | an integer mode IMODE to a narrower floating point mode MODE. */ | |||
951 | ||||
952 | rtx | |||
953 | convert_wider_int_to_float (machine_mode mode, machine_mode imode, rtx x) | |||
954 | { | |||
955 | gcc_assert (SCALAR_FLOAT_MODE_P (mode) && SCALAR_INT_MODE_P (imode)); | |||
956 | scalar_int_mode tmp_mode = int_mode_for_mode (mode).require (); | |||
957 | rtx tmp = force_reg (tmp_mode, gen_lowpart (tmp_mode, x)); | |||
958 | return gen_lowpart_SUBREG (mode, tmp); | |||
959 | } | |||
960 | ||||
961 | /* Return the largest alignment we can use for doing a move (or store) | |||
962 | of MAX_PIECES. ALIGN is the largest alignment we could use. */ | |||
963 | ||||
964 | static unsigned int | |||
965 | alignment_for_piecewise_move (unsigned int max_pieces, unsigned int align) | |||
966 | { | |||
967 | scalar_int_mode tmode | |||
968 | = int_mode_for_size (max_pieces * BITS_PER_UNIT, 0).require (); | |||
969 | ||||
970 | if (align >= GET_MODE_ALIGNMENT (tmode)) | |||
971 | align = GET_MODE_ALIGNMENT (tmode); | |||
972 | else | |||
973 | { | |||
974 | scalar_int_mode xmode = NARROWEST_INT_MODE; | |||
975 | opt_scalar_int_mode mode_iter; | |||
976 | FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) | |||
977 | { | |||
978 | tmode = mode_iter.require (); | |||
979 | if (GET_MODE_SIZE (tmode) > max_pieces | |||
980 | || targetm.slow_unaligned_access (tmode, align)) | |||
981 | break; | |||
982 | xmode = tmode; | |||
983 | } | |||
984 | ||||
985 | align = MAX (align, GET_MODE_ALIGNMENT (xmode)); | |||
986 | } | |||
987 | ||||
988 | return align; | |||
989 | } | |||
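/* Worked example under x86-64-ish assumptions (fast unaligned
   access): max_pieces == 8, align == 32.  DImode wants 64-bit
   alignment, so we take the else branch and scan QI, HI, SI, DI;
   none exceeds 8 bytes or is slow unaligned, hence xmode == DImode
   and the function returns MAX (32, 64) == 64, licensing DImode
   pieces despite the weaker declared alignment.  */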
990 | ||||
991 | /* Return the widest QI vector, if QI_VECTOR is true, or integer mode | |||
992 | that is narrower than SIZE bytes. */ | |||
993 | ||||
994 | static fixed_size_mode | |||
995 | widest_fixed_size_mode_for_size (unsigned int size, bool qi_vector) | |||
996 | { | |||
997 | fixed_size_mode result = NARROWEST_INT_MODE; | |||
998 | ||||
999 | gcc_checking_assert (size > 1); | |||
1000 | ||||
1001 | /* Use a QI vector only if SIZE is wider than a word. */ | |||
1002 | if (qi_vector && size > UNITS_PER_WORD) | |||
1003 | { | |||
1004 | machine_mode mode; | |||
1005 | fixed_size_mode candidate; | |||
1006 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) | |||
1007 | if (is_a<fixed_size_mode> (mode, &candidate) | |||
1008 | && GET_MODE_INNER (candidate) == QImode) | |||
1009 | { | |||
1010 | if (GET_MODE_SIZE (candidate) >= size) | |||
1011 | break; | |||
1012 | if (optab_handler (vec_duplicate_optab, candidate) | |||
1013 | != CODE_FOR_nothing) | |||
1014 | result = candidate; | |||
1015 | } | |||
1016 | ||||
1017 | if (result != NARROWEST_INT_MODE) | |||
1018 | return result; | |||
1019 | } | |||
1020 | ||||
1021 | opt_scalar_int_mode tmode; | |||
1022 | FOR_EACH_MODE_IN_CLASS (tmode, MODE_INT) | |||
1023 | if (GET_MODE_SIZE (tmode.require ()) < size) | |||
1024 | result = tmode.require (); | |||
1025 | ||||
1026 | return result; | |||
1027 | } | |||
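/* Example of the scalar path (no QI vectors): for size == 7 the scan
   keeps the last integer mode strictly narrower than 7 bytes,

     widest_fixed_size_mode_for_size (7, false) == SImode

   since DImode's 8 bytes are not < 7 while SImode's 4 bytes are.  */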
1028 | ||||
1029 | /* Determine whether an operation OP on LEN bytes with alignment ALIGN can | |||
1030 | and should be performed piecewise. */ | |||
1031 | ||||
1032 | static bool | |||
1033 | can_do_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align, | |||
1034 | enum by_pieces_operation op) | |||
1035 | { | |||
1036 | return targetm.use_by_pieces_infrastructure_p (len, align, op, | |||
1037 | optimize_insn_for_speed_p ()); | |||
1038 | } | |||
1039 | ||||
1040 | /* Determine whether the LEN bytes can be moved by using several move | |||
1041 | instructions. Return nonzero if a call to move_by_pieces should | |||
1042 | succeed. */ | |||
1043 | ||||
1044 | bool | |||
1045 | can_move_by_pieces (unsigned HOST_WIDE_INT len, unsigned int align) | |||
1046 | { | |||
1047 | return can_do_by_pieces (len, align, MOVE_BY_PIECES); | |||
1048 | } | |||
1049 | ||||
1050 | /* Return number of insns required to perform operation OP by pieces | |||
1051 | for L bytes. ALIGN (in bits) is maximum alignment we can assume. */ | |||
1052 | ||||
1053 | unsigned HOST_WIDE_INT | |||
1054 | by_pieces_ninsns (unsigned HOST_WIDE_INT l, unsigned int align, | |||
1055 | unsigned int max_size, by_pieces_operation op) | |||
1056 | { | |||
1057 | unsigned HOST_WIDE_INT n_insns = 0; | |||
1058 | fixed_size_mode mode; | |||
1059 | ||||
1060 | if (targetm.overlap_op_by_pieces_p () && op != COMPARE_BY_PIECES) | |||
1061 | { | |||
1062 | /* NB: Round up L and ALIGN to the widest integer mode for | |||
1063 | MAX_SIZE. */ | |||
1064 | mode = widest_fixed_size_mode_for_size (max_size, | |||
1065 | op == SET_BY_PIECES); | |||
1066 | if (optab_handler (mov_optab, mode) != CODE_FOR_nothing) | |||
1067 | { | |||
1068 | unsigned HOST_WIDE_INT up = ROUND_UP (l, GET_MODE_SIZE (mode)); | |||
1069 | if (up > l) | |||
1070 | l = up; | |||
1071 | align = GET_MODE_ALIGNMENT (mode); | |||
1072 | } | |||
1073 | } | |||
1074 | ||||
1075 | align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align); | |||
1076 | ||||
1077 | while (max_size > 1 && l > 0) | |||
1078 | { | |||
1079 | mode = widest_fixed_size_mode_for_size (max_size, | |||
1080 | op == SET_BY_PIECES); | |||
1081 | enum insn_code icode; | |||
1082 | ||||
1083 | unsigned int modesize = GET_MODE_SIZE (mode); | |||
1084 | ||||
1085 | icode = optab_handler (mov_optab, mode); | |||
1086 | if (icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode)) | |||
1087 | { | |||
1088 | unsigned HOST_WIDE_INT n_pieces = l / modesize; | |||
1089 | l %= modesize; | |||
1090 | switch (op) | |||
1091 | { | |||
1092 | default: | |||
1093 | n_insns += n_pieces; | |||
1094 | break; | |||
1095 | ||||
1096 | case COMPARE_BY_PIECES: | |||
1097 | int batch = targetm.compare_by_pieces_branch_ratio (mode); | |||
1098 | int batch_ops = 4 * batch - 1; | |||
1099 | unsigned HOST_WIDE_INT full = n_pieces / batch; | |||
1100 | n_insns += full * batch_ops; | |||
1101 | if (n_pieces % batch != 0) | |||
1102 | n_insns++; | |||
1103 | break; | |||
1104 | ||||
1105 | } | |||
1106 | } | |||
1107 | max_size = modesize; | |||
1108 | } | |||
1109 | ||||
1110 | gcc_assert (!l); | |||
1111 | return n_insns; | |||
1112 | } | |||
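/* Worked count, assuming the overlap pre-pass is off and all
   alignment checks pass: l == 15 with max_size == 16 peels

     15 = 8 (DI) + 4 (SI) + 2 (HI) + 1 (QI)

   so a MOVE_BY_PIECES request returns n_insns == 4.  */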
1113 | ||||
1114 | /* Used when performing piecewise block operations, holds information | |||
1115 | about one of the memory objects involved. The member functions | |||
1116 | can be used to generate code for loading from the object and | |||
1117 | updating the address when iterating. */ | |||
1118 | ||||
1119 | class pieces_addr | |||
1120 | { | |||
1121 | /* The object being referenced, a MEM. Can be NULL_RTX to indicate | |||
1122 | stack pushes. */ | |||
1123 | rtx m_obj; | |||
1124 | /* The address of the object. Can differ from that seen in the | |||
1125 | MEM rtx if we copied the address to a register. */ | |||
1126 | rtx m_addr; | |||
1127 | /* Nonzero if the address on the object has an autoincrement already, | |||
1128 | signifies whether that was an increment or decrement. */ | |||
1129 | signed char m_addr_inc; | |||
1130 | /* Nonzero if we intend to use autoinc without the address already | |||
1131 | having autoinc form. We will insert add insns around each memory | |||
1132 | reference, expecting later passes to form autoinc addressing modes. | |||
1133 | The only supported options are predecrement and postincrement. */ | |||
1134 | signed char m_explicit_inc; | |||
1135 | /* True if we have either of the two possible cases of using | |||
1136 | autoincrement. */ | |||
1137 | bool m_auto; | |||
1138 | /* True if this is an address to be used for load operations rather | |||
1139 | than stores. */ | |||
1140 | bool m_is_load; | |||
1141 | ||||
1142 | /* Optionally, a function to obtain constants for any given offset into | |||
1143 | the objects, and data associated with it. */ | |||
1144 | by_pieces_constfn m_constfn; | |||
1145 | void *m_cfndata; | |||
1146 | public: | |||
1147 | pieces_addr (rtx, bool, by_pieces_constfn, void *); | |||
1148 | rtx adjust (fixed_size_mode, HOST_WIDE_INT, by_pieces_prev * = nullptr); | |||
1149 | void increment_address (HOST_WIDE_INT); | |||
1150 | void maybe_predec (HOST_WIDE_INT); | |||
1151 | void maybe_postinc (HOST_WIDE_INT); | |||
1152 | void decide_autoinc (machine_mode, bool, HOST_WIDE_INT); | |||
1153 | int get_addr_inc () | |||
1154 | { | |||
1155 | return m_addr_inc; | |||
1156 | } | |||
1157 | }; | |||
1158 | ||||
1159 | /* Initialize a pieces_addr structure from an object OBJ. IS_LOAD is | |||
1160 | true if the operation to be performed on this object is a load | |||
1161 | rather than a store. For stores, OBJ can be NULL, in which case we | |||
1162 | assume the operation is a stack push. For loads, the optional | |||
1163 | CONSTFN and its associated CFNDATA can be used in place of the | |||
1164 | memory load. */ | |||
1165 | ||||
1166 | pieces_addr::pieces_addr (rtx obj, bool is_load, by_pieces_constfn constfn, | |||
1167 | void *cfndata) | |||
1168 | : m_obj (obj), m_is_load (is_load), m_constfn (constfn), m_cfndata (cfndata) | |||
1169 | { | |||
1170 | m_addr_inc = 0; | |||
1171 | m_auto = false; | |||
1172 | if (obj) | |||
1173 | { | |||
1174 | rtx addr = XEXP (obj, 0); | |||
1175 | rtx_code code = GET_CODE (addr); | |||
1176 | m_addr = addr; | |||
1177 | bool dec = code == PRE_DEC || code == POST_DEC; | |||
1178 | bool inc = code == PRE_INC || code == POST_INC; | |||
1179 | m_auto = inc || dec; | |||
1180 | if (m_auto) | |||
1181 | m_addr_inc = dec ? -1 : 1; | |||
1182 | ||||
1183 | /* While we have always looked for these codes here, the code | |||
1184 | implementing the memory operation has never handled them. | |||
1185 | Support could be added later if necessary or beneficial. */ | |||
1186 | gcc_assert (code != PRE_INC && code != POST_DEC); | |||
1187 | } | |||
1188 | else | |||
1189 | { | |||
1190 | m_addr = NULL_RTX; | |||
1191 | if (!is_load) | |||
1192 | { | |||
1193 | m_auto = true; | |||
1194 | if (STACK_GROWS_DOWNWARD) | |||
1195 | m_addr_inc = -1; | |||
1196 | else | |||
1197 | m_addr_inc = 1; | |||
1198 | } | |||
1199 | else | |||
1200 | gcc_assert (constfn != NULL); | |||
1201 | } | |||
1202 | m_explicit_inc = 0; | |||
1203 | if (constfn) | |||
1204 | gcc_assert (is_load); | |||
1205 | } | |||
1206 | ||||
1207 | /* Decide whether to use autoinc for an address involved in a memory op. | |||
1208 | MODE is the mode of the accesses, REVERSE is true if we've decided to | |||
1209 | perform the operation starting from the end, and LEN is the length of | |||
1210 | the operation. Don't override an earlier decision to set m_auto. */ | |||
1211 | ||||
1212 | void | |||
1213 | pieces_addr::decide_autoinc (machine_mode ARG_UNUSED (mode), bool reverse, | |||
1214 | HOST_WIDE_INT len) | |||
1215 | { | |||
1216 | if (m_auto || m_obj == NULL_RTX) | |||
1217 | return; | |||
1218 | ||||
1219 | bool use_predec = (m_is_load | |||
1220 | ? USE_LOAD_PRE_DECREMENT (mode) | |||
1221 | : USE_STORE_PRE_DECREMENT (mode)); | |||
1222 | bool use_postinc = (m_is_load | |||
1223 | ? USE_LOAD_POST_INCREMENT (mode) | |||
1224 | : USE_STORE_POST_INCREMENT (mode)); | |||
1225 | machine_mode addr_mode = get_address_mode (m_obj); | |||
1226 | ||||
1227 | if (use_predec && reverse) | |||
1228 | { | |||
1229 | m_addr = copy_to_mode_reg (addr_mode, | |||
1230 | plus_constant (addr_mode, | |||
1231 | m_addr, len)); | |||
1232 | m_auto = true; | |||
1233 | m_explicit_inc = -1; | |||
1234 | } | |||
1235 | else if (use_postinc && !reverse) | |||
1236 | { | |||
1237 | m_addr = copy_to_mode_reg (addr_mode, m_addr); | |||
1238 | m_auto = true; | |||
1239 | m_explicit_inc = 1; | |||
1240 | } | |||
1241 | else if (CONSTANT_P (m_addr)) | |||
1242 | m_addr = copy_to_mode_reg (addr_mode, m_addr); | |||
1243 | } | |||
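/* Sketch of the effect on a target with post-increment stores: the
   address is copied to a pseudo and each piece is emitted as a plain
   store followed by an explicit add (see maybe_postinc), in the hope
   that the auto-inc-dec pass later rewrites the pair into

     (set (mem:M (post_inc (reg P))) ...)  */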
1244 | ||||
1245 | /* Adjust the address to refer to the data at OFFSET in MODE. If we | |||
1246 | are using autoincrement for this address, we don't add the offset, | |||
1247 | but we still modify the MEM's properties. */ | |||
1248 | ||||
1249 | rtx | |||
1250 | pieces_addr::adjust (fixed_size_mode mode, HOST_WIDE_INT offset, | |||
1251 | by_pieces_prev *prev) | |||
1252 | { | |||
1253 | if (m_constfn) | |||
1254 | /* Pass the previous data to m_constfn. */ | |||
1255 | return m_constfn (m_cfndata, prev, offset, mode); | |||
1256 | if (m_obj == NULL_RTX) | |||
1257 | return NULL_RTX; | |||
1258 | if (m_auto) | |||
1259 | return adjust_automodify_address (m_obj, mode, m_addr, offset); | |||
1260 | else | |||
1261 | return adjust_address (m_obj, mode, offset); | |||
1262 | } | |||
1263 | ||||
1264 | /* Emit an add instruction to increment the address by SIZE. */ | |||
1265 | ||||
1266 | void | |||
1267 | pieces_addr::increment_address (HOST_WIDE_INT size) | |||
1268 | { | |||
1269 | rtx amount = gen_int_mode (size, GET_MODE (m_addr)); | |||
1270 | emit_insn (gen_add2_insn (m_addr, amount)); | |||
1271 | } | |||
1272 | ||||
1273 | /* If we are supposed to decrement the address after each access, emit code | |||
1274 | to do so now. Increment by SIZE (which should have the correct sign | |||
1275 | already). */ | |||
1276 | ||||
1277 | void | |||
1278 | pieces_addr::maybe_predec (HOST_WIDE_INT size) | |||
1279 | { | |||
1280 | if (m_explicit_inc >= 0) | |||
1281 | return; | |||
1282 | gcc_assert (HAVE_PRE_DECREMENT); | |||
1283 | increment_address (size); | |||
1284 | } | |||
1285 | ||||
1286 | /* If we are supposed to increment the address after each access, emit code | |||
1287 | to do so now. Increment by SIZE. */ | |||
1288 | ||||
1289 | void | |||
1290 | pieces_addr::maybe_postinc (HOST_WIDE_INT size) | |||
1291 | { | |||
1292 | if (m_explicit_inc <= 0) | |||
1293 | return; | |||
1294 | gcc_assert (HAVE_POST_INCREMENT); | |||
1295 | increment_address (size); | |||
1296 | } | |||
1297 | ||||
1298 | /* This structure is used by do_op_by_pieces to describe the operation | |||
1299 | to be performed. */ | |||
1300 | ||||
1301 | class op_by_pieces_d | |||
1302 | { | |||
1303 | private: | |||
1304 | fixed_size_mode get_usable_mode (fixed_size_mode, unsigned int); | |||
1305 | fixed_size_mode smallest_fixed_size_mode_for_size (unsigned int); | |||
1306 | ||||
1307 | protected: | |||
1308 | pieces_addr m_to, m_from; | |||
1309 | /* Make m_len read-only so that smallest_fixed_size_mode_for_size can | |||
1310 | use it to check the valid mode size. */ | |||
1311 | const unsigned HOST_WIDE_INT m_len; | |||
1312 | HOST_WIDE_INT m_offset; | |||
1313 | unsigned int m_align; | |||
1314 | unsigned int m_max_size; | |||
1315 | bool m_reverse; | |||
1316 | /* True if this is a stack push. */ | |||
1317 | bool m_push; | |||
1318 | /* True if targetm.overlap_op_by_pieces_p () returns true. */ | |||
1319 | bool m_overlap_op_by_pieces; | |||
1320 | /* True if QI vector mode can be used. */ | |||
1321 | bool m_qi_vector_mode; | |||
1322 | ||||
1323 | /* Virtual functions, overridden by derived classes for the specific | |||
1324 | operation. */ | |||
1325 | virtual void generate (rtx, rtx, machine_mode) = 0; | |||
1326 | virtual bool prepare_mode (machine_mode, unsigned int) = 0; | |||
1327 | virtual void finish_mode (machine_mode) | |||
1328 | { | |||
1329 | } | |||
1330 | ||||
1331 | public: | |||
1332 | op_by_pieces_d (unsigned int, rtx, bool, rtx, bool, by_pieces_constfn, | |||
1333 | void *, unsigned HOST_WIDE_INT, unsigned int, bool, | |||
1334 | bool = false); | |||
1335 | void run (); | |||
1336 | }; | |||
1337 | ||||
1338 | /* The constructor for an op_by_pieces_d structure. We require two | |||
1339 | objects named TO and FROM, which are identified as loads or stores | |||
1340 | by TO_LOAD and FROM_LOAD. If FROM is a load, the optional FROM_CFN | |||
1341 | and its associated FROM_CFN_DATA can be used to replace loads with | |||
1342 | constant values. MAX_PIECES describes the maximum number of bytes | |||
1343 | at a time which can be moved efficiently. LEN describes the length | |||
1344 | of the operation. */ | |||
1345 | ||||
1346 | op_by_pieces_d::op_by_pieces_d (unsigned int max_pieces, rtx to, | |||
1347 | bool to_load, rtx from, bool from_load, | |||
1348 | by_pieces_constfn from_cfn, | |||
1349 | void *from_cfn_data, | |||
1350 | unsigned HOST_WIDE_INT len, | |||
1351 | unsigned int align, bool push, | |||
1352 | bool qi_vector_mode) | |||
1353 | : m_to (to, to_load, NULL, NULL), | |||
1354 | m_from (from, from_load, from_cfn, from_cfn_data), | |||
1355 | m_len (len), m_max_size (max_pieces + 1), | |||
1356 | m_push (push), m_qi_vector_mode (qi_vector_mode) | |||
1357 | { | |||
1358 | int toi = m_to.get_addr_inc (); | |||
1359 | int fromi = m_from.get_addr_inc (); | |||
1360 | if (toi >= 0 && fromi >= 0) | |||
1361 | m_reverse = false; | |||
1362 | else if (toi <= 0 && fromi <= 0) | |||
1363 | m_reverse = true; | |||
1364 | else | |||
1365 | gcc_unreachable (); | |||
1366 | ||||
1367 | m_offset = m_reverse ? len : 0; | |||
1368 | align = MIN (to ? MEM_ALIGN (to) : align, | |||
1369 | from ? MEM_ALIGN (from) : align); | |||
1370 | ||||
1371 | /* If copying requires more than two move insns, | |||
1372 | copy addresses to registers (to make displacements shorter) | |||
1373 | and use post-increment if available. */ | |||
1374 | if (by_pieces_ninsns (len, align, m_max_size, MOVE_BY_PIECES) > 2) | |||
1375 | { | |||
1376 | /* Find the mode of the largest access. */ | |||
1377 | fixed_size_mode mode | |||
1378 | = widest_fixed_size_mode_for_size (m_max_size, | |||
1379 | m_qi_vector_mode); | |||
1380 | ||||
1381 | m_from.decide_autoinc (mode, m_reverse, len); | |||
1382 | m_to.decide_autoinc (mode, m_reverse, len); | |||
1383 | } | |||
1384 | ||||
1385 | align = alignment_for_piecewise_move (MOVE_MAX_PIECES, align); | |||
1386 | m_align = align; | |||
1387 | ||||
1388 | m_overlap_op_by_pieces = targetm.overlap_op_by_pieces_p (); | |||
1389 | } | |||
1390 | ||||
1391 | /* This function returns the largest usable integer mode for LEN bytes | |||
1392 | whose size is no bigger than size of MODE. */ | |||
1393 | ||||
1394 | fixed_size_mode | |||
1395 | op_by_pieces_d::get_usable_mode (fixed_size_mode mode, unsigned int len) | |||
1396 | { | |||
1397 | unsigned int size; | |||
1398 | do | |||
1399 | { | |||
1400 | size = GET_MODE_SIZE (mode); | |||
1401 | if (len >= size && prepare_mode (mode, m_align)) | |||
1402 | break; | |||
1403 | /* widest_fixed_size_mode_for_size checks SIZE > 1. */ | |||
1404 | mode = widest_fixed_size_mode_for_size (size, m_qi_vector_mode); | |||
1405 | } | |||
1406 | while (1); | |||
1407 | return mode; | |||
1408 | } | |||
1409 | ||||
1410 | /* Return the smallest integer or QI vector mode that is not narrower | |||
1411 | than SIZE bytes. */ | |||
1412 | ||||
1413 | fixed_size_mode | |||
1414 | op_by_pieces_d::smallest_fixed_size_mode_for_size (unsigned int size) | |||
1415 | { | |||
1416 | /* Use a QI vector only when SIZE is wider than a word. */ | |||
1417 | if (m_qi_vector_mode && size > UNITS_PER_WORD) | |||
1418 | { | |||
1419 | machine_mode mode; | |||
1420 | fixed_size_mode candidate; | |||
1421 | FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_INT) | |||
1422 | if (is_a<fixed_size_mode> (mode, &candidate) | |||
1423 | && GET_MODE_INNER (candidate) == QImode) | |||
1424 | { | |||
1425 | /* Don't return a mode wider than M_LEN. */ | |||
1426 | if (GET_MODE_SIZE (candidate) > m_len) | |||
1427 | break; | |||
1428 | ||||
1429 | if (GET_MODE_SIZE (candidate) >= size | |||
1430 | && (optab_handler (vec_duplicate_optab, candidate) | |||
1431 | != CODE_FOR_nothing)) | |||
1432 | return candidate; | |||
1433 | } | |||
1434 | } | |||
1435 | ||||
1436 | return smallest_int_mode_for_size (size * BITS_PER_UNIT); | |||
1437 | } | |||
1438 | ||||
1439 | /* This function contains the main loop used for expanding a block | |||
1440 | operation. First move what we can in the largest integer mode, | |||
1441 | then go to successively smaller modes. For every access, call | |||
1442 | the virtual function generate with the two adjusted operands. */ | |||
1443 | ||||
1444 | void | |||
1445 | op_by_pieces_d::run () | |||
1446 | { | |||
1447 | if (m_len == 0) | |||
1448 | return; | |||
1449 | ||||
1450 | unsigned HOST_WIDE_INT length = m_len; | |||
1451 | ||||
1452 | /* widest_fixed_size_mode_for_size checks M_MAX_SIZE > 1. */ | |||
1453 | fixed_size_mode mode | |||
1454 | = widest_fixed_size_mode_for_size (m_max_size, m_qi_vector_mode); | |||
1455 | mode = get_usable_mode (mode, length); | |||
1456 | ||||
1457 | by_pieces_prev to_prev = { nullptr, mode }; | |||
1458 | by_pieces_prev from_prev = { nullptr, mode }; | |||
1459 | ||||
1460 | do | |||
1461 | { | |||
1462 | unsigned int size = GET_MODE_SIZE (mode); | |||
1463 | rtx to1 = NULL_RTX, from1; | |||
1464 | ||||
1465 | while (length >= size) | |||
1466 | { | |||
1467 | if (m_reverse) | |||
1468 | m_offset -= size; | |||
1469 | ||||
1470 | to1 = m_to.adjust (mode, m_offset, &to_prev); | |||
1471 | to_prev.data = to1; | |||
1472 | to_prev.mode = mode; | |||
1473 | from1 = m_from.adjust (mode, m_offset, &from_prev); | |||
1474 | from_prev.data = from1; | |||
1475 | from_prev.mode = mode; | |||
1476 | ||||
1477 | m_to.maybe_predec (-(HOST_WIDE_INT)size); | |||
1478 | m_from.maybe_predec (-(HOST_WIDE_INT)size); | |||
1479 | ||||
1480 | generate (to1, from1, mode); | |||
1481 | ||||
1482 | m_to.maybe_postinc (size); | |||
1483 | m_from.maybe_postinc (size); | |||
1484 | ||||
1485 | if (!m_reverse) | |||
1486 | m_offset += size; | |||
1487 | ||||
1488 | length -= size; | |||
1489 | } | |||
1490 | ||||
1491 | finish_mode (mode); | |||
1492 | ||||
1493 | if (length == 0) | |||
1494 | return; | |||
1495 | ||||
1496 | if (!m_push && m_overlap_op_by_pieces) | |||
1497 | { | |||
1498 | /* NB: Generate overlapping operations if it is not a stack | |||
1499 | push since stack push must not overlap. Get the smallest | |||
1500 | fixed size mode for M_LEN bytes. */ | |||
1501 | mode = smallest_fixed_size_mode_for_size (length); | |||
1502 | mode = get_usable_mode (mode, GET_MODE_SIZE (mode)); | |||
1503 | int gap = GET_MODE_SIZE (mode) - length; | |||
1504 | if (gap > 0) | |||
1505 | { | |||
1506 | /* If size of MODE > M_LEN, generate the last operation | |||
1507 | in MODE for the remaining bytes with overlapping memory | |||
1508 | from the previous operation. */ | |||
1509 | if (m_reverse) | |||
1510 | m_offset += gap; | |||
1511 | else | |||
1512 | m_offset -= gap; | |||
1513 | length += gap; | |||
1514 | } | |||
1515 | } | |||
1516 | else | |||
1517 | { | |||
1518 | /* widest_fixed_size_mode_for_size checks SIZE > 1. */ | |||
1519 | mode = widest_fixed_size_mode_for_size (size, | |||
1520 | m_qi_vector_mode); | |||
1521 | mode = get_usable_mode (mode, length); | |||
1522 | } | |||
1523 | } | |||
1524 | while (1); | |||
1525 | } | |||
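/* Worked example of the overlapping tail: length == 13, DImode
   pieces, overlap permitted.  The main loop emits one 8-byte op at
   offset 0, leaving 5 bytes; smallest_fixed_size_mode_for_size (5)
   yields DImode again, gap == 3, so the final op lands at offset 5
   and re-covers bytes 5..7 instead of splitting the tail into
   4 + 1 byte ops.  */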
1526 | ||||
1527 | /* Derived class from op_by_pieces_d, providing support for block move | |||
1528 | operations. */ | |||
1529 | ||||
1530 | #ifdef PUSH_ROUNDING | |||
1531 | #define PUSHG_P(to) ((to) == nullptr) | |||
1532 | #else | |||
1533 | #define PUSHG_P(to) false | |||
1534 | #endif | |||
1535 | ||||
1536 | class move_by_pieces_d : public op_by_pieces_d | |||
1537 | { | |||
1538 | insn_gen_fn m_gen_fun; | |||
1539 | void generate (rtx, rtx, machine_mode) final override; | |||
1540 | bool prepare_mode (machine_mode, unsigned int) final override; | |||
1541 | ||||
1542 | public: | |||
1543 | move_by_pieces_d (rtx to, rtx from, unsigned HOST_WIDE_INT len, | |||
1544 | unsigned int align) | |||
1545 | : op_by_pieces_d (MOVE_MAX_PIECES, to, false, from, true, NULL, | |||
1546 | NULL, len, align, PUSHG_P (to)) | |||
1547 | { | |||
1548 | } | |||
1549 | rtx finish_retmode (memop_ret); | |||
1550 | }; | |||
1551 | ||||
1552 | /* Return true if MODE can be used for a set of copies, given an | |||
1553 | alignment ALIGN. Prepare whatever data is necessary for later | |||
1554 | calls to generate. */ | |||
1555 | ||||
1556 | bool | |||
1557 | move_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align) | |||
1558 | { | |||
1559 | insn_code icode = optab_handler (mov_optab, mode); | |||
1560 | m_gen_fun = GEN_FCN (icode); | |||
1561 | return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode); | |||
1562 | } | |||
1563 | ||||
1564 | /* A callback used when iterating over a move_by_pieces operation. | |||
1565 | OP1 is the value that has been loaded for the current piece and | |||
1566 | should be moved into OP0 in MODE, using the insn gen function | |||
1567 | recorded by prepare_mode. If OP0 is NULL, this means we should | |||
1568 | generate a push of OP1 instead. */ | |||
1569 | ||||
1570 | void | |||
1571 | move_by_pieces_d::generate (rtx op0, rtx op1, | |||
1572 | machine_mode mode ATTRIBUTE_UNUSED) | |||
1573 | { | |||
1574 | #ifdef PUSH_ROUNDING | |||
1575 | if (op0 == NULL_RTX) | |||
1576 | { | |||
1577 | emit_single_push_insn (mode, op1, NULL); | |||
1578 | return; | |||
1579 | } | |||
1580 | #endif | |||
1581 | emit_insn (m_gen_fun (op0, op1)); | |||
1582 | } | |||
1583 | ||||
1584 | /* Perform the final adjustment at the end of a string to obtain the | |||
1585 | correct return value for the block operation. | |||
1586 | Return value is based on RETMODE argument. */ | |||
1587 | ||||
1588 | rtx | |||
1589 | move_by_pieces_d::finish_retmode (memop_ret retmode) | |||
1590 | { | |||
1591 | gcc_assert (!m_reverse); | |||
1592 | if (retmode == RETURN_END_MINUS_ONE) | |||
1593 | { | |||
1594 | m_to.maybe_postinc (-1); | |||
1595 | --m_offset; | |||
1596 | } | |||
1597 | return m_to.adjust (QImode, m_offset); | |||
1598 | } | |||
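/* RETMODE matches what the caller must return, e.g. (sketch)

     memcpy  -> RETURN_BEGIN          -- the original destination
     mempcpy -> RETURN_END            -- first byte past the copy
     stpcpy  -> RETURN_END_MINUS_ONE  -- last byte written

   hence the extra -1 adjustment above for the last case.  */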
1599 | ||||
1600 | /* Generate several move instructions to copy LEN bytes from block FROM to | |||
1601 | block TO. (These are MEM rtx's with BLKmode). | |||
1602 | ||||
1603 | If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is | |||
1604 | used to push FROM to the stack. | |||
1605 | ||||
1606 | ALIGN is maximum stack alignment we can assume. | |||
1607 | ||||
1608 | Return value is based on RETMODE argument. */ | |||
1609 | ||||
1610 | rtx | |||
1611 | move_by_pieces (rtx to, rtx from, unsigned HOST_WIDE_INT len, | |||
1612 | unsigned int align, memop_ret retmode) | |||
1613 | { | |||
1614 | #ifndef PUSH_ROUNDING | |||
1615 | if (to == NULL) | |||
1616 | gcc_unreachable (); | |||
1617 | #endif | |||
1618 | ||||
1619 | move_by_pieces_d data (to, from, len, align); | |||
1620 | ||||
1621 | data.run (); | |||
1622 | ||||
1623 | if (retmode != RETURN_BEGIN) | |||
1624 | return data.finish_retmode (retmode); | |||
1625 | else | |||
1626 | return to; | |||
1627 | } | |||
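/* Hedged usage sketch, mirroring how block-move expansion guards
   this entry point:

     if (CONST_INT_P (size) && can_move_by_pieces (INTVAL (size), align))
       return move_by_pieces (to, from, INTVAL (size), align,
                              RETURN_BEGIN);

   where TO and FROM are BLKmode MEMs and the length is a known
   compile-time constant.  */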
1628 | ||||
1629 | /* Derived class from op_by_pieces_d, providing support for memory | |||
1630 | store operations. */ | |||
1631 | ||||
1632 | class store_by_pieces_d : public op_by_pieces_d | |||
1633 | { | |||
1634 | insn_gen_fn m_gen_fun; | |||
1635 | void generate (rtx, rtx, machine_mode) final override; | |||
1636 | bool prepare_mode (machine_mode, unsigned int) final override; | |||
1637 | ||||
1638 | public: | |||
1639 | store_by_pieces_d (rtx to, by_pieces_constfn cfn, void *cfn_data, | |||
1640 | unsigned HOST_WIDE_INT len, unsigned int align, | |||
1641 | bool qi_vector_mode) | |||
1642 | : op_by_pieces_d (STORE_MAX_PIECES, to, false, NULL_RTX, true, cfn, | |||
1643 | cfn_data, len, align, false, qi_vector_mode) | |||
1644 | { | |||
1645 | } | |||
1646 | rtx finish_retmode (memop_ret); | |||
1647 | }; | |||
1648 | ||||
1649 | /* Return true if MODE can be used for a set of stores, given an | |||
1650 | alignment ALIGN. Prepare whatever data is necessary for later | |||
1651 | calls to generate. */ | |||
1652 | ||||
1653 | bool | |||
1654 | store_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align) | |||
1655 | { | |||
1656 | insn_code icode = optab_handler (mov_optab, mode); | |||
1657 | m_gen_fun = GEN_FCN (icode); | |||
1658 | return icode != CODE_FOR_nothing && align >= GET_MODE_ALIGNMENT (mode); | |||
1659 | } | |||
1660 | ||||
1661 | /* A callback used when iterating over a store_by_pieces operation. | |||
1662 | OP1 is the constant (or constructed) value to be stored into OP0 | |||
1663 | in MODE; emit the store with the insn gen function recorded by | |||
1664 | prepare_mode. Unlike the move case, OP0 is never NULL here, as | |||
1665 | stores do not take the push path. */ | |||
1666 | ||||
1667 | void | |||
1668 | store_by_pieces_d::generate (rtx op0, rtx op1, machine_mode) | |||
1669 | { | |||
1670 | emit_insn (m_gen_fun (op0, op1)); | |||
1671 | } | |||
1672 | ||||
1673 | /* Perform the final adjustment at the end of a string to obtain the | |||
1674 | correct return value for the block operation. | |||
1675 | Return value is based on RETMODE argument. */ | |||
1676 | ||||
1677 | rtx | |||
1678 | store_by_pieces_d::finish_retmode (memop_ret retmode) | |||
1679 | { | |||
1680 | gcc_assert (!m_reverse); | |||
1681 | if (retmode == RETURN_END_MINUS_ONE) | |||
1682 | { | |||
1683 | m_to.maybe_postinc (-1); | |||
1684 | --m_offset; | |||
1685 | } | |||
1686 | return m_to.adjust (QImode, m_offset); | |||
1687 | } | |||
1688 | ||||
1689 | /* Determine whether the LEN bytes generated by CONSTFUN can be | |||
1690 | stored to memory using several move instructions. CONSTFUNDATA is | |||
1691 | a pointer which will be passed as argument in every CONSTFUN call. | |||
1692 | ALIGN is maximum alignment we can assume. MEMSETP is true if this is | |||
1693 | a memset operation and false if it's a copy of a constant string. | |||
1694 | Return nonzero if a call to store_by_pieces should succeed. */ | |||
1695 | ||||
1696 | int | |||
1697 | can_store_by_pieces (unsigned HOST_WIDE_INT len, | |||
1698 | by_pieces_constfn constfun, | |||
1699 | void *constfundata, unsigned int align, bool memsetp) | |||
1700 | { | |||
1701 | unsigned HOST_WIDE_INT l; | |||
1702 | unsigned int max_size; | |||
1703 | HOST_WIDE_INT offset = 0; | |||
1704 | enum insn_code icode; | |||
1705 | int reverse; | |||
1706 | /* cst is set but not used if LEGITIMATE_CONSTANT doesn't use it. */ | |||
1707 | rtx cst ATTRIBUTE_UNUSED; | |||
1708 | ||||
1709 | if (len == 0) | |||
1710 | return 1; | |||
1711 | ||||
1712 | if (!targetm.use_by_pieces_infrastructure_p (len, align, | |||
1713 | memsetp | |||
1714 | ? SET_BY_PIECES | |||
1715 | : STORE_BY_PIECES, | |||
1716 | optimize_insn_for_speed_p ())) | |||
1717 | return 0; | |||
1718 | ||||
1719 | align = alignment_for_piecewise_move (STORE_MAX_PIECES, align); | |||
1720 | ||||
1721 | /* We would first store what we can in the largest integer mode, then go to | |||
1722 | successively smaller modes. */ | |||
1723 | ||||
1724 | for (reverse = 0; | |||
1725 | reverse <= (HAVE_PRE_DECREMENT || HAVE_POST_DECREMENT); | |||
1726 | reverse++) | |||
1727 | { | |||
1728 | l = len; | |||
1729 | max_size = STORE_MAX_PIECES + 1; | |||
1730 | while (max_size > 1 && l > 0) | |||
1731 | { | |||
1732 | fixed_size_mode mode | |||
1733 | = widest_fixed_size_mode_for_size (max_size, memsetp); | |||
1734 | ||||
1735 | icode = optab_handler (mov_optab, mode); | |||
1736 | if (icode != CODE_FOR_nothing | |||
1737 | && align >= GET_MODE_ALIGNMENT (mode)) | |||
1738 | { | |||
1739 | unsigned int size = GET_MODE_SIZE (mode); | |||
1740 | ||||
1741 | while (l >= size) | |||
1742 | { | |||
1743 | if (reverse) | |||
1744 | offset -= size; | |||
1745 | ||||
1746 | cst = (*constfun) (constfundata, nullptr, offset, mode); | |||
1747 | /* All CONST_VECTORs can be loaded for memset since | |||
1748 | vec_duplicate_optab is a precondition to pick a | |||
1749 | vector mode for the memset expander. */ | |||
1750 | if (!((memsetp && VECTOR_MODE_P (mode)) | |||
1751 | || targetm.legitimate_constant_p (mode, cst))) | |||
1752 | return 0; | |||
1753 | ||||
1754 | if (!reverse) | |||
1755 | offset += size; | |||
1756 | ||||
1757 | l -= size; | |||
1758 | } | |||
1759 | } | |||
1760 | ||||
1761 | max_size = GET_MODE_SIZE (mode); | |||
1762 | } | |||
1763 | ||||
1764 | /* The code above should have handled everything. */ | |||
1765 | gcc_assert (!l); | |||
1766 | } | |||
1767 | ||||
1768 | return 1; | |||
1769 | } | |||
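/* Usage sketch: the string builtins dry-run their expansion with a
   callback of this shape (builtin_strncpy_read_str in builtins.cc is
   one such), roughly

     if (can_store_by_pieces (len, builtin_strncpy_read_str,
                              CONST_CAST (char *, str), align, false))
       ... expand inline rather than emitting a libcall ...

   the callback materializes the constant bytes of STR at each OFFSET
   in the requested MODE.  */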
1770 | ||||
1771 | /* Generate several move instructions to store LEN bytes generated by | |||
1772 | CONSTFUN to block TO. (A MEM rtx with BLKmode). CONSTFUNDATA is a | |||
1773 | pointer which will be passed as argument in every CONSTFUN call. | |||
1774 | ALIGN is maximum alignment we can assume. MEMSETP is true if this is | |||
1775 | a memset operation and false if it's a copy of a constant string. | |||
1776 | Return value is based on RETMODE argument. */ | |||
1777 | ||||
1778 | rtx | |||
1779 | store_by_pieces (rtx to, unsigned HOST_WIDE_INT len, | |||
1780 | by_pieces_constfn constfun, | |||
1781 | void *constfundata, unsigned int align, bool memsetp, | |||
1782 | memop_ret retmode) | |||
1783 | { | |||
1784 | if (len == 0) | |||
1785 | { | |||
1786 | gcc_assert (retmode != RETURN_END_MINUS_ONE); | |||
1787 | return to; | |||
1788 | } | |||
1789 | ||||
1790 | gcc_assert (targetm.use_by_pieces_infrastructure_p | |||
1791 | (len, align, | |||
1792 | memsetp ? SET_BY_PIECES : STORE_BY_PIECES, | |||
1793 | optimize_insn_for_speed_p ())); | |||
1794 | ||||
1795 | store_by_pieces_d data (to, constfun, constfundata, len, align, | |||
1796 | memsetp); | |||
1797 | data.run (); | |||
1798 | ||||
1799 | if (retmode != RETURN_BEGIN) | |||
1800 | return data.finish_retmode (retmode); | |||
1801 | else | |||
1802 | return to; | |||
1803 | } | |||
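/* Hedged usage note (sketch only): a caller such as
     store_by_pieces (to, len, constfun, data, align, false, RETURN_END);
   picks its pointer result via RETMODE: RETURN_BEGIN yields TO itself,
   RETURN_END the first byte past the stored block, and
   RETURN_END_MINUS_ONE that address minus one (the stpcpy-style result);
   hence the assert above that a zero-length store never asks for
   RETURN_END_MINUS_ONE.  */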
1804 | ||||
1805 | /* Generate several move instructions to clear LEN bytes of block TO. (A MEM | |||
1806 | rtx with BLKmode). ALIGN is maximum alignment we can assume. */ | |||
1807 | ||||
1808 | static void | |||
1809 | clear_by_pieces (rtx to, unsigned HOST_WIDE_INT len, unsigned int align) | |||
1810 | { | |||
1811 | if (len == 0) | |||
1812 | return; | |||
1813 | ||||
1814 | /* Use builtin_memset_read_str to support vector mode broadcast. */ | |||
1815 | char c = 0; | |||
1816 | store_by_pieces_d data (to, builtin_memset_read_str, &c, len, align, | |||
1817 | true); | |||
1818 | data.run (); | |||
1819 | } | |||
1820 | ||||
1821 | /* Context used by compare_by_pieces_genfn. It stores the fail label | |||
1822 | to jump to in case of miscomparison, and for branch ratios greater than 1, | |||
1823 | it stores an accumulator and the current and maximum counts before | |||
1824 | emitting another branch. */ | |||
1825 | ||||
1826 | class compare_by_pieces_d : public op_by_pieces_d | |||
1827 | { | |||
1828 | rtx_code_label *m_fail_label; | |||
1829 | rtx m_accumulator; | |||
1830 | int m_count, m_batch; | |||
1831 | ||||
1832 | void generate (rtx, rtx, machine_mode) final override; | |||
1833 | bool prepare_mode (machine_mode, unsigned int) final override; | |||
1834 | void finish_mode (machine_mode) final override; | |||
1835 | public: | |||
1836 | compare_by_pieces_d (rtx op0, rtx op1, by_pieces_constfn op1_cfn, | |||
1837 | void *op1_cfn_data, HOST_WIDE_INT len, int align, | |||
1838 | rtx_code_label *fail_label) | |||
1839 | : op_by_pieces_d (COMPARE_MAX_PIECES, op0, true, op1, true, op1_cfn, | |||
1840 | op1_cfn_data, len, align, false) | |||
1841 | { | |||
1842 | m_fail_label = fail_label; | |||
1843 | } | |||
1844 | }; | |||
1845 | ||||
1846 | /* A callback used when iterating for a compare_by_pieces_operation. | |||
1847 | OP0 and OP1 are the values that have been loaded and should be | |||
1848 | compared in MODE. DATA holds a pointer to the compare_by_pieces_data | |||
1849 | context structure. */ | |||
1850 | ||||
1851 | void | |||
1852 | compare_by_pieces_d::generate (rtx op0, rtx op1, machine_mode mode) | |||
1853 | { | |||
1854 | if (m_batch > 1) | |||
1855 | { | |||
1856 | rtx temp = expand_binop (mode, sub_optab, op0, op1, NULL_RTX, | |||
1857 | true, OPTAB_LIB_WIDEN); | |||
1858 | if (m_count != 0) | |||
1859 | temp = expand_binop (mode, ior_optab, m_accumulator, temp, temp, | |||
1860 | true, OPTAB_LIB_WIDEN); | |||
1861 | m_accumulator = temp; | |||
1862 | ||||
1863 | if (++m_count < m_batch) | |||
1864 | return; | |||
1865 | ||||
1866 | m_count = 0; | |||
1867 | op0 = m_accumulator; | |||
1868 | op1 = const0_rtx; | |||
1869 | m_accumulator = NULL_RTX; | |||
1870 | } | |||
1871 | do_compare_rtx_and_jump (op0, op1, NE, true, mode, NULL_RTX, NULL, | |||
1872 | m_fail_label, profile_probability::uninitialized ()); | |||
1873 | } | |||
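/* Illustration of the batching (a sketch): with m_batch == 2 the first
   piece only computes t0 = op0 - op1 and parks it in m_accumulator; the
   second piece computes t1, ORs it in, and a single branch
     if ((t0 | t1) != 0) goto m_fail_label;
   is emitted for the pair, halving the number of conditional jumps.  */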
1874 | ||||
1875 | /* Return true if MODE can be used for a set of moves and comparisons, | |||
1876 | given an alignment ALIGN. Prepare whatever data is necessary for | |||
1877 | later calls to generate. */ | |||
1878 | ||||
1879 | bool | |||
1880 | compare_by_pieces_d::prepare_mode (machine_mode mode, unsigned int align) | |||
1881 | { | |||
1882 | insn_code icode = optab_handler (mov_optab, mode); | |||
1883 | if (icode == CODE_FOR_nothing | |||
1884 | || align < GET_MODE_ALIGNMENT (mode) | |||
1885 | || !can_compare_p (EQ, mode, ccp_jump)) | |||
1886 | return false; | |||
1887 | m_batch = targetm.compare_by_pieces_branch_ratio (mode); | |||
1888 | if (m_batch < 0) | |||
1889 | return false; | |||
1890 | m_accumulator = NULL_RTX; | |||
1891 | m_count = 0; | |||
1892 | return true; | |||
1893 | } | |||
1894 | ||||
1895 | /* Called after expanding a series of comparisons in MODE. If we have | |||
1896 | accumulated results for which we haven't emitted a branch yet, do | |||
1897 | so now. */ | |||
1898 | ||||
1899 | void | |||
1900 | compare_by_pieces_d::finish_mode (machine_mode mode) | |||
1901 | { | |||
1902 | if (m_accumulator != NULL_RTX) | |||
1903 | do_compare_rtx_and_jump (m_accumulator, const0_rtx, NE, true, mode, | |||
1904 | NULL_RTX, NULL, m_fail_label, | |||
1905 | profile_probability::uninitialized ()); | |||
1906 | } | |||
1907 | ||||
1908 | /* Generate several move instructions to compare LEN bytes from blocks | |||
1909 | ARG0 and ARG1. (These are MEM rtx's with BLKmode). | |||
1910 | ||||
1911 | If PUSH_ROUNDING is defined and TO is NULL, emit_single_push_insn is | |||
1912 | used to push FROM to the stack. | |||
1913 | ||||
1914 | ALIGN is maximum stack alignment we can assume. | |||
1915 | ||||
1916 | Optionally, the caller can pass a constfn and associated data in A1_CFN | |||
1917 | and A1_CFN_DATA, describing that the second operand being compared is a | |||
1918 | known constant and how to obtain its data. */ | |||
1919 | ||||
1920 | static rtx | |||
1921 | compare_by_pieces (rtx arg0, rtx arg1, unsigned HOST_WIDE_INT len, | |||
1922 | rtx target, unsigned int align, | |||
1923 | by_pieces_constfn a1_cfn, void *a1_cfn_data) | |||
1924 | { | |||
1925 | rtx_code_label *fail_label = gen_label_rtx (); | |||
1926 | rtx_code_label *end_label = gen_label_rtx (); | |||
1927 | ||||
1928 | if (target == NULL_RTX | |||
1929 | || !REG_P (target) || REGNO (target) < FIRST_PSEUDO_REGISTER) | |||
1930 | target = gen_reg_rtx (TYPE_MODE (integer_type_node)); | |||
1931 | ||||
1932 | compare_by_pieces_d data (arg0, arg1, a1_cfn, a1_cfn_data, len, align, | |||
1933 | fail_label); | |||
1934 | ||||
1935 | data.run (); | |||
1936 | ||||
1937 | emit_move_insn (target, const0_rtx); | |||
1938 | emit_jump (end_label); | |||
1939 | emit_barrier (); | |||
1940 | emit_label (fail_label); | |||
1941 | emit_move_insn (target, const1_rtx); | |||
1942 | emit_label (end_label); | |||
1943 | ||||
1944 | return target; | |||
1945 | } | |||
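/* Shape of the emitted code, roughly (registers and modes are
   target-dependent):
       <one compare-and-jump per piece, each branching to fail_label>
       target = 0;  goto end_label;
     fail_label:
       target = 1;
     end_label:
   so TARGET ends up zero exactly when the two blocks compare equal.  */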
1946 | ||||
1947 | /* Emit code to move a block Y to a block X. This may be done with | |||
1948 | string-move instructions, with multiple scalar move instructions, | |||
1949 | or with a library call. | |||
1950 | ||||
1951 | Both X and Y must be MEM rtx's (perhaps inside VOLATILE) with mode BLKmode. | |||
1952 | SIZE is an rtx that says how long they are. | |||
1953 | ALIGN is the maximum alignment we can assume they have. | |||
1954 | METHOD describes what kind of copy this is, and what mechanisms may be used. | |||
1955 | MIN_SIZE is the minimal size of block to move | |||
1956 | MAX_SIZE is the maximal size of block to move, if it cannot be represented | |||
1957 | in unsigned HOST_WIDE_INT, then it is a mask of all ones. | |||
1958 | ||||
1959 | Return the address of the new block, if memcpy is called and returns it, | |||
1960 | 0 otherwise. */ | |||
1961 | ||||
1962 | rtx | |||
1963 | emit_block_move_hints (rtx x, rtx y, rtx size, enum block_op_methods method, | |||
1964 | unsigned int expected_align, HOST_WIDE_INT expected_size, | |||
1965 | unsigned HOST_WIDE_INT min_size, | |||
1966 | unsigned HOST_WIDE_INT max_size, | |||
1967 | unsigned HOST_WIDE_INT probable_max_size, | |||
1968 | bool bail_out_libcall, bool *is_move_done, | |||
1969 | bool might_overlap) | |||
1970 | { | |||
1971 | int may_use_call; | |||
1972 | rtx retval = 0; | |||
1973 | unsigned int align; | |||
1974 | ||||
1975 | if (is_move_done) | |||
1976 | *is_move_done = true; | |||
1977 | ||||
1978 | gcc_assert (size); | |||
1979 | if (CONST_INT_P (size) && INTVAL (size) == 0) | |||
1980 | return 0; | |||
1981 | ||||
1982 | switch (method) | |||
1983 | { | |||
1984 | case BLOCK_OP_NORMAL: | |||
1985 | case BLOCK_OP_TAILCALL: | |||
1986 | may_use_call = 1; | |||
1987 | break; | |||
1988 | ||||
1989 | case BLOCK_OP_CALL_PARM: | |||
1990 | may_use_call = block_move_libcall_safe_for_call_parm (); | |||
1991 | ||||
1992 | /* Make inhibit_defer_pop nonzero around the library call | |||
1993 | to force it to pop the arguments right away. */ | |||
1994 | NO_DEFER_POP; | |||
1995 | break; | |||
1996 | ||||
1997 | case BLOCK_OP_NO_LIBCALL: | |||
1998 | may_use_call = 0; | |||
1999 | break; | |||
2000 | ||||
2001 | case BLOCK_OP_NO_LIBCALL_RET: | |||
2002 | may_use_call = -1; | |||
2003 | break; | |||
2004 | ||||
2005 | default: | |||
2006 | gcc_unreachable (); | |||
2007 | } | |||
2008 | ||||
2009 | gcc_assert (MEM_P (x) && MEM_P (y)); | |||
2010 | align = MIN (MEM_ALIGN (x), MEM_ALIGN (y)); | |||
2011 | gcc_assert (align >= BITS_PER_UNIT); | |||
2012 | ||||
2013 | /* Make sure we've got BLKmode addresses; store_one_arg can decide that | |||
2014 | block copy is more efficient for other large modes, e.g. DCmode. */ | |||
2015 | x = adjust_address (x, BLKmode, 0); | |||
2016 | y = adjust_address (y, BLKmode, 0); | |||
2017 | ||||
2018 | /* If source and destination are the same, no need to copy anything. */ | |||
2019 | if (rtx_equal_p (x, y) | |||
2020 | && !MEM_VOLATILE_P (x) | |||
2021 | && !MEM_VOLATILE_P (y)) | |||
2022 | return 0; | |||
2023 | ||||
2024 | /* Set MEM_SIZE as appropriate for this block copy. The main place this | |||
2025 | can be incorrect is coming from __builtin_memcpy. */ | |||
2026 | poly_int64 const_size; | |||
2027 | if (poly_int_rtx_p (size, &const_size)) | |||
2028 | { | |||
2029 | x = shallow_copy_rtx (x); | |||
2030 | y = shallow_copy_rtx (y); | |||
2031 | set_mem_size (x, const_size); | |||
2032 | set_mem_size (y, const_size); | |||
2033 | } | |||
2034 | ||||
2035 | bool pieces_ok = CONST_INT_P (size) | |||
2036 | && can_move_by_pieces (INTVAL (size), align); | |||
2037 | bool pattern_ok = false; | |||
2038 | ||||
2039 | if (!pieces_ok || might_overlap) | |||
2040 | { | |||
2041 | pattern_ok | |||
2042 | = emit_block_move_via_pattern (x, y, size, align, | |||
2043 | expected_align, expected_size, | |||
2044 | min_size, max_size, probable_max_size, | |||
2045 | might_overlap); | |||
2046 | if (!pattern_ok && might_overlap) | |||
2047 | { | |||
2048 | /* Do not try any of the other methods below as they are not safe | |||
2049 | for overlapping moves. */ | |||
2050 | *is_move_done = false; | |||
2051 | return retval; | |||
2052 | } | |||
2053 | } | |||
2054 | ||||
2055 | if (pattern_ok) | |||
2056 | ; | |||
2057 | else if (pieces_ok) | |||
2058 | move_by_pieces (x, y, INTVAL (size), align, RETURN_BEGIN); | |||
2059 | else if (may_use_call && !might_overlap | |||
2060 | && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (x)) | |||
2061 | && ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (y))) | |||
2062 | { | |||
2063 | if (bail_out_libcall) | |||
2064 | { | |||
2065 | if (is_move_done) | |||
2066 | *is_move_done = false; | |||
2067 | return retval; | |||
2068 | } | |||
2069 | ||||
2070 | if (may_use_call < 0) | |||
2071 | return pc_rtx; | |||
2072 | ||||
2073 | retval = emit_block_copy_via_libcall (x, y, size, | |||
2074 | method == BLOCK_OP_TAILCALL); | |||
2075 | } | |||
2076 | else if (might_overlap) | |||
2077 | *is_move_done = false; | |||
2078 | else | |||
2079 | emit_block_move_via_loop (x, y, size, align); | |||
2080 | ||||
2081 | if (method == BLOCK_OP_CALL_PARM) | |||
2082 | OK_DEFER_POP; | |||
2083 | ||||
2084 | return retval; | |||
2085 | } | |||
2086 | ||||
2087 | rtx | |||
2088 | emit_block_move (rtx x, rtx y, rtx size, enum block_op_methods method) | |||
2089 | { | |||
2090 | unsigned HOST_WIDE_INT max, min = 0; | |||
2091 | if (GET_CODE (size) == CONST_INT) | |||
2092 | min = max = UINTVAL (size); | |||
2093 | else | |||
2094 | max = GET_MODE_MASK (GET_MODE (size)); | |||
2095 | return emit_block_move_hints (x, y, size, method, 0, -1, | |||
2096 | min, max, max); | |||
2097 | } | |||
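/* Hedged usage sketch: a caller with a compile-time size, e.g.
     emit_block_move (dst_mem, src_mem, GEN_INT (32), BLOCK_OP_NORMAL);
   reaches the CONST_INT branch above, so min and max both become 32;
   a variable SIZE only bounds max by the mask of SIZE's mode.  */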
2098 | ||||
2099 | /* A subroutine of emit_block_move. Returns true if calling the | |||
2100 | block move libcall will not clobber any parameters which may have | |||
2101 | already been placed on the stack. */ | |||
2102 | ||||
2103 | static bool | |||
2104 | block_move_libcall_safe_for_call_parm (void) | |||
2105 | { | |||
2106 | tree fn; | |||
2107 | ||||
2108 | /* If arguments are pushed on the stack, then they're safe. */ | |||
2109 | if (targetm.calls.push_argument (0)) | |||
2110 | return true; | |||
2111 | ||||
2112 | /* If registers go on the stack anyway, any argument is sure to clobber | |||
2113 | an outgoing argument. */ | |||
2114 | #if defined (REG_PARM_STACK_SPACE) | |||
2115 | fn = builtin_decl_implicit (BUILT_IN_MEMCPY); | |||
2116 | /* Avoid set but not used warning if *REG_PARM_STACK_SPACE doesn't | |||
2117 | depend on its argument. */ | |||
2118 | (void) fn; | |||
2119 | if (OUTGOING_REG_PARM_STACK_SPACE ((!fn ? NULL_TREE : TREE_TYPE (fn))) | |||
2120 | && REG_PARM_STACK_SPACE (fn) != 0) | |||
2121 | return false; | |||
2122 | #endif | |||
2123 | ||||
2124 | /* If any argument goes in memory, then it might clobber an outgoing | |||
2125 | argument. */ | |||
2126 | { | |||
2127 | CUMULATIVE_ARGS args_so_far_v; | |||
2128 | cumulative_args_t args_so_far; | |||
2129 | tree arg; | |||
2130 | ||||
2131 | fn = builtin_decl_implicit (BUILT_IN_MEMCPY); | |||
2132 | INIT_CUMULATIVE_ARGS (args_so_far_v, TREE_TYPE (fn), NULL_RTX, 0, 3); | |||
2133 | args_so_far = pack_cumulative_args (&args_so_far_v); | |||
2134 | ||||
2135 | arg = TYPE_ARG_TYPES (TREE_TYPE (fn)); | |||
2136 | for ( ; arg != void_list_node; arg = TREE_CHAIN (arg)) | |||
2137 | { | |||
2138 | machine_mode mode = TYPE_MODE (TREE_VALUE (arg)); | |||
2139 | function_arg_info arg_info (mode, /*named=*/true); | |||
2140 | rtx tmp = targetm.calls.function_arg (args_so_far, arg_info); | |||
2141 | if (!tmp || !REG_P (tmp)) | |||
2142 | return false; | |||
2143 | if (targetm.calls.arg_partial_bytes (args_so_far, arg_info)) | |||
2144 | return false; | |||
2145 | targetm.calls.function_arg_advance (args_so_far, arg_info); | |||
2146 | } | |||
2147 | } | |||
2148 | return true; | |||
2149 | } | |||
2150 | ||||
2151 | /* A subroutine of emit_block_move. Expand a cpymem or movmem pattern; | |||
2152 | return true if successful. | |||
2153 | ||||
2154 | X is the destination of the copy or move. | |||
2155 | Y is the source of the copy or move. | |||
2156 | SIZE is the size of the block to be moved. | |||
2157 | ||||
2158 | MIGHT_OVERLAP indicates this originated with expansion of a | |||
2159 | builtin_memmove() and the source and destination blocks may | |||
2160 | overlap. | |||
2161 | */ | |||
2162 | ||||
2163 | static bool | |||
2164 | emit_block_move_via_pattern (rtx x, rtx y, rtx size, unsigned int align, | |||
2165 | unsigned int expected_align, | |||
2166 | HOST_WIDE_INT expected_size, | |||
2167 | unsigned HOST_WIDE_INT min_size, | |||
2168 | unsigned HOST_WIDE_INT max_size, | |||
2169 | unsigned HOST_WIDE_INT probable_max_size, | |||
2170 | bool might_overlap) | |||
2171 | { | |||
2172 | if (expected_align < align) | |||
2173 | expected_align = align; | |||
2174 | if (expected_size != -1) | |||
2175 | { | |||
2176 | if ((unsigned HOST_WIDE_INT)expected_size > probable_max_size) | |||
2177 | expected_size = probable_max_size; | |||
2178 | if ((unsigned HOST_WIDE_INT)expected_size < min_size) | |||
2179 | expected_size = min_size; | |||
2180 | } | |||
2181 | ||||
2182 | /* Since this is a move insn, we don't care about volatility. */ | |||
2183 | temporary_volatile_ok v (true); | |||
2184 | ||||
2185 | /* Try the most limited insn first, because there's no point | |||
2186 | including more than one in the machine description unless | |||
2187 | the more limited one has some advantage. */ | |||
2188 | ||||
2189 | opt_scalar_int_mode mode_iter; | |||
2190 | FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT) | |||
2191 | { | |||
2192 | scalar_int_mode mode = mode_iter.require (); | |||
2193 | enum insn_code code; | |||
2194 | if (might_overlap) | |||
2195 | code = direct_optab_handler (movmem_optab, mode); | |||
2196 | else | |||
2197 | code = direct_optab_handler (cpymem_optab, mode); | |||
2198 | ||||
2199 | if (code != CODE_FOR_nothing | |||
2200 | /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT | |||
2201 | here because if SIZE is less than the mode mask, as it is | |||
2202 | returned by the macro, it will definitely be less than the | |||
2203 | actual mode mask. Since SIZE is within the Pmode address | |||
2204 | space, we limit MODE to Pmode. */ | |||
2205 | && ((CONST_INT_P (size) | |||
2206 | && ((unsigned HOST_WIDE_INT) INTVAL (size) | |||
2207 | <= (GET_MODE_MASK (mode) >> 1))) | |||
2208 | || max_size <= (GET_MODE_MASK (mode) >> 1) | |||
2209 | || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode))) | |||
2210 | { | |||
2211 | class expand_operand ops[9]; | |||
2212 | unsigned int nops; | |||
2213 | ||||
2214 | /* ??? When called via emit_block_move_for_call, it'd be | |||
2215 | nice if there were some way to inform the backend, so | |||
2216 | that it doesn't fail the expansion because it thinks | |||
2217 | emitting the libcall would be more efficient. */ | |||
2218 | nops = insn_data[(int) code].n_generator_args; | |||
2219 | gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9); | |||
2220 | ||||
2221 | create_fixed_operand (&ops[0], x); | |||
2222 | create_fixed_operand (&ops[1], y); | |||
2223 | /* The check above guarantees that this size conversion is valid. */ | |||
2224 | create_convert_operand_to (&ops[2], size, mode, true); | |||
2225 | create_integer_operand (&ops[3], align / BITS_PER_UNIT); | |||
2226 | if (nops >= 6) | |||
2227 | { | |||
2228 | create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT); | |||
2229 | create_integer_operand (&ops[5], expected_size); | |||
2230 | } | |||
2231 | if (nops >= 8) | |||
2232 | { | |||
2233 | create_integer_operand (&ops[6], min_size); | |||
2234 | /* If we cannot represent the maximal size, | |||
2235 | make parameter NULL. */ | |||
2236 | if ((HOST_WIDE_INT) max_size != -1) | |||
2237 | create_integer_operand (&ops[7], max_size); | |||
2238 | else | |||
2239 | create_fixed_operand (&ops[7], NULL); | |||
2240 | } | |||
2241 | if (nops == 9) | |||
2242 | { | |||
2243 | /* If we cannot represent the maximal size, | |||
2244 | make parameter NULL. */ | |||
2245 | if ((HOST_WIDE_INT) probable_max_size != -1) | |||
2246 | create_integer_operand (&ops[8], probable_max_size); | |||
2247 | else | |||
2248 | create_fixed_operand (&ops[8], NULL); | |||
2249 | } | |||
2250 | if (maybe_expand_insn (code, nops, ops)) | |||
2251 | return true; | |||
2252 | } | |||
2253 | } | |||
2254 | ||||
2255 | return false; | |||
2256 | } | |||
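/* Sketch of the operand contract (md snippet hypothetical): a backend
   providing only the basic form, e.g.
     (define_expand "cpymemsi"
       [(match_operand:BLK 0 "memory_operand")
        (match_operand:BLK 1 "memory_operand")
        (match_operand:SI 2 "nonmemory_operand")
        (match_operand:SI 3 "const_int_operand")]
       "" "...")
   is handed just dst/src/size/align (nops == 4); the 6-, 8- and 9-operand
   variants additionally receive the expected-alignment, expected-size and
   min/max/probable-max hints filled in above.  */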
2257 | ||||
2258 | /* A subroutine of emit_block_move. Copy the data via an explicit | |||
2259 | loop. This is used only when libcalls are forbidden. */ | |||
2260 | /* ??? It'd be nice to copy in hunks larger than QImode. */ | |||
2261 | ||||
2262 | static void | |||
2263 | emit_block_move_via_loop (rtx x, rtx y, rtx size, | |||
2264 | unsigned int align ATTRIBUTE_UNUSED) | |||
2265 | { | |||
2266 | rtx_code_label *cmp_label, *top_label; | |||
2267 | rtx iter, x_addr, y_addr, tmp; | |||
2268 | machine_mode x_addr_mode = get_address_mode (x); | |||
2269 | machine_mode y_addr_mode = get_address_mode (y); | |||
2270 | machine_mode iter_mode; | |||
2271 | ||||
2272 | iter_mode = GET_MODE (size); | |||
2273 | if (iter_mode == VOIDmode) | |||
2274 | iter_mode = word_mode; | |||
2275 | ||||
2276 | top_label = gen_label_rtx (); | |||
2277 | cmp_label = gen_label_rtx (); | |||
2278 | iter = gen_reg_rtx (iter_mode); | |||
2279 | ||||
2280 | emit_move_insn (iter, const0_rtx); | |||
2281 | ||||
2282 | x_addr = force_operand (XEXP (x, 0), NULL_RTX); | |||
2283 | y_addr = force_operand (XEXP (y, 0), NULL_RTX); | |||
2284 | do_pending_stack_adjust (); | |||
2285 | ||||
2286 | emit_jump (cmp_label); | |||
2287 | emit_label (top_label); | |||
2288 | ||||
2289 | tmp = convert_modes (x_addr_mode, iter_mode, iter, true); | |||
2290 | x_addr = simplify_gen_binary (PLUS, x_addr_mode, x_addr, tmp); | |||
2291 | ||||
2292 | if (x_addr_mode != y_addr_mode) | |||
2293 | tmp = convert_modes (y_addr_mode, iter_mode, iter, true); | |||
2294 | y_addr = simplify_gen_binary (PLUS, y_addr_mode, y_addr, tmp); | |||
2295 | ||||
2296 | x = change_address (x, QImode, x_addr); | |||
2297 | y = change_address (y, QImode, y_addr); | |||
2298 | ||||
2299 | emit_move_insn (x, y); | |||
2300 | ||||
2301 | tmp = expand_simple_binop (iter_mode, PLUS, iter, const1_rtx, iter, | |||
2302 | true, OPTAB_LIB_WIDEN); | |||
2303 | if (tmp != iter) | |||
2304 | emit_move_insn (iter, tmp); | |||
2305 | ||||
2306 | emit_label (cmp_label); | |||
2307 | ||||
2308 | emit_cmp_and_jump_insns (iter, size, LT, NULL_RTX, iter_mode, | |||
2309 | true, top_label, | |||
2310 | profile_probability::guessed_always () | |||
2311 | .apply_scale (9, 10)); | |||
2312 | } | |||
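/* The insns emitted above amount to this byte loop (sketch):
       iter = 0;
       goto cmp;
     top:
       x[iter] = y[iter];
       iter += 1;
     cmp:
       if (iter < size) goto top;    <- predicted taken 9 times in 10
   QImode moves keep it correct for any alignment, at the cost of one
   byte per iteration.  */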
2313 | ||||
2314 | /* Expand a call to memcpy or memmove or memcmp, and return the result. | |||
2315 | TAILCALL is true if this is a tail call. */ | |||
2316 | ||||
2317 | rtx | |||
2318 | emit_block_op_via_libcall (enum built_in_function fncode, rtx dst, rtx src, | |||
2319 | rtx size, bool tailcall) | |||
2320 | { | |||
2321 | rtx dst_addr, src_addr; | |||
2322 | tree call_expr, dst_tree, src_tree, size_tree; | |||
2323 | machine_mode size_mode; | |||
2324 | ||||
2325 | /* Since dst and src are passed to a libcall, mark the corresponding | |||
2326 | tree EXPR as addressable. */ | |||
2327 | tree dst_expr = MEM_EXPR (dst); | |||
2328 | tree src_expr = MEM_EXPR (src); | |||
2329 | if (dst_expr) | |||
2330 | mark_addressable (dst_expr); | |||
2331 | if (src_expr) | |||
2332 | mark_addressable (src_expr); | |||
2333 | ||||
2334 | dst_addr = copy_addr_to_reg (XEXP (dst, 0)); | |||
2335 | dst_addr = convert_memory_address (ptr_mode, dst_addr); | |||
2336 | dst_tree = make_tree (ptr_type_node, dst_addr); | |||
2337 | ||||
2338 | src_addr = copy_addr_to_reg (XEXP (src, 0)); | |||
2339 | src_addr = convert_memory_address (ptr_mode, src_addr); | |||
2340 | src_tree = make_tree (ptr_type_node, src_addr); | |||
2341 | ||||
2342 | size_mode = TYPE_MODE (sizetype); | |||
2343 | size = convert_to_mode (size_mode, size, 1); | |||
2344 | size = copy_to_mode_reg (size_mode, size); | |||
2345 | size_tree = make_tree (sizetype, size); | |||
2346 | ||||
2347 | /* It is incorrect to use the libcall calling conventions for calls to | |||
2348 | memcpy/memmove/memcmp because they can be provided by the user. */ | |||
2349 | tree fn = builtin_decl_implicit (fncode); | |||
2350 | call_expr = build_call_expr (fn, 3, dst_tree, src_tree, size_tree); | |||
2351 | CALL_EXPR_TAILCALL (call_expr) = tailcall; | |||
2352 | ||||
2353 | return expand_call (call_expr, NULL_RTX, false); | |||
2354 | } | |||
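/* Hedged usage sketch: the emit_block_copy_via_libcall wrapper used
   earlier reduces to a call such as
     emit_block_op_via_libcall (BUILT_IN_MEMCPY, dst, src, size, false);
   which builds a GENERIC CALL_EXPR to the user-visible memcpy and
   expands that, honoring the note above about user-provided
   definitions.  */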
2355 | ||||
2356 | /* Try to expand cmpstrn or cmpmem operation ICODE with the given operands. | |||
2357 | ARG3_TYPE is the type of ARG3_RTX. Return the result rtx on success, | |||
2358 | otherwise return null. */ | |||
2359 | ||||
2360 | rtx | |||
2361 | expand_cmpstrn_or_cmpmem (insn_code icode, rtx target, rtx arg1_rtx, | |||
2362 | rtx arg2_rtx, tree arg3_type, rtx arg3_rtx, | |||
2363 | HOST_WIDE_INT align) | |||
2364 | { | |||
2365 | machine_mode insn_mode = insn_data[icode].operand[0].mode; | |||
2366 | ||||
2367 | if (target && (!REG_P (target) || HARD_REGISTER_P (target))) | |||
2368 | target = NULL_RTX; | |||
2369 | ||||
2370 | class expand_operand ops[5]; | |||
2371 | create_output_operand (&ops[0], target, insn_mode); | |||
2372 | create_fixed_operand (&ops[1], arg1_rtx); | |||
2373 | create_fixed_operand (&ops[2], arg2_rtx); | |||
2374 | create_convert_operand_from (&ops[3], arg3_rtx, TYPE_MODE (arg3_type), | |||
2375 | TYPE_UNSIGNED (arg3_type)); | |||
2376 | create_integer_operand (&ops[4], align); | |||
2377 | if (maybe_expand_insn (icode, 5, ops)) | |||
2378 | return ops[0].value; | |||
2379 | return NULL_RTX; | |||
2380 | } | |||
2381 | ||||
2382 | /* Expand a block compare between X and Y with length LEN using the | |||
2383 | cmpmem optab, placing the result in TARGET. LEN_TYPE is the type | |||
2384 | of the expression that was used to calculate the length. ALIGN | |||
2385 | gives the known minimum common alignment. */ | |||
2386 | ||||
2387 | static rtx | |||
2388 | emit_block_cmp_via_cmpmem (rtx x, rtx y, rtx len, tree len_type, rtx target, | |||
2389 | unsigned align) | |||
2390 | { | |||
2391 | /* Note: The cmpstrnsi pattern, if it exists, is not suitable for | |||
2392 | implementing memcmp because it will stop if it encounters two | |||
2393 | zero bytes. */ | |||
2394 | insn_code icode = direct_optab_handler (cmpmem_optab, SImode); | |||
2395 | ||||
2396 | if (icode == CODE_FOR_nothing) | |||
2397 | return NULL_RTX; | |||
2398 | ||||
2399 | return expand_cmpstrn_or_cmpmem (icode, target, x, y, len_type, len, align); | |||
2400 | } | |||
2401 | ||||
2402 | /* Emit code to compare a block Y to a block X. This may be done with | |||
2403 | string-compare instructions, with multiple scalar instructions, | |||
2404 | or with a library call. | |||
2405 | ||||
2406 | Both X and Y must be MEM rtx's. LEN is an rtx that says how long | |||
2407 | they are. LEN_TYPE is the type of the expression that was used to | |||
2408 | calculate it. | |||
2409 | ||||
2410 | If EQUALITY_ONLY is true, it means we don't have to return the tri-state | |||
2411 | value of a normal memcmp call, instead we can just compare for equality. | |||
2412 | If FORCE_LIBCALL is true, we should emit a call to memcmp rather than | |||
2413 | returning NULL_RTX. | |||
2414 | ||||
2415 | Optionally, the caller can pass a constfn and associated data in Y_CFN | |||
2416 | and Y_CFN_DATA, describing that the second operand being compared is a | |||
2417 | known constant and how to obtain its data. | |||
2418 | Return the result of the comparison, or NULL_RTX if we failed to | |||
2419 | perform the operation. */ | |||
2420 | ||||
2421 | rtx | |||
2422 | emit_block_cmp_hints (rtx x, rtx y, rtx len, tree len_type, rtx target, | |||
2423 | bool equality_only, by_pieces_constfn y_cfn, | |||
2424 | void *y_cfndata) | |||
2425 | { | |||
2426 | rtx result = 0; | |||
2427 | ||||
2428 | if (CONST_INT_P (len) && INTVAL (len) == 0) | |||
2429 | return const0_rtx; | |||
2430 | ||||
2431 | gcc_assert (MEM_P (x) && MEM_P (y)); | |||
2432 | unsigned int align = MIN (MEM_ALIGN (x), MEM_ALIGN (y)); | |||
2433 | gcc_assert (align >= BITS_PER_UNIT); | |||
2434 | ||||
2435 | x = adjust_address (x, BLKmode, 0); | |||
2436 | y = adjust_address (y, BLKmode, 0); | |||
2437 | ||||
2438 | if (equality_only | |||
2439 | && CONST_INT_P (len) | |||
2440 | && can_do_by_pieces (INTVAL (len), align, COMPARE_BY_PIECES)) | |||
2441 | result = compare_by_pieces (x, y, INTVAL (len), target, align, | |||
2442 | y_cfn, y_cfndata); | |||
2443 | else | |||
2444 | result = emit_block_cmp_via_cmpmem (x, y, len, len_type, target, align); | |||
2445 | ||||
2446 | return result; | |||
2447 | } | |||
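/* Decision sketch for a concrete expansion: memcmp (a, b, 16) == 0
   arrives here with EQUALITY_ONLY true and LEN a CONST_INT, so the
   by-pieces compare is tried first; a tri-state memcmp result or a
   variable length instead falls through to the cmpmem expander.  */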
2448 | ||||
2449 | /* Copy all or part of a value X into registers starting at REGNO. | |||
2450 | The number of registers to be filled is NREGS. */ | |||
2451 | ||||
2452 | void | |||
2453 | move_block_to_reg (int regno, rtx x, int nregs, machine_mode mode) | |||
2454 | { | |||
2455 | if (nregs == 0) | |||
2456 | return; | |||
2457 | ||||
2458 | if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x)) | |||
2459 | x = validize_mem (force_const_mem (mode, x)); | |||
2460 | ||||
2461 | /* See if the machine can do this with a load multiple insn. */ | |||
2462 | if (targetm.have_load_multiple ()) | |||
2463 | { | |||
2464 | rtx_insn *last = get_last_insn (); | |||
2465 | rtx first = gen_rtx_REG (word_mode, regno); | |||
2466 | if (rtx_insn *pat = targetm.gen_load_multiple (first, x, | |||
2467 | GEN_INT (nregs))) | |||
2468 | { | |||
2469 | emit_insn (pat); | |||
2470 | return; | |||
2471 | } | |||
2472 | else | |||
2473 | delete_insns_since (last); | |||
2474 | } | |||
2475 | ||||
2476 | for (int i = 0; i < nregs; i++) | |||
2477 | emit_move_insn (gen_rtx_REG (word_mode, regno + i), | |||
2478 | operand_subword_force (x, i, mode)); | |||
2479 | } | |||
2480 | ||||
2481 | /* Copy all or part of a BLKmode value X out of registers starting at REGNO. | |||
2482 | The number of registers to be filled is NREGS. */ | |||
2483 | ||||
2484 | void | |||
2485 | move_block_from_reg (int regno, rtx x, int nregs) | |||
2486 | { | |||
2487 | if (nregs == 0) | |||
2488 | return; | |||
2489 | ||||
2490 | /* See if the machine can do this with a store multiple insn. */ | |||
2491 | if (targetm.have_store_multiple ()) | |||
2492 | { | |||
2493 | rtx_insn *last = get_last_insn (); | |||
2494 | rtx first = gen_rtx_REG (word_mode, regno); | |||
2495 | if (rtx_insn *pat = targetm.gen_store_multiple (x, first, | |||
2496 | GEN_INT (nregs))) | |||
2497 | { | |||
2498 | emit_insn (pat); | |||
2499 | return; | |||
2500 | } | |||
2501 | else | |||
2502 | delete_insns_since (last); | |||
2503 | } | |||
2504 | ||||
2505 | for (int i = 0; i < nregs; i++) | |||
2506 | { | |||
2507 | rtx tem = operand_subword (x, i, 1, BLKmode); | |||
2508 | ||||
2509 | gcc_assert (tem); | |||
2510 | ||||
2511 | emit_move_insn (tem, gen_rtx_REG (word_mode, regno + i)); | |||
2512 | } | |||
2513 | } | |||
2514 | ||||
2515 | /* Generate a PARALLEL rtx for a new non-consecutive group of registers from | |||
2516 | ORIG, where ORIG is a non-consecutive group of registers represented by | |||
2517 | a PARALLEL. The clone is identical to the original except in that the | |||
2518 | original set of registers is replaced by a new set of pseudo registers. | |||
2519 | The new set has the same modes as the original set. */ | |||
2520 | ||||
2521 | rtx | |||
2522 | gen_group_rtx (rtx orig) | |||
2523 | { | |||
2524 | int i, length; | |||
2525 | rtx *tmps; | |||
2526 | ||||
2527 | gcc_assert (GET_CODE (orig) == PARALLEL); | |||
2528 | ||||
2529 | length = XVECLEN (orig, 0); | |||
2530 | tmps = XALLOCAVEC (rtx, length); | |||
2531 | ||||
2532 | /* Skip a NULL entry in first slot. */ | |||
2533 | i = XEXP (XVECEXP (orig, 0, 0), 0) ? 0 : 1; | |||
2534 | ||||
2535 | if (i) | |||
2536 | tmps[0] = 0; | |||
2537 | ||||
2538 | for (; i < length; i++) | |||
2539 | { | |||
2540 | machine_mode mode = GET_MODE (XEXP (XVECEXP (orig, 0, i), 0)); | |||
2541 | rtx offset = XEXP (XVECEXP (orig, 0, i), 1); | |||
2542 | ||||
2543 | tmps[i] = gen_rtx_EXPR_LIST (VOIDmode, gen_reg_rtx (mode), offset); | |||
2544 | } | |||
2545 | ||||
2546 | return gen_rtx_PARALLEL (GET_MODE (orig), gen_rtvec_v (length, tmps)); | |||
2547 | } | |||
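/* Shape reminder (register choices illustrative): such a group might be
     (parallel [(expr_list (reg:DI 1) (const_int 0))
                (expr_list (reg:SF 17) (const_int 8))])
   pairing each register with its byte offset into the block; the clone
   built above keeps every mode and offset but substitutes fresh
   pseudos.  */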
2548 | ||||
2549 | /* A subroutine of emit_group_load. Arguments as for emit_group_load, | |||
2550 | except that values are placed in TMPS[i], and must later be moved | |||
2551 | into corresponding XEXP (XVECEXP (DST, 0, i), 0) element. */ | |||
2552 | ||||
2553 | static void | |||
2554 | emit_group_load_1 (rtx *tmps, rtx dst, rtx orig_src, tree type, | |||
2555 | poly_int64 ssize) | |||
2556 | { | |||
2557 | rtx src; | |||
2558 | int start, i; | |||
2559 | machine_mode m = GET_MODE (orig_src); | |||
2560 | ||||
2561 | gcc_assert (GET_CODE (dst) == PARALLEL); | |||
2562 | ||||
2563 | if (m != VOIDmode | |||
2564 | && !SCALAR_INT_MODE_P (m) | |||
2565 | && !MEM_P (orig_src) | |||
2566 | && GET_CODE (orig_src) != CONCAT) | |||
2567 | { | |||
2568 | scalar_int_mode imode; | |||
2569 | if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode)) | |||
2570 | { | |||
2571 | src = gen_reg_rtx (imode); | |||
2572 | emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src); | |||
2573 | } | |||
2574 | else | |||
2575 | { | |||
2576 | src = assign_stack_temp (GET_MODE (orig_src), ssize); | |||
2577 | emit_move_insn (src, orig_src); | |||
2578 | } | |||
2579 | emit_group_load_1 (tmps, dst, src, type, ssize); | |||
2580 | return; | |||
2581 | } | |||
2582 | ||||
2583 | /* Check for a NULL entry, used to indicate that the parameter goes | |||
2584 | both on the stack and in registers. */ | |||
2585 | if (XEXP (XVECEXP (dst, 0, 0), 0)) | |||
2586 | start = 0; | |||
2587 | else | |||
2588 | start = 1; | |||
2589 | ||||
2590 | /* Process the pieces. */ | |||
2591 | for (i = start; i < XVECLEN (dst, 0); i++) | |||
2592 | { | |||
2593 | machine_mode mode = GET_MODE (XEXP (XVECEXP (dst, 0, i), 0)); | |||
2594 | poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (dst, 0, i), 1)); | |||
2595 | poly_int64 bytelen = GET_MODE_SIZE (mode); | |||
2596 | poly_int64 shift = 0; | |||
2597 | ||||
2598 | /* Handle trailing fragments that run over the size of the struct. | |||
2599 | It's the target's responsibility to make sure that the fragment | |||
2600 | cannot be strictly smaller in some cases and strictly larger | |||
2601 | in others. */ | |||
2602 | gcc_checking_assert (ordered_p (bytepos + bytelen, ssize)); | |||
2603 | if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize)) | |||
2604 | { | |||
2605 | /* Arrange to shift the fragment to where it belongs. | |||
2606 | extract_bit_field loads to the lsb of the reg. */ | |||
2607 | if ( | |||
2608 | #ifdef BLOCK_REG_PADDING | |||
2609 | BLOCK_REG_PADDING (GET_MODE (orig_src), type, i == start) | |||
2610 | == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD) | |||
2611 | #else | |||
2612 | BYTES_BIG_ENDIAN | |||
2613 | #endif | |||
2614 | ) | |||
2615 | shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT; | |||
2616 | bytelen = ssize - bytepos; | |||
2617 | gcc_assert (maybe_gt (bytelen, 0)); | |||
2618 | } | |||
2619 | ||||
2620 | /* If we won't be loading directly from memory, protect the real source | |||
2621 | from strange tricks we might play; but make sure that the source can | |||
2622 | be loaded directly into the destination. */ | |||
2623 | src = orig_src; | |||
2624 | if (!MEM_P (orig_src) | |||
2625 | && (!CONSTANT_P (orig_src) | |||
2626 | || (GET_MODE (orig_src) != mode | |||
2627 | && GET_MODE (orig_src) != VOIDmode))) | |||
2628 | { | |||
2629 | if (GET_MODE (orig_src) == VOIDmode) | |||
2630 | src = gen_reg_rtx (mode); | |||
2631 | else | |||
2632 | src = gen_reg_rtx (GET_MODE (orig_src)); | |||
2633 | ||||
2634 | emit_move_insn (src, orig_src); | |||
2635 | } | |||
2636 | ||||
2637 | /* Optimize the access just a bit. */ | |||
2638 | if (MEM_P (src) | |||
2639 | && (! targetm.slow_unaligned_access (mode, MEM_ALIGN (src)) | |||
2640 | || MEM_ALIGN (src) >= GET_MODE_ALIGNMENT (mode)) | |||
2641 | && multiple_p (bytepos * BITS_PER_UNIT, GET_MODE_ALIGNMENT (mode)) | |||
2642 | && known_eq (bytelen, GET_MODE_SIZE (mode))) | |||
2643 | { | |||
2644 | tmps[i] = gen_reg_rtx (mode); | |||
2645 | emit_move_insn (tmps[i], adjust_address (src, mode, bytepos)); | |||
2646 | } | |||
2647 | else if (COMPLEX_MODE_P (mode) | |||
2648 | && GET_MODE (src) == mode | |||
2649 | && known_eq (bytelen, GET_MODE_SIZE (mode))) | |||
2650 | /* Let emit_move_complex do the bulk of the work. */ | |||
2651 | tmps[i] = src; | |||
2652 | else if (GET_CODE (src) == CONCAT) | |||
2653 | { | |||
2654 | poly_int64 slen = GET_MODE_SIZE (GET_MODE (src)); | |||
2655 | poly_int64 slen0 = GET_MODE_SIZE (GET_MODE (XEXP (src, 0))); | |||
2656 | unsigned int elt; | |||
2657 | poly_int64 subpos; | |||
2658 | ||||
2659 | if (can_div_trunc_p (bytepos, slen0, &elt, &subpos) | |||
2660 | && known_le (subpos + bytelen, slen0)) | |||
2661 | { | |||
2662 | /* The following assumes that the concatenated objects all | |||
2663 | have the same size. In this case, a simple calculation | |||
2664 | can be used to determine the object and the bit field | |||
2665 | to be extracted. */ | |||
2666 | tmps[i] = XEXP (src, elt); | |||
2667 | if (maybe_ne (subpos, 0) | |||
2668 | || maybe_ne (subpos + bytelen, slen0) | |||
2669 | || (!CONSTANT_P (tmps[i]) | |||
2670 | && (!REG_P (tmps[i]) || GET_MODE (tmps[i]) != mode))) | |||
2671 | tmps[i] = extract_bit_field (tmps[i], bytelen * BITS_PER_UNIT, | |||
2672 | subpos * BITS_PER_UNIT, | |||
2673 | 1, NULL_RTX, mode, mode, false, | |||
2674 | NULL); | |||
2675 | } | |||
2676 | else | |||
2677 | { | |||
2678 | rtx mem; | |||
2679 | ||||
2680 | gcc_assert (known_eq (bytepos, 0)); | |||
2681 | mem = assign_stack_temp (GET_MODE (src), slen); | |||
2682 | emit_move_insn (mem, src); | |||
2683 | tmps[i] = extract_bit_field (mem, bytelen * BITS_PER_UNIT, | |||
2684 | 0, 1, NULL_RTX, mode, mode, false, | |||
2685 | NULL); | |||
2686 | } | |||
2687 | } | |||
2688 | else if (CONSTANT_P (src) && GET_MODE (dst) != BLKmode | |||
2689 | && XVECLEN (dst, 0) > 1) | |||
2690 | tmps[i] = simplify_gen_subreg (mode, src, GET_MODE (dst), bytepos); | |||
2691 | else if (CONSTANT_P (src)) | |||
2692 | { | |||
2693 | if (known_eq (bytelen, ssize)) | |||
2694 | tmps[i] = src; | |||
2695 | else | |||
2696 | { | |||
2697 | rtx first, second; | |||
2698 | ||||
2699 | /* TODO: const_wide_int can have sizes other than this... */ | |||
2700 | gcc_assert (known_eq (2 * bytelen, ssize)); | |||
2701 | split_double (src, &first, &second); | |||
2702 | if (i) | |||
2703 | tmps[i] = second; | |||
2704 | else | |||
2705 | tmps[i] = first; | |||
2706 | } | |||
2707 | } | |||
2708 | else if (REG_P (src) && GET_MODE (src) == mode) | |||
2709 | tmps[i] = src; | |||
2710 | else | |||
2711 | tmps[i] = extract_bit_field (src, bytelen * BITS_PER_UNIT, | |||
2712 | bytepos * BITS_PER_UNIT, 1, NULL_RTX, | |||
2713 | mode, mode, false, NULL); | |||
2714 | ||||
2715 | if (maybe_ne (shift, 0)) | |||
2716 | tmps[i] = expand_shift (LSHIFT_EXPR, mode, tmps[i], | |||
2717 | shift, tmps[i], 0); | |||
2718 | } | |||
2719 | } | |||
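/* Worked example for the trailing-fragment path above (a sketch):
   loading a 10-byte value (ssize == 10) into DImode pieces at offsets 0
   and 8 makes the second piece overrun by 6 bytes, so its bytelen is
   clipped to 2; when the padding rules say the data sits at the top of
   the register, shift becomes (8 - 2) * BITS_PER_UNIT and the loaded
   bits are moved up accordingly by the expand_shift call.  */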
2720 | ||||
2721 | /* Emit code to move a block SRC of type TYPE to a block DST, | |||
2722 | where DST is non-consecutive registers represented by a PARALLEL. | |||
2723 | SSIZE represents the total size of block ORIG_SRC in bytes, or -1 | |||
2724 | if not known. */ | |||
2725 | ||||
2726 | void | |||
2727 | emit_group_load (rtx dst, rtx src, tree type, poly_int64 ssize) | |||
2728 | { | |||
2729 | rtx *tmps; | |||
2730 | int i; | |||
2731 | ||||
2732 | tmps = XALLOCAVEC (rtx, XVECLEN (dst, 0)); | |||
2733 | emit_group_load_1 (tmps, dst, src, type, ssize); | |||
2734 | ||||
2735 | /* Copy the extracted pieces into the proper (probable) hard regs. */ | |||
2736 | for (i = 0; i < XVECLEN (dst, 0); i++) | |||
2737 | { | |||
2738 | rtx d = XEXP (XVECEXP (dst, 0, i), 0); | |||
2739 | if (d == NULL) | |||
2740 | continue; | |||
2741 | emit_move_insn (d, tmps[i]); | |||
2742 | } | |||
2743 | } | |||
2744 | ||||
2745 | /* Similar, but load SRC into new pseudos in a format that looks like | |||
2746 | PARALLEL. This can later be fed to emit_group_move to get things | |||
2747 | in the right place. */ | |||
2748 | ||||
2749 | rtx | |||
2750 | emit_group_load_into_temps (rtx parallel, rtx src, tree type, poly_int64 ssize) | |||
2751 | { | |||
2752 | rtvec vec; | |||
2753 | int i; | |||
2754 | ||||
2755 | vec = rtvec_alloc (XVECLEN (parallel, 0)); | |||
2756 | emit_group_load_1 (&RTVEC_ELT (vec, 0), parallel, src, type, ssize); | |||
2757 | ||||
2758 | /* Convert the vector to look just like the original PARALLEL, except | |||
2759 | with the computed values. */ | |||
2760 | for (i = 0; i < XVECLEN (parallel, 0); i++) | |||
2761 | { | |||
2762 | rtx e = XVECEXP (parallel, 0, i); | |||
2763 | rtx d = XEXP (e, 0); | |||
2764 | ||||
2765 | if (d) | |||
2766 | { | |||
2767 | d = force_reg (GET_MODE (d), RTVEC_ELT (vec, i)); | |||
2768 | e = alloc_EXPR_LIST (REG_NOTE_KIND (e), d, XEXP (e, 1)); | |||
2769 | } | |||
2770 | RTVEC_ELT (vec, i) = e; | |||
2771 | } | |||
2772 | ||||
2773 | return gen_rtx_PARALLEL (GET_MODE (parallel), vec); | |||
2774 | } | |||
2775 | ||||
2776 | /* Emit code to move a block SRC to block DST, where SRC and DST are | |||
2777 | non-consecutive groups of registers, each represented by a PARALLEL. */ | |||
2778 | ||||
2779 | void | |||
2780 | emit_group_move (rtx dst, rtx src) | |||
2781 | { | |||
2782 | int i; | |||
2783 | ||||
2784 | gcc_assert (GET_CODE (src) == PARALLEL | |||
2785 | && GET_CODE (dst) == PARALLEL | |||
2786 | && XVECLEN (src, 0) == XVECLEN (dst, 0)); | |||
2787 | ||||
2788 | /* Skip first entry if NULL. */ | |||
2789 | for (i = XEXP (XVECEXP (src, 0, 0), 0) ? 0 : 1; i < XVECLEN (src, 0); i++) | |||
2790 | emit_move_insn (XEXP (XVECEXP (dst, 0, i), 0), | |||
2791 | XEXP (XVECEXP (src, 0, i), 0)); | |||
2792 | } | |||
2793 | ||||
2794 | /* Move a group of registers represented by a PARALLEL into pseudos. */ | |||
2795 | ||||
2796 | rtx | |||
2797 | emit_group_move_into_temps (rtx src) | |||
2798 | { | |||
2799 | rtvec vec = rtvec_alloc (XVECLEN (src, 0)); | |||
2800 | int i; | |||
2801 | ||||
2802 | for (i = 0; i < XVECLEN (src, 0); i++) | |||
2803 | { | |||
2804 | rtx e = XVECEXP (src, 0, i); | |||
2805 | rtx d = XEXP (e, 0); | |||
2806 | ||||
2807 | if (d) | |||
2808 | e = alloc_EXPR_LIST (REG_NOTE_KIND (e), copy_to_reg (d), XEXP (e, 1)); | |||
2809 | RTVEC_ELT (vec, i) = e; | |||
2810 | } | |||
2811 | ||||
2812 | return gen_rtx_PARALLEL (GET_MODE (src), vec); | |||
2813 | } | |||
2814 | ||||
2815 | /* Emit code to move a block SRC to a block ORIG_DST of type TYPE, | |||
2816 | where SRC is non-consecutive registers represented by a PARALLEL. | |||
2817 | SSIZE represents the total size of block ORIG_DST, or -1 if not | |||
2818 | known. */ | |||
2819 | ||||
2820 | void | |||
2821 | emit_group_store (rtx orig_dst, rtx src, tree type ATTRIBUTE_UNUSED, | |||
2822 | poly_int64 ssize) | |||
2823 | { | |||
2824 | rtx *tmps, dst; | |||
2825 | int start, finish, i; | |||
2826 | machine_mode m = GET_MODE (orig_dst); | |||
2827 | ||||
2828 | gcc_assert (GET_CODE (src) == PARALLEL); | |||
2829 | ||||
2830 | if (!SCALAR_INT_MODE_P (m) | |||
2831 | && !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT) | |||
2832 | { | |||
2833 | scalar_int_mode imode; | |||
2834 | if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode)) | |||
2835 | { | |||
2836 | dst = gen_reg_rtx (imode); | |||
2837 | emit_group_store (dst, src, type, ssize); | |||
2838 | dst = gen_lowpartrtl_hooks.gen_lowpart (GET_MODE (orig_dst)((machine_mode) (orig_dst)->mode), dst); | |||
2839 | } | |||
2840 | else | |||
2841 | { | |||
2842 | dst = assign_stack_temp (GET_MODE (orig_dst)((machine_mode) (orig_dst)->mode), ssize); | |||
2843 | emit_group_store (dst, src, type, ssize); | |||
2844 | } | |||
2845 | emit_move_insn (orig_dst, dst); | |||
2846 | return; | |||
2847 | } | |||
2848 | ||||
2849 | /* Check for a NULL entry, used to indicate that the parameter goes | |||
2850 | both on the stack and in registers. */ | |||
2851 | if (XEXP (XVECEXP (src, 0, 0), 0)((((((((src)->u.fld[0]).rt_rtvec))->elem[0]))->u.fld [0]).rt_rtx)) | |||
2852 | start = 0; | |||
2853 | else | |||
2854 | start = 1; | |||
2855 | finish = XVECLEN (src, 0)(((((src)->u.fld[0]).rt_rtvec))->num_elem); | |||
2856 | ||||
2857 | tmps = XALLOCAVEC (rtx, finish)((rtx *) __builtin_alloca(sizeof (rtx) * (finish))); | |||
2858 | ||||
2859 | /* Copy the (probable) hard regs into pseudos. */ | |||
2860 | for (i = start; i < finish; i++) | |||
2861 | { | |||
2862 | rtx reg = XEXP (XVECEXP (src, 0, i), 0)((((((((src)->u.fld[0]).rt_rtvec))->elem[i]))->u.fld [0]).rt_rtx); | |||
2863 | if (!REG_P (reg)(((enum rtx_code) (reg)->code) == REG) || REGNO (reg)(rhs_regno(reg)) < FIRST_PSEUDO_REGISTER76) | |||
2864 | { | |||
2865 | tmps[i] = gen_reg_rtx (GET_MODE (reg)((machine_mode) (reg)->mode)); | |||
2866 | emit_move_insn (tmps[i], reg); | |||
2867 | } | |||
2868 | else | |||
2869 | tmps[i] = reg; | |||
2870 | } | |||
2871 | ||||
2872 | /* If we won't be storing directly into memory, protect the real destination | |||
2873 | from strange tricks we might play. */ | |||
2874 | dst = orig_dst; | |||
2875 | if (GET_CODE (dst)((enum rtx_code) (dst)->code) == PARALLEL) | |||
2876 | { | |||
2877 | rtx temp; | |||
2878 | ||||
2879 | /* We can get a PARALLEL dst if there is a conditional expression in | |||
2880 | a return statement. In that case, the dst and src are the same, | |||
2881 | so no action is necessary. */ | |||
2882 | if (rtx_equal_p (dst, src)) | |||
2883 | return; | |||
2884 | ||||
2885 | /* It is unclear if we can ever reach here, but we may as well handle | |||
2886 | it. Allocate a temporary, and split this into a store/load to/from | |||
2887 | the temporary. */ | |||
2888 | temp = assign_stack_temp (GET_MODE (dst)((machine_mode) (dst)->mode), ssize); | |||
2889 | emit_group_store (temp, src, type, ssize); | |||
2890 | emit_group_load (dst, temp, type, ssize); | |||
2891 | return; | |||
2892 | } | |||
2893 | else if (!MEM_P (dst)(((enum rtx_code) (dst)->code) == MEM) && GET_CODE (dst)((enum rtx_code) (dst)->code) != CONCAT) | |||
2894 | { | |||
2895 | machine_mode outer = GET_MODE (dst)((machine_mode) (dst)->mode); | |||
2896 | machine_mode inner; | |||
2897 | poly_int64 bytepos; | |||
2898 | bool done = false; | |||
2899 | rtx temp; | |||
2900 | ||||
2901 | if (!REG_P (dst)(((enum rtx_code) (dst)->code) == REG) || REGNO (dst)(rhs_regno(dst)) < FIRST_PSEUDO_REGISTER76) | |||
2902 | dst = gen_reg_rtx (outer); | |||
2903 | ||||
2904 | /* Make life a bit easier for combine: if the first element of the | |||
2905 | vector is the low part of the destination mode, use a paradoxical | |||
2906 | subreg to initialize the destination. */ | |||
2907 | if (start < finish) | |||
2908 | { | |||
2909 | inner = GET_MODE (tmps[start])((machine_mode) (tmps[start])->mode); | |||
2910 | bytepos = subreg_lowpart_offset (inner, outer); | |||
2911 | if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, start), 1)),(!maybe_ne (rtx_to_poly_int64 (((((((((src)->u.fld[0]).rt_rtvec ))->elem[start]))->u.fld[1]).rt_rtx)), bytepos)) | |||
2912 | bytepos)(!maybe_ne (rtx_to_poly_int64 (((((((((src)->u.fld[0]).rt_rtvec ))->elem[start]))->u.fld[1]).rt_rtx)), bytepos))) | |||
2913 | { | |||
2914 | temp = simplify_gen_subreg (outer, tmps[start], inner, 0); | |||
2915 | if (temp) | |||
2916 | { | |||
2917 | emit_move_insn (dst, temp); | |||
2918 | done = true; | |||
2919 | start++; | |||
2920 | } | |||
2921 | } | |||
2922 | } | |||
2923 | ||||
2924 | /* If the first element wasn't the low part, try the last. */ | |||
2925 | if (!done | |||
2926 | && start < finish - 1) | |||
2927 | { | |||
2928 | inner = GET_MODE (tmps[finish - 1])((machine_mode) (tmps[finish - 1])->mode); | |||
2929 | bytepos = subreg_lowpart_offset (inner, outer); | |||
2930 | if (known_eq (rtx_to_poly_int64 (XEXP (XVECEXP (src, 0,(!maybe_ne (rtx_to_poly_int64 (((((((((src)->u.fld[0]).rt_rtvec ))->elem[finish - 1]))->u.fld[1]).rt_rtx)), bytepos)) | |||
2931 | finish - 1), 1)),(!maybe_ne (rtx_to_poly_int64 (((((((((src)->u.fld[0]).rt_rtvec ))->elem[finish - 1]))->u.fld[1]).rt_rtx)), bytepos)) | |||
2932 | bytepos)(!maybe_ne (rtx_to_poly_int64 (((((((((src)->u.fld[0]).rt_rtvec ))->elem[finish - 1]))->u.fld[1]).rt_rtx)), bytepos))) | |||
2933 | { | |||
2934 | temp = simplify_gen_subreg (outer, tmps[finish - 1], inner, 0); | |||
2935 | if (temp) | |||
2936 | { | |||
2937 | emit_move_insn (dst, temp); | |||
2938 | done = true; | |||
2939 | finish--; | |||
2940 | } | |||
2941 | } | |||
2942 | } | |||
2943 | ||||
2944 | /* Otherwise, simply initialize the result to zero. */ | |||
2945 | if (!done) | |||
2946 | emit_move_insn (dst, CONST0_RTX (outer)(const_tiny_rtx[0][(int) (outer)])); | |||
2947 | } | |||
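
  /* A concrete example of the shortcut above (illustrative only): if DST
     is a DImode pseudo and the first piece is an SImode register sitting
     at subreg_lowpart_offset (SImode, DImode) -- offset 0 on
     little-endian, 4 on big-endian -- the single move

       (set (reg:DI dst) (subreg:DI (reg:SI tmp) 0))

     initializes the whole destination, and the loop below only has to
     merge in the remaining pieces.  */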

  /* Process the pieces.  */
  for (i = start; i < finish; i++)
    {
      poly_int64 bytepos = rtx_to_poly_int64 (XEXP (XVECEXP (src, 0, i), 1));
      machine_mode mode = GET_MODE (tmps[i]);
      poly_int64 bytelen = GET_MODE_SIZE (mode);
      poly_uint64 adj_bytelen;
      rtx dest = dst;

      /* Handle trailing fragments that run over the size of the struct.
	 It's the target's responsibility to make sure that the fragment
	 cannot be strictly smaller in some cases and strictly larger
	 in others.  */
      gcc_checking_assert (ordered_p (bytepos + bytelen, ssize));
      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
	adj_bytelen = ssize - bytepos;
      else
	adj_bytelen = bytelen;

      /* Deal with destination CONCATs by either storing into one of the parts
	 or doing a copy after storing into a register or stack temporary.  */
      if (GET_CODE (dst) == CONCAT)
	{
	  if (known_le (bytepos + adj_bytelen,
			GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
	    dest = XEXP (dst, 0);

	  else if (known_ge (bytepos, GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)))))
	    {
	      bytepos -= GET_MODE_SIZE (GET_MODE (XEXP (dst, 0)));
	      dest = XEXP (dst, 1);
	    }

	  else
	    {
	      machine_mode dest_mode = GET_MODE (dest);
	      machine_mode tmp_mode = GET_MODE (tmps[i]);
	      scalar_int_mode dest_imode;

	      gcc_assert (known_eq (bytepos, 0) && XVECLEN (src, 0));

	      /* If the source is a single scalar integer register, and the
		 destination has a complex mode for which a same-sized integer
		 mode exists, then we can take the left-justified part of the
		 source in the complex mode.  */
	      if (finish == start + 1
		  && REG_P (tmps[i])
		  && SCALAR_INT_MODE_P (tmp_mode)
		  && COMPLEX_MODE_P (dest_mode)
		  && int_mode_for_mode (dest_mode).exists (&dest_imode))
		{
		  const scalar_int_mode tmp_imode
		    = as_a <scalar_int_mode> (tmp_mode);

		  if (GET_MODE_BITSIZE (dest_imode)
		      < GET_MODE_BITSIZE (tmp_imode))
		    {
		      dest = gen_reg_rtx (dest_imode);
		      if (BYTES_BIG_ENDIAN)
			tmps[i] = expand_shift (RSHIFT_EXPR, tmp_mode, tmps[i],
						GET_MODE_BITSIZE (tmp_imode)
						- GET_MODE_BITSIZE (dest_imode),
						NULL_RTX, 1);
		      emit_move_insn (dest, gen_lowpart (dest_imode, tmps[i]));
		      dst = gen_lowpart (dest_mode, dest);
		    }
		  else
		    dst = gen_lowpart (dest_mode, tmps[i]);
		}

	      /* Otherwise spill the source onto the stack using the more
		 aligned of the two modes.  */
	      else if (GET_MODE_ALIGNMENT (dest_mode)
		       >= GET_MODE_ALIGNMENT (tmp_mode))
		{
		  dest = assign_stack_temp (dest_mode,
					    GET_MODE_SIZE (dest_mode));
		  emit_move_insn (adjust_address (dest, tmp_mode, bytepos),
				  tmps[i]);
		  dst = dest;
		}

	      else
		{
		  dest = assign_stack_temp (tmp_mode,
					    GET_MODE_SIZE (tmp_mode));
		  emit_move_insn (dest, tmps[i]);
		  dst = adjust_address (dest, dest_mode, bytepos);
		}

	      break;
	    }
	}

      /* Handle trailing fragments that run over the size of the struct.  */
      if (known_size_p (ssize) && maybe_gt (bytepos + bytelen, ssize))
	{
	  /* store_bit_field always takes its value from the lsb.
	     Move the fragment to the lsb if it's not already there.  */
	  if (
#ifdef BLOCK_REG_PADDING
	      BLOCK_REG_PADDING (GET_MODE (orig_dst), type, i == start)
	      == (BYTES_BIG_ENDIAN ? PAD_UPWARD : PAD_DOWNWARD)
#else
	      BYTES_BIG_ENDIAN
#endif
	      )
	    {
	      poly_int64 shift = (bytelen - (ssize - bytepos)) * BITS_PER_UNIT;
	      tmps[i] = expand_shift (RSHIFT_EXPR, mode, tmps[i],
				      shift, tmps[i], 0);
	    }

	  /* Make sure not to write past the end of the struct.  */
	  store_bit_field (dest,
			   adj_bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
			   bytepos * BITS_PER_UNIT, ssize * BITS_PER_UNIT - 1,
			   VOIDmode, tmps[i], false, false);
	}

      /* Optimize the access just a bit.  */
      else if (MEM_P (dest)
	       && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (dest))
		   || MEM_ALIGN (dest) >= GET_MODE_ALIGNMENT (mode))
	       && multiple_p (bytepos * BITS_PER_UNIT,
			      GET_MODE_ALIGNMENT (mode))
	       && known_eq (bytelen, GET_MODE_SIZE (mode)))
	emit_move_insn (adjust_address (dest, mode, bytepos), tmps[i]);

      else
	store_bit_field (dest, bytelen * BITS_PER_UNIT, bytepos * BITS_PER_UNIT,
			 0, 0, mode, tmps[i], false, false);
    }

  /* Copy from the pseudo into the (probable) hard reg.  */
  if (orig_dst != dst)
    emit_move_insn (orig_dst, dst);
}

/* Return a form of X that does not use a PARALLEL.  TYPE is the type
   of the value stored in X.  */

rtx
maybe_emit_group_store (rtx x, tree type)
{
  machine_mode mode = TYPE_MODE (type);
  gcc_checking_assert (GET_MODE (x) == VOIDmode || GET_MODE (x) == mode);
  if (GET_CODE (x) == PARALLEL)
    {
      rtx result = gen_reg_rtx (mode);
      emit_group_store (result, x, type, int_size_in_bytes (type));
      return result;
    }
  return x;
}
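
/* A minimal usage sketch (hypothetical caller, for illustration): code
   that must hand a call's return value to consumers expecting a plain
   register can normalize it first:

     rtx val = maybe_emit_group_store (ret_rtx, TREE_TYPE (exp));

   where RET_RTX and EXP stand for the caller's value and expression.
   If the value was a PARALLEL, VAL is now a pseudo of TYPE's mode;
   otherwise it is returned unchanged.  */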

/* Copy a BLKmode object of TYPE out of a register SRCREG into TARGET.

   This is used on targets that return BLKmode values in registers.  */

static void
copy_blkmode_from_reg (rtx target, rtx srcreg, tree type)
{
  unsigned HOST_WIDE_INT bytes = int_size_in_bytes (type);
  rtx src = NULL, dst = NULL;
  unsigned HOST_WIDE_INT bitsize = MIN (TYPE_ALIGN (type), BITS_PER_WORD);
  unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0;
  /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (GET_MODE (srcreg));
  fixed_size_mode tmode = as_a <fixed_size_mode> (GET_MODE (target));
  fixed_size_mode copy_mode;

  /* BLKmode registers created in the back-end shouldn't have survived.  */
  gcc_assert (mode != BLKmode);

  /* If the structure doesn't take up a whole number of words, see whether
     SRCREG is padded on the left or on the right.  If it's on the left,
     set PADDING_CORRECTION to the number of bits to skip.

     In most ABIs, the structure will be returned at the least significant
     end of the register, which translates to right padding on little-endian
     targets and left padding on big-endian targets.  The opposite
     holds if the structure is returned at the most significant
     end of the register.  */
  if (bytes % UNITS_PER_WORD != 0
      && (targetm.calls.return_in_msb (type)
	  ? !BYTES_BIG_ENDIAN
	  : BYTES_BIG_ENDIAN))
    padding_correction
      = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD) * BITS_PER_UNIT));
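
  /* Worked example (illustrative): for a 6-byte structure on a 32-bit
     big-endian target that is not returned in the MSB, bytes %
     UNITS_PER_WORD == 2, so padding_correction = 32 - 2 * 8 = 16 bits
     of left padding to skip in SRCREG.  */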

  /* We can use a single move if we have an exact mode for the size.  */
  else if (MEM_P (target)
	   && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (target))
	       || MEM_ALIGN (target) >= GET_MODE_ALIGNMENT (mode))
	   && bytes == GET_MODE_SIZE (mode))
    {
      emit_move_insn (adjust_address (target, mode, 0), srcreg);
      return;
    }

  /* And if we additionally have the same mode for a register.  */
  else if (REG_P (target)
	   && GET_MODE (target) == mode
	   && bytes == GET_MODE_SIZE (mode))
    {
      emit_move_insn (target, srcreg);
      return;
    }

  /* This code assumes srcreg is at least a full word.  If it isn't, copy it
     into a new pseudo which is a full word.  */
  if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
    {
      srcreg = convert_to_mode (word_mode, srcreg, TYPE_UNSIGNED (type));
      mode = word_mode;
    }

  /* Copy the structure BITSIZE bits at a time.  If the target lives in
     memory, take care of not reading/writing past its end by selecting
     a copy mode suited to BITSIZE.  This should always be possible given
     how it is computed.

     If the target lives in a register, make sure not to select a copy mode
     larger than the mode of the register.

     We could probably emit more efficient code for machines which do not use
     strict alignment, but it doesn't seem worth the effort at the current
     time.  */

  copy_mode = word_mode;
  if (MEM_P (target))
    {
      opt_scalar_int_mode mem_mode = int_mode_for_size (bitsize, 1);
      if (mem_mode.exists ())
	copy_mode = mem_mode.require ();
    }
  else if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
    copy_mode = tmode;

  for (bitpos = 0, xbitpos = padding_correction;
       bitpos < bytes * BITS_PER_UNIT;
       bitpos += bitsize, xbitpos += bitsize)
    {
      /* We need a new source operand each time xbitpos is on a
	 word boundary and when xbitpos == padding_correction
	 (the first time through).  */
      if (xbitpos % BITS_PER_WORD == 0 || xbitpos == padding_correction)
	src = operand_subword_force (srcreg, xbitpos / BITS_PER_WORD, mode);

      /* We need a new destination operand each time bitpos is on
	 a word boundary.  */
      if (REG_P (target) && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
	dst = target;
      else if (bitpos % BITS_PER_WORD == 0)
	dst = operand_subword (target, bitpos / BITS_PER_WORD, 1, tmode);

      /* Use xbitpos for the source extraction (right justified) and
	 bitpos for the destination store (left justified).  */
      store_bit_field (dst, bitsize, bitpos % BITS_PER_WORD, 0, 0, copy_mode,
		       extract_bit_field (src, bitsize,
					  xbitpos % BITS_PER_WORD, 1,
					  NULL_RTX, copy_mode, copy_mode,
					  false, NULL),
		       false, false);
    }
}

/* Copy BLKmode value SRC into a register of mode MODE_IN.  Return the
   register if it contains any data, otherwise return null.

   This is used on targets that return BLKmode values in registers.  */

rtx
copy_blkmode_to_reg (machine_mode mode_in, tree src)
{
  int i, n_regs;
  unsigned HOST_WIDE_INT bitpos, xbitpos, padding_correction = 0, bytes;
  unsigned int bitsize;
  rtx *dst_words, dst, x, src_word = NULL_RTX, dst_word = NULL_RTX;
  /* No current ABI uses variable-sized modes to pass a BLKmode type.  */
  fixed_size_mode mode = as_a <fixed_size_mode> (mode_in);
  fixed_size_mode dst_mode;
  scalar_int_mode min_mode;

  gcc_assert (TYPE_MODE (TREE_TYPE (src)) == BLKmode);

  x = expand_normal (src);

  bytes = arg_int_size_in_bytes (TREE_TYPE (src));
  if (bytes == 0)
    return NULL_RTX;

  /* If the structure doesn't take up a whole number of words, see
     whether the register value should be padded on the left or on
     the right.  Set PADDING_CORRECTION to the number of padding
     bits needed on the left side.

     In most ABIs, the structure will be returned at the least significant
     end of the register, which translates to right padding on little-endian
     targets and left padding on big-endian targets.  The opposite
     holds if the structure is returned at the most significant
     end of the register.  */
  if (bytes % UNITS_PER_WORD != 0
      && (targetm.calls.return_in_msb (TREE_TYPE (src))
	  ? !BYTES_BIG_ENDIAN
	  : BYTES_BIG_ENDIAN))
    padding_correction = (BITS_PER_WORD - ((bytes % UNITS_PER_WORD)
					   * BITS_PER_UNIT));

  n_regs = (bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
  dst_words = XALLOCAVEC (rtx, n_regs);
  bitsize = MIN (TYPE_ALIGN (TREE_TYPE (src)), BITS_PER_WORD);
  min_mode = smallest_int_mode_for_size (bitsize);

  /* Copy the structure BITSIZE bits at a time.  */
  for (bitpos = 0, xbitpos = padding_correction;
       bitpos < bytes * BITS_PER_UNIT;
       bitpos += bitsize, xbitpos += bitsize)
    {
      /* We need a new destination pseudo each time xbitpos is
	 on a word boundary and when xbitpos == padding_correction
	 (the first time through).  */
      if (xbitpos % BITS_PER_WORD == 0
	  || xbitpos == padding_correction)
	{
	  /* Generate an appropriate register.  */
	  dst_word = gen_reg_rtx (word_mode);
	  dst_words[xbitpos / BITS_PER_WORD] = dst_word;

	  /* Clear the destination before we move anything into it.  */
	  emit_move_insn (dst_word, CONST0_RTX (word_mode));
	}

      /* Find the largest integer mode that can be used to copy all or as
	 many bits as possible of the structure if the target supports larger
	 copies.  There are too many corner cases here w.r.t. alignments on
	 the read/writes.  So if there is any padding just use single byte
	 operations.  */
      opt_scalar_int_mode mode_iter;
      if (padding_correction == 0 && !STRICT_ALIGNMENT)
	{
	  FOR_EACH_MODE_FROM (mode_iter, min_mode)
	    {
	      unsigned int msize = GET_MODE_BITSIZE (mode_iter.require ());
	      if (msize <= ((bytes * BITS_PER_UNIT) - bitpos)
		  && msize <= BITS_PER_WORD)
		bitsize = msize;
	      else
		break;
	    }
	}

      /* We need a new source operand each time bitpos is on a word
	 boundary.  */
      if (bitpos % BITS_PER_WORD == 0)
	src_word = operand_subword_force (x, bitpos / BITS_PER_WORD, BLKmode);

      /* Use bitpos for the source extraction (left justified) and
	 xbitpos for the destination store (right justified).  */
      store_bit_field (dst_word, bitsize, xbitpos % BITS_PER_WORD,
		       0, 0, word_mode,
		       extract_bit_field (src_word, bitsize,
					  bitpos % BITS_PER_WORD, 1,
					  NULL_RTX, word_mode, word_mode,
					  false, NULL),
		       false, false);
    }

  if (mode == BLKmode)
    {
      /* Find the smallest integer mode large enough to hold the
	 entire structure.  */
      opt_scalar_int_mode mode_iter;
      FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
	if (GET_MODE_SIZE (mode_iter.require ()) >= bytes)
	  break;

      /* A suitable mode should have been found.  */
      mode = mode_iter.require ();
    }

  if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (word_mode))
    dst_mode = word_mode;
  else
    dst_mode = mode;
  dst = gen_reg_rtx (dst_mode);

  for (i = 0; i < n_regs; i++)
    emit_move_insn (operand_subword (dst, i, 0, dst_mode), dst_words[i]);

  if (mode != dst_mode)
    dst = gen_lowpart (mode, dst);

  return dst;
}
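
/* A hedged usage sketch (assumed caller shape, not from this file): a
   return-expansion path for a BLKmode result might do

     rtx val = copy_blkmode_to_reg (GET_MODE (result_rtl), retval_rhs);
     if (val)
       emit_move_insn (result_rtl, val);

   where RESULT_RTL and RETVAL_RHS stand for the caller's return
   location and GIMPLE return value.  */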

/* Add a USE expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
use_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg));

  if (!HARD_REGISTER_P (reg))
    return;

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_USE (VOIDmode, reg), *call_fusage);
}

/* Add a CLOBBER expression for REG to the (possibly empty) list pointed
   to by CALL_FUSAGE.  REG must denote a hard register.  */

void
clobber_reg_mode (rtx *call_fusage, rtx reg, machine_mode mode)
{
  gcc_assert (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER);

  *call_fusage
    = gen_rtx_EXPR_LIST (mode, gen_rtx_CLOBBER (VOIDmode, reg), *call_fusage);
}

/* Add USE expressions to *CALL_FUSAGE for each of NREGS consecutive regs,
   starting at REGNO.  All of these registers must be hard registers.  */

void
use_regs (rtx *call_fusage, int regno, int nregs)
{
  int i;

  gcc_assert (regno + nregs <= FIRST_PSEUDO_REGISTER);

  for (i = 0; i < nregs; i++)
    use_reg (call_fusage, regno_reg_rtx[regno + i]);
}

/* Add USE expressions to *CALL_FUSAGE for each REG contained in the
   PARALLEL REGS.  This is for calls that pass values in multiple
   non-contiguous locations.  The Irix 6 ABI has examples of this.  */

void
use_group_regs (rtx *call_fusage, rtx regs)
{
  int i;

  for (i = 0; i < XVECLEN (regs, 0); i++)
    {
      rtx reg = XEXP (XVECEXP (regs, 0, i), 0);

      /* A NULL entry means the parameter goes both on the stack and in
	 registers.  This can also be a MEM for targets that pass values
	 partially on the stack and partially in registers.  */
      if (reg != 0 && REG_P (reg))
	use_reg (call_fusage, reg);
    }
}
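
/* Illustrative note: the list built by the helpers above becomes the
   insn's CALL_INSN_FUNCTION_USAGE, a chain of EXPR_LISTs roughly like

     (expr_list:DI (use (reg:DI 5))
		   (expr_list:SI (clobber (reg:SI 0))
				 (nil)))

   telling dataflow which hard registers the call uses or clobbers
   beyond what its pattern says.  */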

/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the code of the expression on the RHS is CODE.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr (tree name, enum tree_code code)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || gimple_assign_rhs_code (def_stmt) != code)
    return NULL;

  return def_stmt;
}
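
/* A minimal usage sketch (hypothetical, but matching how expansion code
   elsewhere in this file peeks through SSA names to fuse operations):

     gimple *def = get_def_for_expr (treeop0, MULT_EXPR);
     if (def)
       {
	 tree op0 = gimple_assign_rhs1 (def);
	 tree op1 = gimple_assign_rhs2 (def);
	 ... expand a fused multiply from OP0 and OP1 ...
       }
 */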

/* Return the defining gimple statement for SSA_NAME NAME if it is an
   assignment and the class of the expression on the RHS is CLASS.  Return
   NULL otherwise.  */

static gimple *
get_def_for_expr_class (tree name, enum tree_code_class tclass)
{
  gimple *def_stmt;

  if (TREE_CODE (name) != SSA_NAME)
    return NULL;

  def_stmt = get_gimple_for_ssa_name (name);
  if (!def_stmt
      || TREE_CODE_CLASS (gimple_assign_rhs_code (def_stmt)) != tclass)
    return NULL;

  return def_stmt;
}

/* Write zeros through the storage of OBJECT.  If OBJECT has BLKmode, SIZE is
   its length in bytes.  */

rtx
clear_storage_hints (rtx object, rtx size, enum block_op_methods method,
		     unsigned int expected_align, HOST_WIDE_INT expected_size,
		     unsigned HOST_WIDE_INT min_size,
		     unsigned HOST_WIDE_INT max_size,
		     unsigned HOST_WIDE_INT probable_max_size,
		     unsigned ctz_size)
{
  machine_mode mode = GET_MODE (object);
  unsigned int align;

  gcc_assert (method == BLOCK_OP_NORMAL || method == BLOCK_OP_TAILCALL);

  /* If OBJECT is not BLKmode and SIZE is the same size as its mode,
     just move a zero.  Otherwise, do this a piece at a time.  */
  poly_int64 size_val;
  if (mode != BLKmode
      && poly_int_rtx_p (size, &size_val)
      && known_eq (size_val, GET_MODE_SIZE (mode)))
    {
      rtx zero = CONST0_RTX (mode);
      if (zero != NULL)
	{
	  emit_move_insn (object, zero);
	  return NULL;
	}

      if (COMPLEX_MODE_P (mode))
	{
	  zero = CONST0_RTX (GET_MODE_INNER (mode));
	  if (zero != NULL)
	    {
	      write_complex_part (object, zero, 0, true);
	      write_complex_part (object, zero, 1, false);
	      return NULL;
	    }
	}
    }

  if (size == const0_rtx)
    return NULL;

  align = MEM_ALIGN (object);

  if (CONST_INT_P (size)
      && targetm.use_by_pieces_infrastructure_p (INTVAL (size), align,
						 CLEAR_BY_PIECES,
						 optimize_insn_for_speed_p ()))
    clear_by_pieces (object, INTVAL (size), align);
  else if (set_storage_via_setmem (object, size, const0_rtx, align,
				   expected_align, expected_size,
				   min_size, max_size, probable_max_size))
    ;
  else if (try_store_by_multiple_pieces (object, size, ctz_size,
					 min_size, max_size,
					 NULL_RTX, 0, align))
    ;
  else if (ADDR_SPACE_GENERIC_P (MEM_ADDR_SPACE (object)))
    return set_storage_via_libcall (object, size, const0_rtx,
				    method == BLOCK_OP_TAILCALL);
  else
    gcc_unreachable ();

  return NULL;
}

rtx
clear_storage (rtx object, rtx size, enum block_op_methods method)
{
  unsigned HOST_WIDE_INT max, min = 0;
  if (GET_CODE (size) == CONST_INT)
    min = max = UINTVAL (size);
  else
    max = GET_MODE_MASK (GET_MODE (size));
  return clear_storage_hints (object, size, method, 0, -1, min, max, max, 0);
}
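
/* A hedged usage example: zeroing a 32-byte BLKmode MEM would go through

     clear_storage (mem, GEN_INT (32), BLOCK_OP_NORMAL);

   With a CONST_INT size, MIN and MAX above are both exact, so the
   setmem expander is given the tightest possible range hints.  */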


/* A subroutine of clear_storage.  Expand a call to memset.
   Return the return value of memset, 0 otherwise.  */

rtx
set_storage_via_libcall (rtx object, rtx size, rtx val, bool tailcall)
{
  tree call_expr, fn, object_tree, size_tree, val_tree;
  machine_mode size_mode;

  object = copy_addr_to_reg (XEXP (object, 0));
  object_tree = make_tree (ptr_type_node, object);

  if (!CONST_INT_P (val))
    val = convert_to_mode (TYPE_MODE (integer_type_node), val, 1);
  val_tree = make_tree (integer_type_node, val);

  size_mode = TYPE_MODE (sizetype);
  size = convert_to_mode (size_mode, size, 1);
  size = copy_to_mode_reg (size_mode, size);
  size_tree = make_tree (sizetype, size);

  /* It is incorrect to use the libcall calling conventions for calls to
     memset because it can be provided by the user.  */
  fn = builtin_decl_implicit (BUILT_IN_MEMSET);
  call_expr = build_call_expr (fn, 3, object_tree, val_tree, size_tree);
  CALL_EXPR_TAILCALL (call_expr) = tailcall;

  return expand_call (call_expr, NULL_RTX, false);
}

/* Expand a setmem pattern; return true if successful.  */

bool
set_storage_via_setmem (rtx object, rtx size, rtx val, unsigned int align,
			unsigned int expected_align, HOST_WIDE_INT expected_size,
			unsigned HOST_WIDE_INT min_size,
			unsigned HOST_WIDE_INT max_size,
			unsigned HOST_WIDE_INT probable_max_size)
{
  /* Try the most limited insn first, because there's no point
     including more than one in the machine description unless
     the more limited one has some advantage.  */

  if (expected_align < align)
    expected_align = align;
  if (expected_size != -1)
    {
      if ((unsigned HOST_WIDE_INT) expected_size > max_size)
	expected_size = max_size;
      if ((unsigned HOST_WIDE_INT) expected_size < min_size)
	expected_size = min_size;
    }

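  /* For reference, a summary derived from the operand setup below: a
     setmem pattern takes up to nine operands -- 0 destination MEM,
     1 length, 2 fill value, 3 alignment, 4 expected alignment,
     5 expected size, 6 min size, 7 max size, 8 probable max size --
     and the 4-, 6-, 8- and 9-operand forms are all accepted.  */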
  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    {
      scalar_int_mode mode = mode_iter.require ();
      enum insn_code code = direct_optab_handler (setmem_optab, mode);

      if (code != CODE_FOR_nothing
	  /* We don't need MODE to be narrower than BITS_PER_HOST_WIDE_INT
	     here because if SIZE is less than the mode mask, as it is
	     returned by the macro, it will definitely be less than the
	     actual mode mask.  Since SIZE is within the Pmode address
	     space, we limit MODE to Pmode.  */
	  && ((CONST_INT_P (size)
	       && ((unsigned HOST_WIDE_INT) INTVAL (size)
		   <= (GET_MODE_MASK (mode) >> 1)))
	      || max_size <= (GET_MODE_MASK (mode) >> 1)
	      || GET_MODE_BITSIZE (mode) >= GET_MODE_BITSIZE (Pmode)))
	{
	  class expand_operand ops[9];
	  unsigned int nops;

	  nops = insn_data[(int) code].n_generator_args;
	  gcc_assert (nops == 4 || nops == 6 || nops == 8 || nops == 9);

	  create_fixed_operand (&ops[0], object);
	  /* The check above guarantees that this size conversion is valid.  */
	  create_convert_operand_to (&ops[1], size, mode, true);
	  create_convert_operand_from (&ops[2], val, byte_mode, true);
	  create_integer_operand (&ops[3], align / BITS_PER_UNIT);
	  if (nops >= 6)
	    {
	      create_integer_operand (&ops[4], expected_align / BITS_PER_UNIT);
	      create_integer_operand (&ops[5], expected_size);
	    }
	  if (nops >= 8)
	    {
	      create_integer_operand (&ops[6], min_size);
	      /* If we cannot represent the maximal size,
		 make parameter NULL.  */
	      if ((HOST_WIDE_INT) max_size != -1)
		create_integer_operand (&ops[7], max_size);
	      else
		create_fixed_operand (&ops[7], NULL);
	    }
	  if (nops == 9)
	    {
	      /* If we cannot represent the maximal size,
		 make parameter NULL.  */
	      if ((HOST_WIDE_INT) probable_max_size != -1)
		create_integer_operand (&ops[8], probable_max_size);
	      else
		create_fixed_operand (&ops[8], NULL);
	    }
	  if (maybe_expand_insn (code, nops, ops))
	    return true;
	}
    }

  return false;
}


/* Write to one of the components of the complex value CPLX.  Write VAL to
   the real part if IMAG_P is false, and the imaginary part if it's true.
   If UNDEFINED_P then the value in CPLX is currently undefined.  */

void
write_complex_part (rtx cplx, rtx val, bool imag_p, bool undefined_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    {
      emit_move_insn (XEXP (cplx, imag_p), val);
      return;
    }

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    {
      emit_move_insn (adjust_address_nv (cplx, imode,
					 imag_p ? GET_MODE_SIZE (imode) : 0),
		      val);
      return;
    }

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since store_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
	 the original object if it spans an even number of hard regs.
	 This special case is important for SCmode on 64-bit platforms
	 where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
	  && REG_NREGS (cplx) % 2 == 0))
    {
      rtx part = simplify_gen_subreg (imode, cplx, cmode,
				      imag_p ? GET_MODE_SIZE (imode) : 0);
      if (part)
	{
	  emit_move_insn (part, val);
	  return;
	}
      else
	/* simplify_gen_subreg may fail for sub-word MEMs.  */
	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  store_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0, 0, 0, imode, val,
		   false, undefined_p);
}
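
/* A small usage sketch mirroring clear_storage_hints above: zeroing a
   complex OBJECT writes the two halves separately,

     write_complex_part (object, zero, false, true);
     write_complex_part (object, zero, true, false);

   with UNDEFINED_P true only for the first store, since at that point
   no part of OBJECT has been written yet and store_bit_field need not
   preserve the other bits.  */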

/* Extract one of the components of the complex value CPLX.  Extract the
   real part if IMAG_P is false, and the imaginary part if it's true.  */

rtx
read_complex_part (rtx cplx, bool imag_p)
{
  machine_mode cmode;
  scalar_mode imode;
  unsigned ibitsize;

  if (GET_CODE (cplx) == CONCAT)
    return XEXP (cplx, imag_p);

  cmode = GET_MODE (cplx);
  imode = GET_MODE_INNER (cmode);
  ibitsize = GET_MODE_BITSIZE (imode);

  /* Special case reads from complex constants that got spilled to memory.  */
  if (MEM_P (cplx) && GET_CODE (XEXP (cplx, 0)) == SYMBOL_REF)
    {
      tree decl = SYMBOL_REF_DECL (XEXP (cplx, 0));
      if (decl && TREE_CODE (decl) == COMPLEX_CST)
	{
	  tree part = imag_p ? TREE_IMAGPART (decl) : TREE_REALPART (decl);
	  if (CONSTANT_CLASS_P (part))
	    return expand_expr (part, NULL_RTX, imode, EXPAND_NORMAL);
	}
    }

  /* For MEMs simplify_gen_subreg may generate an invalid new address
     because, e.g., the original address is considered mode-dependent
     by the target, which restricts simplify_subreg from invoking
     adjust_address_nv.  Instead of preparing fallback support for an
     invalid address, we call adjust_address_nv directly.  */
  if (MEM_P (cplx))
    return adjust_address_nv (cplx, imode,
			      imag_p ? GET_MODE_SIZE (imode) : 0);

  /* If the sub-object is at least word sized, then we know that subregging
     will work.  This special case is important, since extract_bit_field
     wants to operate on integer modes, and there's rarely an OImode to
     correspond to TCmode.  */
  if (ibitsize >= BITS_PER_WORD
      /* For hard regs we have exact predicates.  Assume we can split
	 the original object if it spans an even number of hard regs.
	 This special case is important for SCmode on 64-bit platforms
	 where the natural size of floating-point regs is 32-bit.  */
      || (REG_P (cplx)
	  && REGNO (cplx) < FIRST_PSEUDO_REGISTER
	  && REG_NREGS (cplx) % 2 == 0))
    {
      rtx ret = simplify_gen_subreg (imode, cplx, cmode,
				     imag_p ? GET_MODE_SIZE (imode) : 0);
      if (ret)
	return ret;
      else
	/* simplify_gen_subreg may fail for sub-word MEMs.  */
	gcc_assert (MEM_P (cplx) && ibitsize < BITS_PER_WORD);
    }

  return extract_bit_field (cplx, ibitsize, imag_p ? ibitsize : 0,
			    true, NULL_RTX, imode, imode, false, NULL);
}

/* A subroutine of emit_move_insn_1.  Yet another lowpart generator.
   NEW_MODE and OLD_MODE are the same size.  Return NULL if X cannot be
   represented in NEW_MODE.  If FORCE is true, this will never happen, as
   we'll force-create a SUBREG if needed.  */

static rtx
emit_move_change_mode (machine_mode new_mode,
		       machine_mode old_mode, rtx x, bool force)
{
  rtx ret;

  if (push_operand (x, GET_MODE (x)))
    {
      ret = gen_rtx_MEM (new_mode, XEXP (x, 0));
      MEM_COPY_ATTRIBUTES (ret, x);
    }
  else if (MEM_P (x))
    {
      /* We don't have to worry about changing the address since the
	 size in bytes is supposed to be the same.  */
      if (reload_in_progress)
	{
	  /* Copy the MEM to change the mode and move any
	     substitutions from the old MEM to the new one.  */
	  ret = adjust_address_nv (x, new_mode, 0);
	  copy_replacements (x, ret);
	}
      else
	ret = adjust_address (x, new_mode, 0);
    }
  else
    {
      /* Note that we do want simplify_subreg's behavior of validating
	 that the new mode is ok for a hard register.  If we were to use
	 simplify_gen_subreg, we would create the subreg, but would
	 probably run into the target not being able to implement it.  */
      /* Except, of course, when FORCE is true, when this is exactly what
	 we want.  Which is needed for CCmodes on some targets.  */
      if (force)
	ret = simplify_gen_subreg (new_mode, x, old_mode, 0);
      else
	ret = simplify_subreg (new_mode, x, old_mode, 0);
    }

  return ret;
}

/* A subroutine of emit_move_insn_1.  Generate a move from Y into X using
   an integer mode of the same size as MODE.  Returns the instruction
   emitted, or NULL if such a move could not be generated.  */

static rtx_insn *
emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
{
  scalar_int_mode imode;
  enum insn_code code;

  /* There must exist a mode of the exact size we require.  */
  if (!int_mode_for_mode (mode).exists (&imode))
    return NULL;

  /* The target must support moves in this mode.  */
  code = optab_handler (mov_optab, imode);
  if (code == CODE_FOR_nothing)
    return NULL;

  x = emit_move_change_mode (imode, mode, x, force);
  if (x == NULL_RTX)
    return NULL;
  y = emit_move_change_mode (imode, mode, y, force);
  if (y == NULL_RTX)
    return NULL;
  return emit_insn (GEN_FCN (code) (x, y));
}
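
/* For illustration (an editorial sketch, not part of the original
   source): a scalar decimal-float copy such as SDmode, for which few
   targets define a mov pattern, can be performed as an SImode move,
   since int_mode_for_mode (SDmode) yields the same-sized SImode:

     (set (subreg:SI (reg:SD x) 0) (subreg:SI (reg:SD y) 0))  */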

/* A subroutine of emit_move_insn_1.  X is a push_operand in MODE.
   Return an equivalent MEM that does not use an auto-increment.  */

rtx
emit_move_resolve_push (machine_mode mode, rtx x)
{
  enum rtx_code code = GET_CODE (XEXP (x, 0));
  rtx temp;

  poly_int64 adjust = GET_MODE_SIZE (mode);
#ifdef PUSH_ROUNDING
  adjust = PUSH_ROUNDING (adjust);
#endif
  if (code == PRE_DEC || code == POST_DEC)
    adjust = -adjust;
  else if (code == PRE_MODIFY || code == POST_MODIFY)
    {
      rtx expr = XEXP (XEXP (x, 0), 1);

      gcc_assert (GET_CODE (expr) == PLUS || GET_CODE (expr) == MINUS);
      poly_int64 val = rtx_to_poly_int64 (XEXP (expr, 1));
      if (GET_CODE (expr) == MINUS)
        val = -val;
      gcc_assert (known_eq (adjust, val) || known_eq (adjust, -val));
      adjust = val;
    }

  /* Do not use anti_adjust_stack, since we don't want to update
     stack_pointer_delta.  */
  temp = expand_simple_binop (Pmode, PLUS, stack_pointer_rtx,
                              gen_int_mode (adjust, Pmode), stack_pointer_rtx,
                              0, OPTAB_LIB_WIDEN);
  if (temp != stack_pointer_rtx)
    emit_move_insn (stack_pointer_rtx, temp);

  switch (code)
    {
    case PRE_INC:
    case PRE_DEC:
    case PRE_MODIFY:
      temp = stack_pointer_rtx;
      break;
    case POST_INC:
    case POST_DEC:
    case POST_MODIFY:
      temp = plus_constant (Pmode, stack_pointer_rtx, -adjust);
      break;
    default:
      gcc_unreachable ();
    }

  return replace_equiv_address (x, temp);
}
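
/* An illustrative expansion (editorial note): on a stack-grows-downward
   target, a push like

     (set (mem:SI (pre_dec:SI (reg:SI sp))) ...)

   is resolved into an explicit adjustment plus a plain store:

     (set (reg:SI sp) (plus:SI (reg:SI sp) (const_int -4)))
     (set (mem:SI (reg:SI sp)) ...)

   with the 4 possibly rounded up by PUSH_ROUNDING.  */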

/* A subroutine of emit_move_complex.  Generate a move from Y into X.
   X is known to satisfy push_operand, and MODE is known to be complex.
   Returns the last instruction emitted.  */

rtx_insn *
emit_move_complex_push (machine_mode mode, rtx x, rtx y)
{
  scalar_mode submode = GET_MODE_INNER (mode);
  bool imag_first;

#ifdef PUSH_ROUNDING
  poly_int64 submodesize = GET_MODE_SIZE (submode);

  /* In case we output to the stack, but the size is smaller than the
     machine can push exactly, we need to use move instructions.  */
  if (maybe_ne (PUSH_ROUNDING (submodesize), submodesize))
    {
      x = emit_move_resolve_push (mode, x);
      return emit_move_insn (x, y);
    }
#endif

  /* Note that the real part always precedes the imag part in memory
     regardless of machine's endianness.  */
  switch (GET_CODE (XEXP (x, 0)))
    {
    case PRE_DEC:
    case POST_DEC:
      imag_first = true;
      break;
    case PRE_INC:
    case POST_INC:
      imag_first = false;
      break;
    default:
      gcc_unreachable ();
    }

  emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
                  read_complex_part (y, imag_first));
  return emit_move_insn (gen_rtx_MEM (submode, XEXP (x, 0)),
                         read_complex_part (y, !imag_first));
}
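
/* E.g. (editorial note) pushing an SCmode value through a PRE_DEC
   address pushes the imaginary part first; after both pushes the real
   part therefore ends up at the lower address, matching the in-memory
   layout of a complex value noted above.  */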

/* A subroutine of emit_move_complex.  Perform the move from Y to X
   via two moves of the parts.  Returns the last instruction emitted.  */

rtx_insn *
emit_move_complex_parts (rtx x, rtx y)
{
  /* Show the output dies here.  This is necessary for SUBREGs
     of pseudos since we cannot track their lifetimes correctly;
     hard regs shouldn't appear here except as return values.  */
  if (!reload_completed && !reload_in_progress
      && REG_P (x) && !reg_overlap_mentioned_p (x, y))
    emit_clobber (x);

  write_complex_part (x, read_complex_part (y, false), false, true);
  write_complex_part (x, read_complex_part (y, true), true, false);

  return get_last_insn ();
}

/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
   MODE is known to be complex.  Returns the last instruction emitted.  */

static rtx_insn *
emit_move_complex (machine_mode mode, rtx x, rtx y)
{
  bool try_int;

  /* Need to take special care for pushes, to maintain proper ordering
     of the data, and possibly extra padding.  */
  if (push_operand (x, mode))
    return emit_move_complex_push (mode, x, y);

  /* See if we can coerce the target into moving both values at once, except
     for floating point where we favor moving as parts if this is easy.  */
  if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
      && optab_handler (mov_optab, GET_MODE_INNER (mode)) != CODE_FOR_nothing
      && !(REG_P (x)
           && HARD_REGISTER_P (x)
           && REG_NREGS (x) == 1)
      && !(REG_P (y)
           && HARD_REGISTER_P (y)
           && REG_NREGS (y) == 1))
    try_int = false;
  /* Not possible if the values are inherently not adjacent.  */
  else if (GET_CODE (x) == CONCAT || GET_CODE (y) == CONCAT)
    try_int = false;
  /* Is possible if both are registers (or subregs of registers).  */
  else if (register_operand (x, mode) && register_operand (y, mode))
    try_int = true;
  /* If one of the operands is a memory, and alignment constraints
     are friendly enough, we may be able to do combined memory operations.
     We do not attempt this if Y is a constant because that combination is
     usually better with the by-parts thing below.  */
  else if ((MEM_P (x) ? !CONSTANT_P (y) : MEM_P (y))
           && (!STRICT_ALIGNMENT
               || get_mode_alignment (mode) == BIGGEST_ALIGNMENT))
    try_int = true;
  else
    try_int = false;

  if (try_int)
    {
      rtx_insn *ret;

      /* For memory to memory moves, optimal behavior can be had with the
         existing block move logic.  But use normal expansion if optimizing
         for size.  */
      if (MEM_P (x) && MEM_P (y))
        {
          emit_block_move (x, y, gen_int_mode (GET_MODE_SIZE (mode), Pmode),
                           (optimize_insn_for_speed_p ()
                            ? BLOCK_OP_NO_LIBCALL : BLOCK_OP_NORMAL));
          return get_last_insn ();
        }

      ret = emit_move_via_integer (mode, x, y, true);
      if (ret)
        return ret;
    }

  return emit_move_complex_parts (x, y);
}
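
/* Summary (editorial note): a complex move is attempted as a single
   same-sized integer move when both operands are registers, or when
   one side is a MEM with friendly alignment and the other is not a
   constant; CONCAT operands and the easy floating-point cases go part
   by part, and MEM-to-MEM copies use the block-move logic.  */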

/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
   MODE is known to be MODE_CC.  Returns the last instruction emitted.  */

static rtx_insn *
emit_move_ccmode (machine_mode mode, rtx x, rtx y)
{
  rtx_insn *ret;

  /* Assume all MODE_CC modes are equivalent; if we have movcc, use it.  */
  if (mode != CCmode)
    {
      enum insn_code code = optab_handler (mov_optab, CCmode);
      if (code != CODE_FOR_nothing)
        {
          x = emit_move_change_mode (CCmode, mode, x, true);
          y = emit_move_change_mode (CCmode, mode, y, true);
          return emit_insn (GEN_FCN (code) (x, y));
        }
    }

  /* Otherwise, find the MODE_INT mode of the same width.  */
  ret = emit_move_via_integer (mode, x, y, false);
  gcc_assert (ret != NULL);
  return ret;
}

/* Return true if word I of OP lies entirely in the
   undefined bits of a paradoxical subreg.  */

static bool
undefined_operand_subword_p (const_rtx op, int i)
{
  if (GET_CODE (op) != SUBREG)
    return false;
  machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
  poly_int64 offset = i * UNITS_PER_WORD + subreg_memory_offset (op);
  return (known_ge (offset, GET_MODE_SIZE (innermostmode))
          || known_le (offset, -UNITS_PER_WORD));
}
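
/* Example (editorial note): for (subreg:TI (reg:DI x) 0) on a
   little-endian 64-bit target, word 1 would cover bytes 8..15 of a
   value that only has bytes 0..7, so the predicate is true for I == 1
   and that word need not be moved at all.  */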

/* A subroutine of emit_move_insn_1.  Generate a move from Y into X.
   MODE is any multi-word or full-word mode that lacks a move_insn
   pattern.  Note that you will get better code if you define such
   patterns, even if they must turn into multiple assembler instructions.  */

static rtx_insn *
emit_move_multi_word (machine_mode mode, rtx x, rtx y)
{
  rtx_insn *last_insn = 0;
  rtx_insn *seq;
  rtx inner;
  bool need_clobber;
  int i, mode_size;

  /* This function can only handle cases where the number of words is
     known at compile time.  */
  mode_size = GET_MODE_SIZE (mode).to_constant ();
  gcc_assert (mode_size >= UNITS_PER_WORD);

  /* If X is a push on the stack, do the push now and replace
     X with a reference to the stack pointer.  */
  if (push_operand (x, mode))
    x = emit_move_resolve_push (mode, x);

  /* If we are in reload, see if either operand is a MEM whose address
     is scheduled for replacement.  */
  if (reload_in_progress && MEM_P (x)
      && (inner = find_replacement (&XEXP (x, 0))) != XEXP (x, 0))
    x = replace_equiv_address_nv (x, inner);
  if (reload_in_progress && MEM_P (y)
      && (inner = find_replacement (&XEXP (y, 0))) != XEXP (y, 0))
    y = replace_equiv_address_nv (y, inner);

  start_sequence ();

  need_clobber = false;
  for (i = 0; i < CEIL (mode_size, UNITS_PER_WORD); i++)
    {
      /* Do not generate code for a move if it would go entirely
         to the non-existing bits of a paradoxical subreg.  */
      if (undefined_operand_subword_p (x, i))
        continue;

      rtx xpart = operand_subword (x, i, 1, mode);
      rtx ypart;

      /* Do not generate code for a move if it would come entirely
         from the undefined bits of a paradoxical subreg.  */
      if (undefined_operand_subword_p (y, i))
        continue;

      ypart = operand_subword (y, i, 1, mode);

      /* If we can't get a part of Y, put Y into memory if it is a
         constant.  Otherwise, force it into a register.  Then we must
         be able to get a part of Y.  */
      if (ypart == 0 && CONSTANT_P (y))
        {
          y = use_anchored_address (force_const_mem (mode, y));
          ypart = operand_subword (y, i, 1, mode);
        }
      else if (ypart == 0)
        ypart = operand_subword_force (y, i, mode);

      gcc_assert (xpart && ypart);

      need_clobber |= (GET_CODE (xpart) == SUBREG);

      last_insn = emit_move_insn (xpart, ypart);
    }

  seq = get_insns ();
  end_sequence ();

  /* Show the output dies here.  This is necessary for SUBREGs
     of pseudos since we cannot track their lifetimes correctly;
     hard regs shouldn't appear here except as return values.
     We never want to emit such a clobber after reload.  */
  if (x != y
      && ! (reload_in_progress || reload_completed)
      && need_clobber != 0)
    emit_clobber (x);

  emit_insn (seq);

  return last_insn;
}
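
/* Illustrative (editorial note): a TImode copy between pseudos on a
   64-bit target without a TImode mov pattern becomes two DImode word
   moves, preceded by (clobber (reg:TI ...)) of the destination when a
   word is accessed through a SUBREG, so lifetimes stay trackable.  */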

/* Low level part of emit_move_insn.
   Called just like emit_move_insn, but assumes X and Y
   are basically valid.  */

rtx_insn *
emit_move_insn_1 (rtx x, rtx y)
{
  machine_mode mode = GET_MODE (x);
  enum insn_code code;

  gcc_assert ((unsigned int) mode < (unsigned int) MAX_MACHINE_MODE);

  code = optab_handler (mov_optab, mode);
  if (code != CODE_FOR_nothing)
    return emit_insn (GEN_FCN (code) (x, y));

  /* Expand complex moves by moving real part and imag part.  */
  if (COMPLEX_MODE_P (mode))
    return emit_move_complex (mode, x, y);

  if (GET_MODE_CLASS (mode) == MODE_DECIMAL_FLOAT
      || ALL_FIXED_POINT_MODE_P (mode))
    {
      rtx_insn *result = emit_move_via_integer (mode, x, y, true);

      /* If we can't find an integer mode, use multi words.  */
      if (result)
        return result;
      else
        return emit_move_multi_word (mode, x, y);
    }

  if (GET_MODE_CLASS (mode) == MODE_CC)
    return emit_move_ccmode (mode, x, y);

  /* Try using a move pattern for the corresponding integer mode.  This is
     only safe when simplify_subreg can convert MODE constants into integer
     constants.  At present, it can only do this reliably if the value
     fits within a HOST_WIDE_INT.  */
  if (!CONSTANT_P (y)
      || known_le (GET_MODE_BITSIZE (mode), HOST_BITS_PER_WIDE_INT))
    {
      rtx_insn *ret = emit_move_via_integer (mode, x, y, lra_in_progress);

      if (ret)
        {
          if (! lra_in_progress || recog (PATTERN (ret), ret, 0) >= 0)
            return ret;
        }
    }

  return emit_move_multi_word (mode, x, y);
}
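
/* The dispatch order above, in short: a target mov pattern if one
   exists, then the complex, decimal-float/fixed-point and CC special
   cases, then a same-sized integer-mode move, with word-by-word motion
   as the last resort.  */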

/* Generate code to copy Y into X.
   Both Y and X must have the same mode, except that
   Y can be a constant with VOIDmode.
   This mode cannot be BLKmode; use emit_block_move for that.

   Return the last instruction emitted.  */

rtx_insn *
emit_move_insn (rtx x, rtx y)
{
  machine_mode mode = GET_MODE (x);
  rtx y_cst = NULL_RTX;
  rtx_insn *last_insn;
  rtx set;

  gcc_assert (mode != BLKmode
              && (GET_MODE (y) == mode || GET_MODE (y) == VOIDmode));

  /* If we have a copy that looks like one of the following patterns:
       (set (subreg:M1 (reg:M2 ...)) (subreg:M1 (reg:M2 ...)))
       (set (subreg:M1 (reg:M2 ...)) (mem:M1 ADDR))
       (set (mem:M1 ADDR) (subreg:M1 (reg:M2 ...)))
       (set (subreg:M1 (reg:M2 ...)) (constant C))
     where mode M1 is equal in size to M2, try to detect whether the
     mode change involves an implicit round trip through memory.
     If so, see if we can avoid that by removing the subregs and
     doing the move in mode M2 instead.  */

  rtx x_inner = NULL_RTX;
  rtx y_inner = NULL_RTX;

  auto candidate_subreg_p = [&](rtx subreg) {
    return (REG_P (SUBREG_REG (subreg))
            && known_eq (GET_MODE_SIZE (GET_MODE (SUBREG_REG (subreg))),
                         GET_MODE_SIZE (GET_MODE (subreg)))
            && optab_handler (mov_optab, GET_MODE (SUBREG_REG (subreg)))
               != CODE_FOR_nothing);
  };

  auto candidate_mem_p = [&](machine_mode innermode, rtx mem) {
    return (!targetm.can_change_mode_class (innermode, GET_MODE (mem), ALL_REGS)
            && !push_operand (mem, GET_MODE (mem))
            /* Not a candidate if innermode requires too much alignment.  */
            && (MEM_ALIGN (mem) >= GET_MODE_ALIGNMENT (innermode)
                || targetm.slow_unaligned_access (GET_MODE (mem),
                                                  MEM_ALIGN (mem))
                || !targetm.slow_unaligned_access (innermode,
                                                   MEM_ALIGN (mem))));
  };

  if (SUBREG_P (x) && candidate_subreg_p (x))
    x_inner = SUBREG_REG (x);

  if (SUBREG_P (y) && candidate_subreg_p (y))
    y_inner = SUBREG_REG (y);

  if (x_inner != NULL_RTX
      && y_inner != NULL_RTX
      && GET_MODE (x_inner) == GET_MODE (y_inner)
      && !targetm.can_change_mode_class (GET_MODE (x_inner), mode, ALL_REGS))
    {
      x = x_inner;
      y = y_inner;
      mode = GET_MODE (x_inner);
    }
  else if (x_inner != NULL_RTX
           && MEM_P (y)
           && candidate_mem_p (GET_MODE (x_inner), y))
    {
      x = x_inner;
      y = adjust_address (y, GET_MODE (x_inner), 0);
      mode = GET_MODE (x_inner);
    }
  else if (y_inner != NULL_RTX
           && MEM_P (x)
           && candidate_mem_p (GET_MODE (y_inner), x))
    {
      x = adjust_address (x, GET_MODE (y_inner), 0);
      y = y_inner;
      mode = GET_MODE (y_inner);
    }
  else if (x_inner != NULL_RTX
           && CONSTANT_P (y)
           && !targetm.can_change_mode_class (GET_MODE (x_inner),
                                              mode, ALL_REGS)
           && (y_inner = simplify_subreg (GET_MODE (x_inner), y, mode, 0)))
    {
      x = x_inner;
      y = y_inner;
      mode = GET_MODE (x_inner);
    }

  if (CONSTANT_P (y))
    {
      if (optimize
          && SCALAR_FLOAT_MODE_P (GET_MODE (x))
          && (last_insn = compress_float_constant (x, y)))
        return last_insn;

      y_cst = y;

      if (!targetm.legitimate_constant_p (mode, y))
        {
          y = force_const_mem (mode, y);

          /* If the target's cannot_force_const_mem prevented the spill,
             assume that the target's move expanders will also take care
             of the non-legitimate constant.  */
          if (!y)
            y = y_cst;
          else
            y = use_anchored_address (y);
        }
    }

  /* If X or Y are memory references, verify that their addresses are valid
     for the machine.  */
  if (MEM_P (x)
      && (! memory_address_addr_space_p (GET_MODE (x), XEXP (x, 0),
                                         MEM_ADDR_SPACE (x))
          && ! push_operand (x, GET_MODE (x))))
    x = validize_mem (x);

  if (MEM_P (y)
      && ! memory_address_addr_space_p (GET_MODE (y), XEXP (y, 0),
                                        MEM_ADDR_SPACE (y)))
    y = validize_mem (y);

  gcc_assert (mode != BLKmode);

  last_insn = emit_move_insn_1 (x, y);

  if (y_cst && REG_P (x)
      && (set = single_set (last_insn)) != NULL_RTX
      && SET_DEST (set) == x
      && ! rtx_equal_p (y_cst, SET_SRC (set)))
    set_unique_reg_note (last_insn, REG_EQUAL, copy_rtx (y_cst));

  return last_insn;
}
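
/* Illustrative use (an editorial sketch, not part of this file):

     rtx tmp = gen_reg_rtx (SImode);
     emit_move_insn (tmp, GEN_INT (42));

   The integer constant has VOIDmode, which the assertion above
   explicitly allows; BLKmode copies must go through emit_block_move
   instead.  */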

/* Generate the body of an instruction to copy Y into X.
   It may be a list of insns, if one insn isn't enough.  */

rtx_insn *
gen_move_insn (rtx x, rtx y)
{
  rtx_insn *seq;

  start_sequence ();
  emit_move_insn_1 (x, y);
  seq = get_insns ();
  end_sequence ();
  return seq;
}

/* If Y is representable exactly in a narrower mode, and the target can
   perform the extension directly from constant or memory, then emit the
   move as an extension.  */

static rtx_insn *
compress_float_constant (rtx x, rtx y)
{
  machine_mode dstmode = GET_MODE (x);
  machine_mode orig_srcmode = GET_MODE (y);
  machine_mode srcmode;
  const REAL_VALUE_TYPE *r;
  int oldcost, newcost;
  bool speed = optimize_insn_for_speed_p ();

  r = CONST_DOUBLE_REAL_VALUE (y);

  if (targetm.legitimate_constant_p (dstmode, y))
    oldcost = set_src_cost (y, orig_srcmode, speed);
  else
    oldcost = set_src_cost (force_const_mem (dstmode, y), dstmode, speed);

  FOR_EACH_MODE_UNTIL (srcmode, orig_srcmode)
    {
      enum insn_code ic;
      rtx trunc_y;
      rtx_insn *last_insn;

      /* Skip if the target can't extend this way.  */
      ic = can_extend_p (dstmode, srcmode, 0);
      if (ic == CODE_FOR_nothing)
        continue;

      /* Skip if the narrowed value isn't exact.  */
      if (! exact_real_truncate (srcmode, r))
        continue;

      trunc_y = const_double_from_real_value (*r, srcmode);

      if (targetm.legitimate_constant_p (srcmode, trunc_y))
        {
          /* Skip if the target needs extra instructions to perform
             the extension.  */
          if (!insn_operand_matches (ic, 1, trunc_y))
            continue;
          /* This is valid, but may not be cheaper than the original.  */
          newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
                                  dstmode, speed);
          if (oldcost < newcost)
            continue;
        }
      else if (float_extend_from_mem[dstmode][srcmode])
        {
          trunc_y = force_const_mem (srcmode, trunc_y);
          /* This is valid, but may not be cheaper than the original.  */
          newcost = set_src_cost (gen_rtx_FLOAT_EXTEND (dstmode, trunc_y),
                                  dstmode, speed);
          if (oldcost < newcost)
            continue;
          trunc_y = validize_mem (trunc_y);
        }
      else
        continue;

      /* For CSE's benefit, force the compressed constant pool entry
         into a new pseudo.  This constant may be used in different modes,
         and if not, combine will put things back together for us.  */
      trunc_y = force_reg (srcmode, trunc_y);

      /* If x is a hard register, perform the extension into a pseudo,
         so that e.g. stack realignment code is aware of it.  */
      rtx target = x;
      if (REG_P (x) && HARD_REGISTER_P (x))
        target = gen_reg_rtx (dstmode);

      emit_unop_insn (ic, target, trunc_y, UNKNOWN);
      last_insn = get_last_insn ();

      if (REG_P (target))
        set_unique_reg_note (last_insn, REG_EQUAL, y);

      if (target != x)
        return emit_move_insn (x, target);
      return last_insn;
    }

  return NULL;
}
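
/* Example (editorial note): a DFmode load of 1.0 can instead be
   emitted as (float_extend:DF ...) of the SFmode constant 1.0f, since
   that truncation is exact; the cost comparison keeps the plain move
   whenever the extension would be no cheaper.  */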

/* Pushing data onto the stack.  */

/* Push a block of length SIZE (perhaps variable)
   and return an rtx to address the beginning of the block.
   The value may be virtual_outgoing_args_rtx.

   EXTRA is the number of bytes of padding to push in addition to SIZE.
   BELOW nonzero means this padding comes at low addresses;
   otherwise, the padding comes at high addresses.  */

rtx
push_block (rtx size, poly_int64 extra, int below)
{
  rtx temp;

  size = convert_modes (Pmode, ptr_mode, size, 1);
  if (CONSTANT_P (size))
    anti_adjust_stack (plus_constant (Pmode, size, extra));
  else if (REG_P (size) && known_eq (extra, 0))
    anti_adjust_stack (size);
  else
    {
      temp = copy_to_mode_reg (Pmode, size);
      if (maybe_ne (extra, 0))
        temp = expand_binop (Pmode, add_optab, temp,
                             gen_int_mode (extra, Pmode),
                             temp, 0, OPTAB_LIB_WIDEN);
      anti_adjust_stack (temp);
    }

  if (STACK_GROWS_DOWNWARD)
    {
      temp = virtual_outgoing_args_rtx;
      if (maybe_ne (extra, 0) && below)
        temp = plus_constant (Pmode, temp, extra);
    }
  else
    {
      poly_int64 csize;
      if (poly_int_rtx_p (size, &csize))
        temp = plus_constant (Pmode, virtual_outgoing_args_rtx,
                              -csize - (below ? 0 : extra));
      else if (maybe_ne (extra, 0) && !below)
        temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
                             negate_rtx (Pmode, plus_constant (Pmode, size,
                                                               extra)));
      else
        temp = gen_rtx_PLUS (Pmode, virtual_outgoing_args_rtx,
                             negate_rtx (Pmode, size));
    }

  return memory_address (NARROWEST_INT_MODE, temp);
}

/* A utility routine that returns the base of an auto-inc memory, or NULL.  */

static rtx
mem_autoinc_base (rtx mem)
{
  if (MEM_P (mem))
    {
      rtx addr = XEXP (mem, 0);
      if (GET_RTX_CLASS (GET_CODE (addr)) == RTX_AUTOINC)
        return XEXP (addr, 0);
    }
  return NULL;
}
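
/* E.g. (editorial note) mem_autoinc_base applied to
   (mem:SI (pre_dec:SI (reg sp))) yields (reg sp), while a MEM with a
   plain address, or a non-MEM, yields NULL.  */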

/* A utility routine used here, in reload, and in try_split.  The insns
   after PREV up to and including LAST are known to adjust the stack,
   with a final value of END_ARGS_SIZE.  Iterate backward from LAST
   placing notes as appropriate.  PREV may be NULL, indicating the
   entire insn sequence prior to LAST should be scanned.

   The set of allowed stack pointer modifications is small:
     (1) One or more auto-inc style memory references (aka pushes),
     (2) One or more addition/subtraction with the SP as destination,
     (3) A single move insn with the SP as destination,
     (4) A call_pop insn,
     (5) Noreturn call insns if !ACCUMULATE_OUTGOING_ARGS.

   Insns in the sequence that do not modify the SP are ignored,
   except for noreturn calls.

   The return value is the amount of adjustment that can be trivially
   verified, via immediate operand or auto-inc.  If the adjustment
   cannot be trivially extracted, the return value is HOST_WIDE_INT_MIN.  */

poly_int64
find_args_size_adjust (rtx_insn *insn)
{
  rtx dest, set, pat;
  int i;

  pat = PATTERN (insn);
  set = NULL;

  /* Look for a call_pop pattern.  */
  if (CALL_P (insn))
    {
      /* We have to allow non-call_pop patterns for the case
         of emit_single_push_insn of a TLS address.  */
      if (GET_CODE (pat) != PARALLEL)
        return 0;

      /* All call_pop have a stack pointer adjust in the parallel.
         The call itself is always first, and the stack adjust is
         usually last, so search from the end.  */
      for (i = XVECLEN (pat, 0) - 1; i > 0; --i)
        {
          set = XVECEXP (pat, 0, i);
          if (GET_CODE (set) != SET)
            continue;
          dest = SET_DEST (set);
          if (dest == stack_pointer_rtx)
            break;
        }
      /* We'd better have found the stack pointer adjust.  */
      if (i == 0)
        return 0;
      /* Fall through to process the extracted SET and DEST
         as if it was a standalone insn.  */
    }
  else if (GET_CODE (pat) == SET)
    set = pat;
  else if ((set = single_set (insn)) != NULL)
    ;
  else if (GET_CODE (pat) == PARALLEL)
    {
      /* ??? Some older ports use a parallel with a stack adjust
         and a store for a PUSH_ROUNDING pattern, rather than a
         PRE/POST_MODIFY rtx.  Don't force them to update yet...  */
      /* ??? See h8300 and m68k, pushqi1.  */
      for (i = XVECLEN (pat, 0) - 1; i >= 0; --i)
        {
          set = XVECEXP (pat, 0, i);
          if (GET_CODE (set) != SET)
            continue;
          dest = SET_DEST (set);
          if (dest == stack_pointer_rtx)
            break;

          /* We do not expect an auto-inc of the sp in the parallel.  */
          gcc_checking_assert (mem_autoinc_base (dest) != stack_pointer_rtx);
          gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
                               != stack_pointer_rtx);
        }
      if (i < 0)
        return 0;
    }
  else
    return 0;

  dest = SET_DEST (set);

  /* Look for direct modifications of the stack pointer.  */
  if (REG_P (dest) && REGNO (dest) == STACK_POINTER_REGNUM)
    {
      /* Look for a trivial adjustment, otherwise assume nothing.  */
      /* Note that the SPU restore_stack_block pattern refers to
         the stack pointer in V4SImode.  Consider that non-trivial.  */
      poly_int64 offset;
      if (SCALAR_INT_MODE_P (GET_MODE (dest))
          && strip_offset (SET_SRC (set), &offset) == stack_pointer_rtx)
        return offset;
      /* ??? Reload can generate no-op moves, which will be cleaned
         up later.  Recognize it and continue searching.  */
      else if (rtx_equal_p (dest, SET_SRC (set)))
        return 0;
      else
        return HOST_WIDE_INT_MIN;
    }
  else
    {
      rtx mem, addr;

      /* Otherwise only think about autoinc patterns.  */
      if (mem_autoinc_base (dest) == stack_pointer_rtx)
        {
          mem = dest;
          gcc_checking_assert (mem_autoinc_base (SET_SRC (set))
                               != stack_pointer_rtx);
        }
      else if (mem_autoinc_base (SET_SRC (set)) == stack_pointer_rtx)
        mem = SET_SRC (set);
      else
        return 0;

      addr = XEXP (mem, 0);
      switch (GET_CODE (addr))
        {
        case PRE_INC:
        case POST_INC:
          return GET_MODE_SIZE (GET_MODE (mem));
        case PRE_DEC:
        case POST_DEC:
          return -GET_MODE_SIZE (GET_MODE (mem));
        case PRE_MODIFY:
        case POST_MODIFY:
          addr = XEXP (addr, 1);
          gcc_assert (GET_CODE (addr) == PLUS);
          gcc_assert (XEXP (addr, 0) == stack_pointer_rtx);
          return rtx_to_poly_int64 (XEXP (addr, 1));
        default:
          gcc_unreachable ();
        }
    }
}
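
/* Example (editorial note): for a word-sized push such as

     (set (mem:DI (pre_dec:DI (reg:DI sp))) (reg:DI 0))

   the PRE_DEC case yields -8; an insn that sets the SP to anything
   other than the SP plus a constant yields HOST_WIDE_INT_MIN.  */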

poly_int64
fixup_args_size_notes (rtx_insn *prev, rtx_insn *last,
                       poly_int64 end_args_size)
{
  poly_int64 args_size = end_args_size;
  bool saw_unknown = false;
  rtx_insn *insn;

  for (insn = last; insn != prev; insn = PREV_INSN (insn))
    {
      if (!NONDEBUG_INSN_P (insn))
        continue;

      /* We might have existing REG_ARGS_SIZE notes, e.g. when pushing
         a call argument containing a TLS address that itself requires
         a call to __tls_get_addr.  The handling of stack_pointer_delta
         in emit_single_push_insn is supposed to ensure that any such
         notes are already correct.  */
      rtx note = find_reg_note (insn, REG_ARGS_SIZE, NULL_RTX);
      gcc_assert (!note || known_eq (args_size, get_args_size (note)));

      poly_int64 this_delta = find_args_size_adjust (insn);
      if (known_eq (this_delta, 0))
        {
          if (!CALL_P (insn)
              || ACCUMULATE_OUTGOING_ARGS
              || find_reg_note (insn, REG_NORETURN, NULL_RTX) == NULL_RTX)
            continue;
        }

      gcc_assert (!saw_unknown);
      if (known_eq (this_delta, HOST_WIDE_INT_MIN))
        saw_unknown = true;

      if (!note)
        add_args_size_note (insn, args_size);
      if (STACK_GROWS_DOWNWARD)
        this_delta = -poly_uint64 (this_delta);

      if (saw_unknown)
        args_size = HOST_WIDE_INT_MIN;
      else
        args_size -= this_delta;
    }

  return args_size;
}
4709 | ||||
4710 | #ifdef PUSH_ROUNDING | |||
4711 | /* Emit single push insn. */ | |||
4712 | ||||
4713 | static void | |||
emit_single_push_insn_1 (machine_mode mode, rtx x, tree type)
{
  rtx dest_addr;
  poly_int64 rounded_size = PUSH_ROUNDING (GET_MODE_SIZE (mode));
  rtx dest;
  enum insn_code icode;

  /* If there is push pattern, use it.  Otherwise try old way of throwing
     MEM representing push operation to move expander.  */
  icode = optab_handler (push_optab, mode);
  if (icode != CODE_FOR_nothing)
    {
      class expand_operand ops[1];

      create_input_operand (&ops[0], x, mode);
      if (maybe_expand_insn (icode, 1, ops))
        return;
    }
  if (known_eq (GET_MODE_SIZE (mode), rounded_size))
    dest_addr = gen_rtx_fmt_e (STACK_PUSH_CODE, Pmode, stack_pointer_rtx);
  /* If we are to pad downward, adjust the stack pointer first and
     then store X into the stack location using an offset.  This is
     because emit_move_insn does not know how to pad; it does not have
     access to type.  */
  else if (targetm.calls.function_arg_padding (mode, type) == PAD_DOWNWARD)
    {
      emit_move_insn (stack_pointer_rtx,
                      expand_binop (Pmode,
                                    STACK_GROWS_DOWNWARD ? sub_optab
                                    : add_optab,
                                    stack_pointer_rtx,
                                    gen_int_mode (rounded_size, Pmode),
                                    NULL_RTX, 0, OPTAB_LIB_WIDEN));

      poly_int64 offset = rounded_size - GET_MODE_SIZE (mode);
      if (STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_DEC)
        /* We have already decremented the stack pointer, so get the
           previous value.  */
        offset += rounded_size;

      if (!STACK_GROWS_DOWNWARD && STACK_PUSH_CODE == POST_INC)
        /* We have already incremented the stack pointer, so get the
           previous value.  */
        offset -= rounded_size;

      dest_addr = plus_constant (Pmode, stack_pointer_rtx, offset);
    }
  else
    {
      if (STACK_GROWS_DOWNWARD)
        /* ??? This seems wrong if STACK_PUSH_CODE == POST_DEC.  */
        dest_addr = plus_constant (Pmode, stack_pointer_rtx, -rounded_size);
      else
        /* ??? This seems wrong if STACK_PUSH_CODE == POST_INC.  */
        dest_addr = plus_constant (Pmode, stack_pointer_rtx, rounded_size);

      dest_addr = gen_rtx_PRE_MODIFY (Pmode, stack_pointer_rtx, dest_addr);
    }

  dest = gen_rtx_MEM (mode, dest_addr);

  if (type != 0)
    {
      set_mem_attributes (dest, type, 1);

      if (cfun->tail_call_marked)
        /* Function incoming arguments may overlap with sibling call
           outgoing arguments and we cannot allow reordering of reads
           from function arguments with stores to outgoing arguments
           of sibling calls.  */
        set_mem_alias_set (dest, 0);
    }
  emit_move_insn (dest, x);
}
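
/* Added note: PUSH_ROUNDING may pad each push to the target's stack slot
   granularity, in which case ROUNDED_SIZE above exceeds
   GET_MODE_SIZE (MODE); a single-byte push, for instance, may still
   consume a whole slot.  The padding branches above exist to place X at
   the correct end of that slot.  */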

/* Emit and annotate a single push insn.  */

static void
emit_single_push_insn (machine_mode mode, rtx x, tree type)
{
  poly_int64 delta, old_delta = stack_pointer_delta;
  rtx_insn *prev = get_last_insn ();
  rtx_insn *last;

  emit_single_push_insn_1 (mode, x, type);

  /* Adjust stack_pointer_delta to describe the situation after the push
     we just performed.  Note that we must do this after the push rather
     than before the push in case calculating X needs pushes and pops of
     its own (e.g. if calling __tls_get_addr).  The REG_ARGS_SIZE notes
     for such pushes and pops must not include the effect of the future
     push of X.  */
  stack_pointer_delta += PUSH_ROUNDING (GET_MODE_SIZE (mode));

  last = get_last_insn ();

  /* Notice the common case where we emitted exactly one insn.  */
  if (PREV_INSN (last) == prev)
    {
      add_args_size_note (last, stack_pointer_delta);
      return;
    }

  delta = fixup_args_size_notes (prev, last, stack_pointer_delta);
  gcc_assert (known_eq (delta, HOST_WIDE_INT_MIN)
              || known_eq (delta, old_delta));
}
#endif

/* If reading SIZE bytes from X will end up reading from
   Y return the number of bytes that overlap.  Return -1
   if there is no overlap or -2 if we can't determine
   (for example when X and Y have different base registers).  */

static int
memory_load_overlap (rtx x, rtx y, HOST_WIDE_INT size)
{
  rtx tmp = plus_constant (Pmode, x, size);
  rtx sub = simplify_gen_binary (MINUS, Pmode, tmp, y);

  if (!CONST_INT_P (sub))
    return -2;

  HOST_WIDE_INT val = INTVAL (sub);

  return IN_RANGE (val, 1, size) ? val : -1;
}
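
/* Worked example (added): with X = sp, Y = sp + 8 and SIZE = 16, the
   load covers bytes [sp, sp + 16) and X + SIZE - Y is 8; the last 8
   bytes read fall at or beyond Y, so 8 is returned.  */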

/* Generate code to push X onto the stack, assuming it has mode MODE and
   type TYPE.
   MODE is redundant except when X is a CONST_INT (since they don't
   carry mode info).
   SIZE is an rtx for the size of data to be copied (in bytes),
   needed only if X is BLKmode.
   Return true if successful.  May return false if asked to push a
   partial argument during a sibcall optimization (as specified by
   SIBCALL_P) and the incoming and outgoing pointers cannot be shown
   to not overlap.

   ALIGN (in bits) is maximum alignment we can assume.

   If PARTIAL and REG are both nonzero, then copy that many of the first
   bytes of X into registers starting with REG, and push the rest of X.
   The amount of space pushed is decreased by PARTIAL bytes.
   REG must be a hard register in this case.
   If REG is zero but PARTIAL is not, take all other actions for an
   argument partially in registers, but do not actually load any
   registers.

   EXTRA is the amount in bytes of extra space to leave next to this arg.
   This is ignored if an argument block has already been allocated.

   On a machine that lacks real push insns, ARGS_ADDR is the address of
   the bottom of the argument block for this call.  We use indexing off there
   to store the arg.  On machines with push insns, ARGS_ADDR is 0 when an
   argument block has not been preallocated.

   ARGS_SO_FAR is the size of args previously pushed for this call.

   REG_PARM_STACK_SPACE is nonzero if functions require stack space
   for arguments passed in registers.  If nonzero, it will be the number
   of bytes required.  */

bool
emit_push_insn (rtx x, machine_mode mode, tree type, rtx size,
                unsigned int align, int partial, rtx reg, poly_int64 extra,
                rtx args_addr, rtx args_so_far, int reg_parm_stack_space,
                rtx alignment_pad, bool sibcall_p)
{
  rtx xinner;
  pad_direction stack_direction
    = STACK_GROWS_DOWNWARD ? PAD_DOWNWARD : PAD_UPWARD;

  /* Decide where to pad the argument: PAD_DOWNWARD for below,
     PAD_UPWARD for above, or PAD_NONE for don't pad it.
     Default is below for small data on big-endian machines; else above.  */
  pad_direction where_pad = targetm.calls.function_arg_padding (mode, type);

  /* Invert direction if stack is post-decrement.
     FIXME: why?  */
  if (STACK_PUSH_CODE == POST_DEC)
    if (where_pad != PAD_NONE)
      where_pad = (where_pad == PAD_DOWNWARD ? PAD_UPWARD : PAD_DOWNWARD);

  xinner = x;

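  /* Added note: PARTIAL is in bytes; NREGS is the corresponding number
     of whole words of the argument that will be passed in registers.  */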
  int nregs = partial / UNITS_PER_WORD;
  rtx *tmp_regs = NULL;
  int overlapping = 0;

  if (mode == BLKmode
      || (STRICT_ALIGNMENT && align < GET_MODE_ALIGNMENT (mode)))
    {
      /* Copy a block into the stack, entirely or partially.  */

      rtx temp;
      int used;
      int offset;
      int skip;

      offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
      used = partial - offset;

      if (mode != BLKmode)
        {
          /* A value is to be stored in an insufficiently aligned
             stack slot; copy via a suitably aligned slot if
             necessary.  */
          size = gen_int_mode (GET_MODE_SIZE (mode), Pmode);
          if (!MEM_P (xinner))
            {
              temp = assign_temp (type, 1, 1);
              emit_move_insn (temp, xinner);
              xinner = temp;
            }
        }

      gcc_assert (size);

      /* USED is now the # of bytes we need not copy to the stack
         because registers will take care of them.  */

      if (partial != 0)
        xinner = adjust_address (xinner, BLKmode, used);

      /* If the partial register-part of the arg counts in its stack size,
         skip the part of stack space corresponding to the registers.
         Otherwise, start copying to the beginning of the stack space,
         by setting SKIP to 0.  */
      skip = (reg_parm_stack_space == 0) ? 0 : used;

#ifdef PUSH_ROUNDING
      /* NB: Let the backend know the number of bytes to push and
         decide if push insns should be generated.  */
      unsigned int push_size;
      if (CONST_INT_P (size))
        push_size = INTVAL (size);
      else
        push_size = 0;

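      /* Added note: a PUSH_SIZE of zero tells the push_argument target
         hook that the byte count is not known here, so the hook answers
         only whether push instructions may be used at all.  */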
      /* Do it with several push insns if that doesn't take lots of insns
         and if there is no difficulty with push insns that skip bytes
         on the stack for alignment purposes.  */
      if (args_addr == 0
          && targetm.calls.push_argument (push_size)
          && CONST_INT_P (size)
          && skip == 0
          && MEM_ALIGN (xinner) >= align
          && can_move_by_pieces ((unsigned) INTVAL (size) - used, align)
          /* Here we avoid the case of a structure whose weak alignment
             forces many pushes of a small amount of data,
             and such small pushes do rounding that causes trouble.  */
          && ((!targetm.slow_unaligned_access (word_mode, align))
              || align >= BIGGEST_ALIGNMENT
              || known_eq (PUSH_ROUNDING (align / BITS_PER_UNIT),
                           align / BITS_PER_UNIT))
          && known_eq (PUSH_ROUNDING (INTVAL (size)), INTVAL (size)))
        {
          /* Push padding now if padding above and stack grows down,
             or if padding below and stack grows up.
             But if space already allocated, this has already been done.  */
          if (maybe_ne (extra, 0)
              && args_addr == 0
              && where_pad != PAD_NONE
              && where_pad != stack_direction)
            anti_adjust_stack (gen_int_mode (extra, Pmode));

          move_by_pieces (NULL, xinner, INTVAL (size) - used, align,
                          RETURN_BEGIN);
        }
      else
#endif /* PUSH_ROUNDING */
        {
          rtx target;

          /* Otherwise make space on the stack and copy the data
             to the address of that space.  */

          /* Deduct words put into registers from the size we must copy.  */
          if (partial != 0)
            {
              if (CONST_INT_P (size))
                size = GEN_INT (INTVAL (size) - used);
              else
                size = expand_binop (GET_MODE (size), sub_optab, size,
                                     gen_int_mode (used, GET_MODE (size)),
                                     NULL_RTX, 0, OPTAB_LIB_WIDEN);
            }

          /* Get the address of the stack space.
             In this case, we do not deal with EXTRA separately.
             A single stack adjust will do.  */
          poly_int64 const_args_so_far;
          if (! args_addr)
            {
              temp = push_block (size, extra, where_pad == PAD_DOWNWARD);
              extra = 0;
            }
          else if (poly_int_rtx_p (args_so_far, &const_args_so_far))
            temp = memory_address (BLKmode,
                                   plus_constant (Pmode, args_addr,
                                                  skip + const_args_so_far));
          else
            temp = memory_address (BLKmode,
                                   plus_constant (Pmode,
                                                  gen_rtx_PLUS (Pmode,
                                                                args_addr,
                                                                args_so_far),
                                                  skip));

          if (!ACCUMULATE_OUTGOING_ARGS)
            {
              /* If the source is referenced relative to the stack pointer,
                 copy it to another register to stabilize it.  We do not need
                 to do this if we know that we won't be changing sp.  */

              if (reg_mentioned_p (virtual_stack_dynamic_rtx, temp)
                  || reg_mentioned_p (virtual_outgoing_args_rtx, temp))
                temp = copy_to_reg (temp);
            }

          target = gen_rtx_MEM (BLKmode, temp);

          /* We do *not* set_mem_attributes here, because incoming arguments
             may overlap with sibling call outgoing arguments and we cannot
             allow reordering of reads from function arguments with stores
             to outgoing arguments of sibling calls.  We do, however, want
             to record the alignment of the stack slot.  */
          /* ALIGN may well be better aligned than TYPE, e.g. due to
             PARM_BOUNDARY.  Assume the caller isn't lying.  */
          set_mem_align (target, align);

          /* If part should go in registers and pushing to that part would
             overwrite some of the values that need to go into regs, load the
             overlapping values into temporary pseudos to be moved into the
             hard regs at the end after the stack pushing has completed.
             We cannot load them directly into the hard regs here because
             they can be clobbered by the block move expansions.
             See PR 65358.  */

          if (partial > 0 && reg != 0 && mode == BLKmode
              && GET_CODE (reg) != PARALLEL)
            {
              overlapping = memory_load_overlap (XEXP (x, 0), temp, partial);
              if (overlapping > 0)
                {
                  gcc_assert (overlapping % UNITS_PER_WORD == 0);
                  overlapping /= UNITS_PER_WORD;

                  tmp_regs = XALLOCAVEC (rtx, overlapping);

                  for (int i = 0; i < overlapping; i++)
                    tmp_regs[i] = gen_reg_rtx (word_mode);

                  for (int i = 0; i < overlapping; i++)
                    emit_move_insn (tmp_regs[i],
                                    operand_subword_force (target, i, mode));
                }
              else if (overlapping == -1)
                overlapping = 0;
              /* Could not determine whether there is overlap.
                 Fail the sibcall.  */
              else
                {
                  overlapping = 0;
                  if (sibcall_p)
                    return false;
                }
            }

          /* If source is a constant VAR_DECL with a simple constructor,
             store the constructor to the stack instead of moving it.  */
          const_tree decl;
          if (partial == 0
              && MEM_P (xinner)
              && SYMBOL_REF_P (XEXP (xinner, 0))
              && (decl = SYMBOL_REF_DECL (XEXP (xinner, 0))) != NULL_TREE
              && VAR_P (decl)
              && TREE_READONLY (decl)
              && !TREE_SIDE_EFFECTS (decl)
              && immediate_const_ctor_p (DECL_INITIAL (decl), 2))
            store_constructor (DECL_INITIAL (decl), target, 0,
                               int_expr_size (DECL_INITIAL (decl)), false);
          else
            emit_block_move (target, xinner, size, BLOCK_OP_CALL_PARM);
        }
    }
  else if (partial > 0)
    {
      /* Scalar partly in registers.  This case is only supported
         for fixed-width modes.  */
      int num_words = GET_MODE_SIZE (mode).to_constant ();
      num_words /= UNITS_PER_WORD;
      int i;
      int not_stack;
      /* # bytes of start of argument
         that we must make space for but need not store.  */
      int offset = partial % (PARM_BOUNDARY / BITS_PER_UNIT);
      int args_offset = INTVAL (args_so_far);
      int skip;

      /* Push padding now if padding above and stack grows down,
         or if padding below and stack grows up.
         But if space already allocated, this has already been done.  */
      if (maybe_ne (extra, 0)
          && args_addr == 0
          && where_pad != PAD_NONE
          && where_pad != stack_direction)
        anti_adjust_stack (gen_int_mode (extra, Pmode));

      /* If we make space by pushing it, we might as well push
         the real data.  Otherwise, we can leave OFFSET nonzero
         and leave the space uninitialized.  */
      if (args_addr == 0)
        offset = 0;

      /* Now NOT_STACK gets the number of words that we don't need to
         allocate on the stack.  Convert OFFSET to words too.  */
      not_stack = (partial - offset) / UNITS_PER_WORD;
      offset /= UNITS_PER_WORD;

      /* If the partial register-part of the arg counts in its stack size,
         skip the part of stack space corresponding to the registers.
         Otherwise, start copying to the beginning of the stack space,
         by setting SKIP to 0.  */
      skip = (reg_parm_stack_space == 0) ? 0 : not_stack;

      if (CONSTANT_P (x) && !targetm.legitimate_constant_p (mode, x))
        x = validize_mem (force_const_mem (mode, x));

      /* If X is a hard register in a non-integer mode, copy it into a pseudo;
         SUBREGs of such registers are not allowed.  */
      if ((REG_P (x) && REGNO (x) < FIRST_PSEUDO_REGISTER
           && GET_MODE_CLASS (GET_MODE (x)) != MODE_INT))
        x = copy_to_reg (x);

      /* Loop over all the words allocated on the stack for this arg.  */
      /* We can do it by words, because any scalar bigger than a word
         has a size a multiple of a word.  */
      for (i = num_words - 1; i >= not_stack; i--)
        if (i >= not_stack + offset)
          if (!emit_push_insn (operand_subword_force (x, i, mode),
                               word_mode, NULL_TREE, NULL_RTX, align, 0,
                               NULL_RTX, 0, args_addr,
                               GEN_INT (args_offset + ((i - not_stack + skip)
                                                       * UNITS_PER_WORD)),
                               reg_parm_stack_space, alignment_pad, sibcall_p))
            return false;
    }
  else
    {
      rtx addr;
      rtx dest;

      /* Push padding now if padding above and stack grows down,
         or if padding below and stack grows up.
         But if space already allocated, this has already been done.  */
      if (maybe_ne (extra, 0)
          && args_addr == 0
          && where_pad != PAD_NONE
          && where_pad != stack_direction)
        anti_adjust_stack (gen_int_mode (extra, Pmode));

#ifdef PUSH_ROUNDING
      if (args_addr == 0 && targetm.calls.push_argument (0))
        emit_single_push_insn (mode, x, type);
      else
#endif
        {
          addr = simplify_gen_binary (PLUS, Pmode, args_addr, args_so_far);
          dest = gen_rtx_MEM (mode, memory_address (mode, addr));

          /* We do *not* set_mem_attributes here, because incoming arguments
             may overlap with sibling call outgoing arguments and we cannot
             allow reordering of reads from function arguments with stores
             to outgoing arguments of sibling calls.  We do, however, want
             to record the alignment of the stack slot.  */
          /* ALIGN may well be better aligned than TYPE, e.g. due to
             PARM_BOUNDARY.  Assume the caller isn't lying.  */
          set_mem_align (dest, align);

          emit_move_insn (dest, x);
        }
    }

  /* Move the partial arguments into the registers and any overlapping
     values that we moved into the pseudos in tmp_regs.  */
  if (partial > 0 && reg != 0)
    {
      /* Handle calls that pass values in multiple non-contiguous locations.
         The Irix 6 ABI has examples of this.  */
      if (GET_CODE (reg) == PARALLEL)
        emit_group_load (reg, x, type, -1);
      else
        {
          gcc_assert (partial % UNITS_PER_WORD == 0);
          move_block_to_reg (REGNO (reg), x, nregs - overlapping, mode);

          for (int i = 0; i < overlapping; i++)
            emit_move_insn (gen_rtx_REG (word_mode, REGNO (reg)
                                         + nregs - overlapping + i),
                            tmp_regs[i]);
        }
    }

  if (maybe_ne (extra, 0) && args_addr == 0 && where_pad == stack_direction)
    anti_adjust_stack (gen_int_mode (extra, Pmode));

  if (alignment_pad && args_addr == 0)
    anti_adjust_stack (alignment_pad);

  return true;
}
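
/* Added note: a false return from emit_push_insn can only happen for a
   sibcall attempt whose argument overlap cannot be disproved; the caller
   is then expected to fall back to a normal call sequence.  */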

/* Return X if X can be used as a subtarget in a sequence of arithmetic
   operations.  */

static rtx
get_subtarget (rtx x)
{
  return (optimize
          || x == 0
          /* Only registers can be subtargets.  */
          || !REG_P (x)
          /* Don't use hard regs to avoid extending their life.  */
          || REGNO (x) < FIRST_PSEUDO_REGISTER
          ? 0 : x);
}
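
/* Added note: in other words, X is only reused as a subtarget when not
   optimizing and X is a pseudo register; returning 0 lets the caller
   choose a fresh target instead.  */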

/* A subroutine of expand_assignment.  Optimize FIELD op= VAL, where
   FIELD is a bitfield.  Returns true if the optimization was successful,
   and there's nothing else to do.  */

static bool
optimize_bitfield_assignment_op (poly_uint64 pbitsize,
                                 poly_uint64 pbitpos,
                                 poly_uint64 pbitregion_start,
                                 poly_uint64 pbitregion_end,
                                 machine_mode mode1, rtx str_rtx,
                                 tree to, tree src, bool reverse)
{
  /* str_mode is not guaranteed to be a scalar type.  */
  machine_mode str_mode = GET_MODE (str_rtx);
  unsigned int str_bitsize;
  tree op0, op1;
  rtx value, result;
  optab binop;
  gimple *srcstmt;
  enum tree_code code;

  unsigned HOST_WIDE_INT bitsize, bitpos, bitregion_start, bitregion_end;
  if (mode1 != VOIDmode
      || !pbitsize.is_constant (&bitsize)
      || !pbitpos.is_constant (&bitpos)
      || !pbitregion_start.is_constant (&bitregion_start)
      || !pbitregion_end.is_constant (&bitregion_end)
      || bitsize >= BITS_PER_WORD
      || !GET_MODE_BITSIZE (str_mode).is_constant (&str_bitsize)
      || str_bitsize > BITS_PER_WORD
      || TREE_SIDE_EFFECTS (to)
      || TREE_THIS_VOLATILE (to))
    return false;

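  /* Added note: the stored value must be an SSA name defined by a binary
     operation on the current value of the bitfield itself, i.e. the
     FIELD op= VAL pattern; the use-def walk below verifies this.  */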
  STRIP_NOPS (src);
  if (TREE_CODE (src) != SSA_NAME)
    return false;
  if (TREE_CODE (TREE_TYPE (src)) != INTEGER_TYPE)
    return false;

  srcstmt = get_gimple_for_ssa_name (src);
  if (!srcstmt
      || TREE_CODE_CLASS (gimple_assign_rhs_code (srcstmt)) != tcc_binary)
    return false;

  code = gimple_assign_rhs_code (srcstmt);

  op0 = gimple_assign_rhs1 (srcstmt);

  /* If OP0 is an SSA_NAME, then we want to walk the use-def chain
     to find its initialization.  Hopefully the initialization will
     be from a bitfield load.  */
  if (TREE_CODE (op0) == SSA_NAME)
    {
      gimple *op0stmt = get_gimple_for_ssa_name (op0);

      /* We want to eventually have OP0 be the same as TO, which
         should be a bitfield.  */
      if (!op0stmt
          || !is_gimple_assign (op0stmt)
          || gimple_assign_rhs_code (op0stmt) != TREE_CODE (to))
        return false;
      op0 = gimple_assign_rhs1 (op0stmt);
    }

  op1 = gimple_assign_rhs2 (srcstmt);

  if (!operand_equal_p (to, op0, 0))
    return false;

  if (MEM_P (str_rtx))
    {
      unsigned HOST_WIDE_INT offset1;

      if (str_bitsize == 0 || str_bitsize > BITS_PER_WORD)
        str_bitsize = BITS_PER_WORD;

      scalar_int_mode best_mode;
      if (!get_best_mode (bitsize, bitpos, bitregion_start, bitregion_end,
                          MEM_ALIGN (str_rtx), str_bitsize, false, &best_mode))
        return false;
      str_mode = best_mode;
      str_bitsize = GET_MODE_BITSIZE (best_mode);

      offset1 = bitpos;
      bitpos %= str_bitsize;
      offset1 = (offset1 - bitpos) / BITS_PER_UNIT;
      str_rtx = adjust_address (str_rtx, str_mode, offset1);
    }
  else if (!REG_P (str_rtx) && GET_CODE (str_rtx) != SUBREG)
    return false;

  /* If the bit field covers the whole REG/MEM, store_field
     will likely generate better code.  */
  if (bitsize >= str_bitsize)
    return false;

  /* We can't handle fields split across multiple entities.  */
  if (bitpos + bitsize > str_bitsize)
    return false;

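  /* Added example: with str_bitsize == 32, bitpos == 0 and bitsize == 8,
     the adjustment below yields bitpos == 32 - 0 - 8 == 24, placing the
     field in the topmost byte when the endianness flip applies.  */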
  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    bitpos = str_bitsize - bitpos - bitsize;

  switch (code)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
      /* For now, just optimize the case of the topmost bitfield
         where we don't need to do any masking and also
         1 bit bitfields where xor can be used.
         We might win by one instruction for the other bitfields
         too if insv/extv instructions aren't used, so that
         can be added later.  */
      if ((reverse || bitpos + bitsize != str_bitsize)
          && (bitsize != 1 || TREE_CODE (op1) != INTEGER_CST))
        break;

      value = expand_expr (op1, NULL_RTX, str_mode, EXPAND_NORMAL);