File: build/gcc/lra-constraints.cc
Warning: line 5117, column 3: Value stored to 'changed_p' is never read

/* Code for RTL transformations to satisfy insn constraints.
   Copyright (C) 2010-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3, or (at your option) any later
   version.

   GCC is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */


/* This file contains code for 3 passes: constraint pass,
   inheritance/split pass, and pass for undoing failed inheritance and
   split.

   The major goal of the constraint pass is to transform RTL to satisfy
   insn and address constraints by:
     o choosing insn alternatives;
     o generating *reload insns* (or reloads in brief) and *reload
       pseudos* which will get necessary hard registers later;
     o substituting pseudos with equivalent values and removing the
       instructions that initialized those pseudos.

   The constraint pass has the biggest and most complicated code in
   LRA.  There are a lot of important details like:
     o reuse of input reload pseudos to simplify reload pseudo
       allocations;
     o some heuristics to choose insn alternative to improve the
       inheritance;
     o early clobbers etc.

   The pass mimics the former reload pass in alternative choosing
   because the reload pass is oriented to the current machine
   description model.  It might be changed if the machine description
   model is changed.

   There is special code for preventing all LRA and this pass cycling
   in case of bugs.

   On the first iteration of the pass we process every instruction and
   choose an alternative for each one.  On subsequent iterations we try
   to avoid reprocessing instructions if we can be sure that the old
   choice is still valid.

   The inheritance/split pass transforms the code to achieve
   inheritance and live range splitting.  It is done on a backward
   traversal of EBBs.

   The inheritance optimization goal is to reuse values in hard
   registers.  There is an analogous optimization in the old reload
   pass.  The inheritance is achieved by the following transformation:

       reload_p1 <- p              reload_p1 <- p
       ...                         new_p <- reload_p1
       ...                  =>     ...
       reload_p2 <- p              reload_p2 <- new_p

   where p is spilled and not changed between the insns.  Reload_p1 is
   also called *original pseudo* and new_p is called *inheritance
   pseudo*.

   The subsequent assignment pass will try to assign the same (or
   another if it is not possible) hard register to new_p as to
   reload_p1 or reload_p2.

   If the assignment pass fails to assign a hard register to new_p,
   this file will undo the inheritance and restore the original code.
   This is because implementing the above sequence with a spilled
   new_p would make the code much worse.  The inheritance is done in
   EBB scope.  The above is just a simplified example to get an idea
   of the inheritance as the inheritance is also done for non-reload
   insns.

   Splitting (transformation) is also done in EBB scope on the same
   pass as the inheritance:

       r <- ... or ... <- r        r <- ... or ... <- r
       ...                         s <- r (new insn -- save)
       ...                  =>
       ...                         r <- s (new insn -- restore)
       ... <- r                    ... <- r

   The *split pseudo* s is assigned to the hard register of the
   original pseudo or hard register r.

   Splitting is done:
     o In EBBs with high register pressure for global pseudos (living
       in at least 2 BBs) and assigned to hard registers when there
       is more than one reload needing the hard registers;
     o for pseudos needing save/restore code around calls.

   If the split pseudo still has the same hard register as the
   original pseudo after the subsequent assignment pass or the
   original pseudo was split, the opposite transformation is done on
   the same pass for undoing inheritance.  */
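
/* A concrete sketch of the inheritance transformation above, with
   hypothetical pseudo numbers: if pseudo p is spilled and two reload
   insns in one EBB load it into reload pseudos r140 and r141,

       r140 <- p                   r140 <- p
       ...                         new_p <- r140
       ...                  =>     ...
       r141 <- p                   r141 <- new_p

   the second load from p's stack slot is replaced by a copy from the
   inheritance pseudo new_p, which the subsequent assignment pass will
   try to put into the same hard register as r140 or r141.  */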

#undef REG_OK_STRICT

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "hooks.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "output.h"
#include "addresses.h"
#include "expr.h"
#include "cfgrtl.h"
#include "rtl-error.h"
#include "lra.h"
#include "lra-int.h"
#include "print-rtl.h"
#include "function-abi.h"
#include "rtl-iter.h"

/* Value of LRA_CURR_RELOAD_NUM at the beginning of BB of the current
   insn.  Remember that LRA_CURR_RELOAD_NUM is the number of emitted
   reload insns.  */
static int bb_reload_num;

/* The current insn being processed, its single set (NULL otherwise),
   and its data (basic block, the insn data, the insn static data,
   and the mode of each operand).  */
static rtx_insn *curr_insn;
static rtx curr_insn_set;
static basic_block curr_bb;
static lra_insn_recog_data_t curr_id;
static struct lra_static_insn_data *curr_static_id;
static machine_mode curr_operand_mode[MAX_RECOG_OPERANDS];
/* Mode of the register substituted by its equivalence with VOIDmode
   (e.g. a constant) and whose subreg is given as an operand of the
   current insn.  VOIDmode in all other cases.  */
static machine_mode original_subreg_reg_mode[MAX_RECOG_OPERANDS];



/* Start numbers for new registers and insns at the current constraints
   pass start.  */
static int new_regno_start;
static int new_insn_uid_start;

/* If LOC is nonnull, strip any outer subreg from it.  */
static inline rtx *
strip_subreg (rtx *loc)
{
  return loc && GET_CODE (*loc) == SUBREG ? &SUBREG_REG (*loc) : loc;
}

/* Return hard regno of REGNO or, if it was not assigned to a hard
   register, use a hard register from its allocno class.  */
static int
get_try_hard_regno (int regno)
{
  int hard_regno;
  enum reg_class rclass;

  if ((hard_regno = regno) >= FIRST_PSEUDO_REGISTER)
    hard_regno = lra_get_regno_hard_regno (regno);
  if (hard_regno >= 0)
    return hard_regno;
  rclass = lra_get_allocno_class (regno);
  if (rclass == NO_REGS)
    return -1;
  return ira_class_hard_regs[rclass][0];
}

/* Return the hard regno of X after removing its subreg.  If X is not a
   register or a subreg of a register, return -1.  If X is a pseudo, use its
   assignment.  If X is a hard regno, return the final hard regno which will be
   after elimination.  */
static int
get_hard_regno (rtx x)
{
  rtx reg;
  int hard_regno;

  reg = x;
  if (SUBREG_P (x))
    reg = SUBREG_REG (x);
  if (! REG_P (reg))
    return -1;
  if (! HARD_REGISTER_NUM_P (hard_regno = REGNO (reg)))
    hard_regno = lra_get_regno_hard_regno (hard_regno);
  if (hard_regno < 0)
    return -1;
  if (HARD_REGISTER_NUM_P (REGNO (reg)))
    hard_regno = lra_get_elimination_hard_regno (hard_regno);
  if (SUBREG_P (x))
    hard_regno += subreg_regno_offset (hard_regno, GET_MODE (reg),
                                       SUBREG_BYTE (x), GET_MODE (x));
  return hard_regno;
}

/* If REGNO is a hard register or has been allocated a hard register,
   return the class of that register.  If REGNO is a reload pseudo
   created by the current constraints pass, return its allocno class.
   Return NO_REGS otherwise.  */
static enum reg_class
get_reg_class (int regno)
{
  int hard_regno;

  if (! HARD_REGISTER_NUM_P (hard_regno = regno))
    hard_regno = lra_get_regno_hard_regno (regno);
  if (hard_regno >= 0)
    {
      hard_regno = lra_get_elimination_hard_regno (hard_regno);
      return REGNO_REG_CLASS (hard_regno);
    }
  if (regno >= new_regno_start)
    return lra_get_allocno_class (regno);
  return NO_REGS;
}

/* Return true if REG satisfies (or will satisfy) reg class constraint
   CL.  Use elimination first if REG is a hard register.  If REG is a
   reload pseudo created by this constraints pass, assume that it will
   be allocated a hard register from its allocno class, but allow that
   class to be narrowed to CL if it is currently a superset of CL and
   if either:

   - ALLOW_ALL_RELOAD_CLASS_CHANGES_P is true or
   - the instruction we're processing is not a reload move.

   If NEW_CLASS is nonnull, set *NEW_CLASS to the new allocno class of
   REGNO (reg), or NO_REGS if no change in its class was needed.  */
static bool
in_class_p (rtx reg, enum reg_class cl, enum reg_class *new_class,
            bool allow_all_reload_class_changes_p = false)
{
  enum reg_class rclass, common_class;
  machine_mode reg_mode;
  rtx src;
  int class_size, hard_regno, nregs, i, j;
  int regno = REGNO (reg);

  if (new_class != NULL)
    *new_class = NO_REGS;
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      rtx final_reg = reg;
      rtx *final_loc = &final_reg;

      lra_eliminate_reg_if_possible (final_loc);
      return TEST_HARD_REG_BIT (reg_class_contents[cl], REGNO (*final_loc));
    }
  reg_mode = GET_MODE (reg);
  rclass = get_reg_class (regno);
  src = curr_insn_set != NULL ? SET_SRC (curr_insn_set) : NULL;
  if (regno < new_regno_start
      /* Do not allow the constraints for reload instructions to
         influence the classes of new pseudos.  These reloads are
         typically moves that have many alternatives, and restricting
         reload pseudos for one alternative may lead to situations
         where other reload pseudos are no longer allocatable.  */
      || (!allow_all_reload_class_changes_p
          && INSN_UID (curr_insn) >= new_insn_uid_start
          && src != NULL
          && ((REG_P (src) || MEM_P (src))
              || (GET_CODE (src) == SUBREG
                  && (REG_P (SUBREG_REG (src)) || MEM_P (SUBREG_REG (src)))))))
    /* When we don't know what class will be used finally for reload
       pseudos, we use ALL_REGS.  */
    return ((regno >= new_regno_start && rclass == ALL_REGS)
            || (rclass != NO_REGS && ira_class_subset_p[rclass][cl]
                && ! hard_reg_set_subset_p (reg_class_contents[cl],
                                            lra_no_alloc_regs)));
  else
    {
      common_class = ira_reg_class_subset[rclass][cl];
      if (new_class != NULL)
        *new_class = common_class;
      if (hard_reg_set_subset_p (reg_class_contents[common_class],
                                 lra_no_alloc_regs))
        return false;
      /* Check that there are enough allocatable regs.  */
      class_size = ira_class_hard_regs_num[common_class];
      for (i = 0; i < class_size; i++)
        {
          hard_regno = ira_class_hard_regs[common_class][i];
          nregs = hard_regno_nregs (hard_regno, reg_mode);
          if (nregs == 1)
            return true;
          for (j = 0; j < nregs; j++)
            if (TEST_HARD_REG_BIT (lra_no_alloc_regs, hard_regno + j)
                || ! TEST_HARD_REG_BIT (reg_class_contents[common_class],
                                        hard_regno + j))
              break;
          if (j >= nregs)
            return true;
        }
      return false;
    }
}

/* Return true if REGNO satisfies a memory constraint.  */
static bool
in_mem_p (int regno)
{
  return get_reg_class (regno) == NO_REGS;
}

/* Return 1 if ADDR is a valid memory address for mode MODE in address
   space AS, and check that each pseudo has the proper kind of hard
   reg.  */
static int
valid_address_p (machine_mode mode ATTRIBUTE_UNUSED,
                 rtx addr, addr_space_t as)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  lra_assert (ADDR_SPACE_GENERIC_P (as));
  GO_IF_LEGITIMATE_ADDRESS (mode, addr, win);
  return 0;

 win:
  return 1;
#else
  return targetm.addr_space.legitimate_address_p (mode, addr, 0, as);
#endif
}

namespace {
  /* Temporarily eliminates registers in an address (for the lifetime of
     the object).  */
  class address_eliminator {
  public:
    address_eliminator (struct address_info *ad);
    ~address_eliminator ();

  private:
    struct address_info *m_ad;
    rtx *m_base_loc;
    rtx m_base_reg;
    rtx *m_index_loc;
    rtx m_index_reg;
  };
}

address_eliminator::address_eliminator (struct address_info *ad)
  : m_ad (ad),
    m_base_loc (strip_subreg (ad->base_term)),
    m_base_reg (NULL_RTX),
    m_index_loc (strip_subreg (ad->index_term)),
    m_index_reg (NULL_RTX)
{
  if (m_base_loc != NULL)
    {
      m_base_reg = *m_base_loc;
      /* If we have a non-legitimate address that was decomposed in a
         way we did not expect, don't do the elimination here.  In such
         a case the address will be reloaded and the elimination will
         be done in the reload insn eventually.  */
      if (REG_P (m_base_reg))
        lra_eliminate_reg_if_possible (m_base_loc);
      if (m_ad->base_term2 != NULL)
        *m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc != NULL)
    {
      m_index_reg = *m_index_loc;
      if (REG_P (m_index_reg))
        lra_eliminate_reg_if_possible (m_index_loc);
    }
}

address_eliminator::~address_eliminator ()
{
  if (m_base_loc && *m_base_loc != m_base_reg)
    {
      *m_base_loc = m_base_reg;
      if (m_ad->base_term2 != NULL)
        *m_ad->base_term2 = *m_ad->base_term;
    }
  if (m_index_loc && *m_index_loc != m_index_reg)
    *m_index_loc = m_index_reg;
}

/* Return true if the eliminated form of AD is a legitimate target address.
   If OP is a MEM, AD is the address within OP, otherwise OP should be
   ignored.  CONSTRAINT is one constraint that the operand may need
   to meet.  */
static bool
valid_address_p (rtx op, struct address_info *ad,
                 enum constraint_num constraint)
{
  address_eliminator eliminator (ad);

  /* Allow a memory OP if it matches CONSTRAINT, even if CONSTRAINT is more
     forgiving than "m".  We need to extract the memory from OP for a
     special memory constraint, e.g. bcst_mem_operand in the i386
     backend.  */
  if (MEM_P (extract_mem_from_operand (op))
      && insn_extra_relaxed_memory_constraint (constraint)
      && constraint_satisfied_p (op, constraint))
    return true;

  return valid_address_p (ad->mode, *ad->outer, ad->as);
}

/* For special_memory_operand, MEM_P (op) can be false,
   e.g. for bcst_mem_operand in the i386 backend.
   Extract and return the real memory operand, or OP itself.  */
rtx
extract_mem_from_operand (rtx op)
{
  for (rtx x = op;; x = XEXP (x, 0))
    {
      if (MEM_P (x))
        return x;
      if (GET_RTX_LENGTH (GET_CODE (x)) != 1
          || GET_RTX_FORMAT (GET_CODE (x))[0] != 'e')
        break;
    }
  return op;
}
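
/* For instance, given an i386 broadcast operand such as
   op = (vec_duplicate:V4SF (mem:SF (reg:SI 100))) -- the register
   number is hypothetical -- the loop steps through the single-operand
   VEC_DUPLICATE wrapper and returns the inner (mem:SF ...); for a
   plain (reg:SI 100) it falls out of the loop and returns OP
   unchanged.  */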

/* Return true if the eliminated form of memory reference OP satisfies
   extra (special) memory constraint CONSTRAINT.  */
static bool
satisfies_memory_constraint_p (rtx op, enum constraint_num constraint)
{
  struct address_info ad;
  rtx mem = extract_mem_from_operand (op);
  if (!MEM_P (mem))
    return false;

  decompose_mem_address (&ad, mem);
  address_eliminator eliminator (&ad);
  return constraint_satisfied_p (op, constraint);
}

/* Return true if the eliminated form of address AD satisfies extra
   address constraint CONSTRAINT.  */
static bool
satisfies_address_constraint_p (struct address_info *ad,
                                enum constraint_num constraint)
{
  address_eliminator eliminator (ad);
  return constraint_satisfied_p (*ad->outer, constraint);
}

/* Return true if the eliminated form of address OP satisfies extra
   address constraint CONSTRAINT.  */
static bool
satisfies_address_constraint_p (rtx op, enum constraint_num constraint)
{
  struct address_info ad;

  decompose_lea_address (&ad, &op);
  return satisfies_address_constraint_p (&ad, constraint);
}

/* Initiate equivalences for LRA.  As we keep original equivalences
   before any elimination, we need to make copies, otherwise any change
   in insns might change the equivalences.  */
void
lra_init_equiv (void)
{
  ira_expand_reg_equiv ();
  for (int i = FIRST_PSEUDO_REGISTER; i < max_reg_num (); i++)
    {
      rtx res;

      if ((res = ira_reg_equiv[i].memory) != NULL_RTX)
        ira_reg_equiv[i].memory = copy_rtx (res);
      if ((res = ira_reg_equiv[i].invariant) != NULL_RTX)
        ira_reg_equiv[i].invariant = copy_rtx (res);
    }
}

static rtx loc_equivalence_callback (rtx, const_rtx, void *);

/* Update equivalence for REGNO.  We need to do this as the equivalence
   might contain other pseudos which are changed by their
   equivalences.  */
static void
update_equiv (int regno)
{
  rtx x;

  if ((x = ira_reg_equiv[regno].memory) != NULL_RTX)
    ira_reg_equiv[regno].memory
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
                                 NULL_RTX);
  if ((x = ira_reg_equiv[regno].invariant) != NULL_RTX)
    ira_reg_equiv[regno].invariant
      = simplify_replace_fn_rtx (x, NULL_RTX, loc_equivalence_callback,
                                 NULL_RTX);
}

/* If we have decided to substitute X with another value, return that
   value, otherwise return X.  */
static rtx
get_equiv (rtx x)
{
  int regno;
  rtx res;

  if (! REG_P (x) || (regno = REGNO (x)) < FIRST_PSEUDO_REGISTER
      || ! ira_reg_equiv[regno].defined_p
      || ! ira_reg_equiv[regno].profitable_p
      || lra_get_regno_hard_regno (regno) >= 0)
    return x;
  if ((res = ira_reg_equiv[regno].memory) != NULL_RTX)
    {
      if (targetm.cannot_substitute_mem_equiv_p (res))
        return x;
      return res;
    }
  if ((res = ira_reg_equiv[regno].constant) != NULL_RTX)
    return res;
  if ((res = ira_reg_equiv[regno].invariant) != NULL_RTX)
    return res;
  gcc_unreachable ();
}

/* If we have decided to substitute X with the equivalent value,
   return that value after elimination for INSN, otherwise return
   X.  */
static rtx
get_equiv_with_elimination (rtx x, rtx_insn *insn)
{
  rtx res = get_equiv (x);

  if (x == res || CONSTANT_P (res))
    return res;
  return lra_eliminate_regs_1 (insn, res, GET_MODE (res),
                               false, false, 0, true);
}

/* Set up curr_operand_mode.  */
static void
init_curr_operand_mode (void)
{
  int nop = curr_static_id->n_operands;
  for (int i = 0; i < nop; i++)
    {
      machine_mode mode = GET_MODE (*curr_id->operand_loc[i]);
      if (mode == VOIDmode)
        {
          /* The .md mode for address operands is the mode of the
             addressed value rather than the mode of the address itself.  */
          if (curr_id->icode >= 0 && curr_static_id->operand[i].is_address)
            mode = Pmode;
          else
            mode = curr_static_id->operand[i].mode;
        }
      curr_operand_mode[i] = mode;
    }
}



/* The page contains code to reuse input reloads.  */

/* Structure describes input reload of the current insn.  */
struct input_reload
{
  /* True for input reload of matched operands.  */
  bool match_p;
  /* Reloaded value.  */
  rtx input;
  /* Reload pseudo used.  */
  rtx reg;
};

/* The number of elements in the following array.  */
static int curr_insn_input_reloads_num;
/* Array containing info about input reloads.  It is used to find the
   same input reload and reuse the reload pseudo in this case.  */
static struct input_reload curr_insn_input_reloads[LRA_MAX_INSN_RELOADS];

/* Initiate data concerning reuse of input reloads for the current
   insn.  */
static void
init_curr_insn_input_reloads (void)
{
  curr_insn_input_reloads_num = 0;
}

/* The canonical form of an rtx inside a MEM is not necessarily the same as the
   canonical form of the rtx outside the MEM.  Fix this up in the case that
   we're reloading an address (and therefore pulling it outside a MEM).  */
static rtx
canonicalize_reload_addr (rtx addr)
{
  subrtx_var_iterator::array_type array;
  FOR_EACH_SUBRTX_VAR (iter, array, addr, NONCONST)
    {
      rtx x = *iter;
      if (GET_CODE (x) == MULT && CONST_INT_P (XEXP (x, 1)))
        {
          const HOST_WIDE_INT ci = INTVAL (XEXP (x, 1));
          const int pwr2 = exact_log2 (ci);
          if (pwr2 > 0)
            {
              /* Rewrite this to use a shift instead, which is canonical when
                 outside of a MEM.  */
              PUT_CODE (x, ASHIFT);
              XEXP (x, 1) = GEN_INT (pwr2);
            }
        }
    }

  return addr;
}
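
/* As an example (hypothetical register numbers), reloading the address
   of (mem:SI (plus:DI (mult:DI (reg:DI 100) (const_int 4))
                       (reg:DI 101)))
   rewrites the MULT in place to (ashift:DI (reg:DI 100) (const_int 2)),
   since exact_log2 (4) == 2 and a shift is the canonical form of a
   power-of-two multiplication outside of a MEM.  */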

/* Create a new pseudo using MODE, RCLASS, EXCLUDE_START_HARD_REGS, ORIGINAL or
   reuse an existing reload pseudo.  Don't reuse an existing reload pseudo if
   IN_SUBREG_P is true and the reused pseudo should be wrapped up in a SUBREG.
   The result pseudo is returned through RESULT_REG.  Return TRUE if we created
   a new pseudo, FALSE if we reused an existing reload pseudo.  Use TITLE to
   describe new registers for debug purposes.  */
static bool
get_reload_reg (enum op_type type, machine_mode mode, rtx original,
                enum reg_class rclass, HARD_REG_SET *exclude_start_hard_regs,
                bool in_subreg_p, const char *title, rtx *result_reg)
{
  int i, regno;
  enum reg_class new_class;
  bool unique_p = false;

  if (type == OP_OUT)
    {
      /* Output reload registers tend to start out with a conservative
         choice of register class.  Usually this is ALL_REGS, although
         a target might narrow it (for performance reasons) through
         targetm.preferred_reload_class.  It's therefore quite common
         for a reload instruction to require a more restrictive class
         than the class that was originally assigned to the reload register.

         In these situations, it's more efficient to refine the choice
         of register class rather than create a second reload register.
         This also helps to avoid cycling for registers that are only
         used by reload instructions.  */
      if (REG_P (original)
          && (int) REGNO (original) >= new_regno_start
          && INSN_UID (curr_insn) >= new_insn_uid_start
          && in_class_p (original, rclass, &new_class, true))
        {
          unsigned int regno = REGNO (original);
          if (lra_dump_file != NULL)
            {
              fprintf (lra_dump_file, " Reuse r%d for output ", regno);
              dump_value_slim (lra_dump_file, original, 1);
            }
          if (new_class != lra_get_allocno_class (regno))
            lra_change_class (regno, new_class, ", change to", false);
          if (lra_dump_file != NULL)
            fprintf (lra_dump_file, "\n");
          *result_reg = original;
          return false;
        }
      *result_reg
        = lra_create_new_reg_with_unique_value (mode, original, rclass,
                                                exclude_start_hard_regs, title);
      return true;
    }
  /* Prevent reusing the value of an expression with side effects,
     e.g. a volatile memory reference.  */
  if (! side_effects_p (original))
    for (i = 0; i < curr_insn_input_reloads_num; i++)
      {
        if (! curr_insn_input_reloads[i].match_p
            && rtx_equal_p (curr_insn_input_reloads[i].input, original)
            && in_class_p (curr_insn_input_reloads[i].reg, rclass, &new_class))
          {
            rtx reg = curr_insn_input_reloads[i].reg;
            regno = REGNO (reg);
            /* If input is equal to original and both are VOIDmode,
               GET_MODE (reg) might be still different from mode.
               Ensure we don't return *result_reg with wrong mode.  */
            if (GET_MODE (reg) != mode)
              {
                if (in_subreg_p)
                  continue;
                if (maybe_lt (GET_MODE_SIZE (GET_MODE (reg)),
                              GET_MODE_SIZE (mode)))
                  continue;
                reg = lowpart_subreg (mode, reg, GET_MODE (reg));
                if (reg == NULL_RTX || GET_CODE (reg) != SUBREG)
                  continue;
              }
            *result_reg = reg;
            if (lra_dump_file != NULL)
              {
                fprintf (lra_dump_file, " Reuse r%d for reload ", regno);
                dump_value_slim (lra_dump_file, original, 1);
              }
            if (new_class != lra_get_allocno_class (regno))
              lra_change_class (regno, new_class, ", change to", false);
            if (lra_dump_file != NULL)
              fprintf (lra_dump_file, "\n");
            return false;
          }
        /* If we have an input reload with a different mode, make sure it
           will get a different hard reg.  */
        else if (REG_P (original)
                 && REG_P (curr_insn_input_reloads[i].input)
                 && REGNO (original) == REGNO (curr_insn_input_reloads[i].input)
                 && (GET_MODE (original)
                     != GET_MODE (curr_insn_input_reloads[i].input)))
          unique_p = true;
      }
  *result_reg = (unique_p
                 ? lra_create_new_reg_with_unique_value
                 : lra_create_new_reg) (mode, original, rclass,
                                        exclude_start_hard_regs, title);
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = original;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = false;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = *result_reg;
  return true;
}


/* The page contains major code to choose the current insn alternative
   and generate reloads for it.  */

/* Return the offset from REGNO of the least significant register
   in (reg:MODE REGNO).

   This function is used to tell whether two registers satisfy
   a matching constraint.  (reg:MODE1 REGNO1) matches (reg:MODE2 REGNO2) if:

         REGNO1 + lra_constraint_offset (REGNO1, MODE1)
         == REGNO2 + lra_constraint_offset (REGNO2, MODE2)  */
int
lra_constraint_offset (int regno, machine_mode mode)
{
  lra_assert (regno < FIRST_PSEUDO_REGISTER);

  scalar_int_mode int_mode;
  if (WORDS_BIG_ENDIAN
      && is_a <scalar_int_mode> (mode, &int_mode)
      && GET_MODE_SIZE (int_mode) > UNITS_PER_WORD)
    return hard_regno_nregs (regno, mode) - 1;
  return 0;
}
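
/* A worked example of the formula above: on a hypothetical
   WORDS_BIG_ENDIAN target with 4-byte words, (reg:DI 10) occupies hard
   registers 10 and 11 with the least significant word in register 11,
   so lra_constraint_offset (10, DImode) == 1 while
   lra_constraint_offset (11, SImode) == 0.  Hence (reg:DI 10) matches
   (reg:SI 11) for a matching constraint because 10 + 1 == 11 + 0.  */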

/* Like rtx_equal_p except that it allows a REG and a SUBREG to match
   if they are the same hard reg, and has special hacks for
   auto-increment and auto-decrement.  This is specifically intended for
   process_alt_operands to use in determining whether two operands
   match.  X is the operand whose number is the lower of the two.

   It is supposed that X is the output operand and Y is the input
   operand.  Y_HARD_REGNO is the final hard regno of register Y or
   register in subreg Y as we know it now.  Otherwise, it is a
   negative value.  */
static bool
operands_match_p (rtx x, rtx y, int y_hard_regno)
{
  int i;
  RTX_CODE code = GET_CODE (x);
  const char *fmt;

  if (x == y)
    return true;
  if ((code == REG || (code == SUBREG && REG_P (SUBREG_REG (x))))
      && (REG_P (y) || (GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y)))))
    {
      int j;

      i = get_hard_regno (x);
      if (i < 0)
        goto slow;

      if ((j = y_hard_regno) < 0)
        goto slow;

      i += lra_constraint_offset (i, GET_MODE (x));
      j += lra_constraint_offset (j, GET_MODE (y));

      return i == j;
    }

  /* If two operands must match, because they are really a single
     operand of an assembler insn, then two post-increments are invalid
     because the assembler insn would increment only once.  On the
     other hand, a post-increment matches ordinary indexing if the
     post-increment is the output operand.  */
  if (code == POST_DEC || code == POST_INC || code == POST_MODIFY)
    return operands_match_p (XEXP (x, 0), y, y_hard_regno);

  /* Two pre-increments are invalid because the assembler insn would
     increment only once.  On the other hand, a pre-increment matches
     ordinary indexing if the pre-increment is the input operand.  */
  if (GET_CODE (y) == PRE_DEC || GET_CODE (y) == PRE_INC
      || GET_CODE (y) == PRE_MODIFY)
    return operands_match_p (x, XEXP (y, 0), -1);

 slow:

  if (code == REG && REG_P (y))
    return REGNO (x) == REGNO (y);

  if (code == REG && GET_CODE (y) == SUBREG && REG_P (SUBREG_REG (y))
      && x == SUBREG_REG (y))
    return true;
  if (GET_CODE (y) == REG && code == SUBREG && REG_P (SUBREG_REG (x))
      && SUBREG_REG (x) == y)
    return true;

  /* Now we have disposed of all the cases in which different rtx
     codes can match.  */
  if (code != GET_CODE (y))
    return false;

  /* (MULT:SI x y) and (MULT:HI x y) are NOT equivalent.  */
  if (GET_MODE (x) != GET_MODE (y))
    return false;

  switch (code)
    {
    CASE_CONST_UNIQUE:
      return false;

    case CONST_VECTOR:
      if (!same_vector_encodings_p (x, y))
        return false;
      break;

    case LABEL_REF:
      return label_ref_label (x) == label_ref_label (y);
    case SYMBOL_REF:
      return XSTR (x, 0) == XSTR (y, 0);

    default:
      break;
    }

  /* Compare the elements.  If any pair of corresponding elements fail
     to match, return false for the whole thing.  */

  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      int val, j;
      switch (fmt[i])
        {
        case 'w':
          if (XWINT (x, i) != XWINT (y, i))
            return false;
          break;

        case 'i':
          if (XINT (x, i) != XINT (y, i))
            return false;
          break;

        case 'p':
          if (maybe_ne (SUBREG_BYTE (x), SUBREG_BYTE (y)))
            return false;
          break;

        case 'e':
          val = operands_match_p (XEXP (x, i), XEXP (y, i), -1);
          if (val == 0)
            return false;
          break;

        case '0':
          break;

        case 'E':
          if (XVECLEN (x, i) != XVECLEN (y, i))
            return false;
          for (j = XVECLEN (x, i) - 1; j >= 0; --j)
            {
              val = operands_match_p (XVECEXP (x, i, j), XVECEXP (y, i, j), -1);
              if (val == 0)
                return false;
            }
          break;

        /* It is believed that rtx's at this level will never
           contain anything but integers and other rtx's, except for
           within LABEL_REFs and SYMBOL_REFs.  */
        default:
          gcc_unreachable ();
        }
    }
  return true;
}

/* True if X is a constant that can be forced into the constant pool.
   MODE is the mode of the operand, or VOIDmode if not known.  */
#define CONST_POOL_OK_P(MODE, X)		\
  ((MODE) != VOIDmode				\
   && CONSTANT_P (X)				\
   && GET_CODE (X) != HIGH			\
   && GET_MODE_SIZE (MODE).is_constant ()	\
   && !targetm.cannot_force_const_mem (MODE, X))

/* True if C is a non-empty register class that has too few registers
   to be safely used as a reload target class.  */
#define SMALL_REGISTER_CLASS_P(C)		\
  (ira_class_hard_regs_num [(C)] == 1		\
   || (ira_class_hard_regs_num [(C)] >= 1	\
       && targetm.class_likely_spilled_p (C)))
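
/* For example, a class with exactly one allocatable hard register
   (such as i386's AREG, which contains only %eax) satisfies this
   predicate: ira_class_hard_regs_num[AREG] == 1, so two reloads in the
   same insn that both needed the class could never get different
   registers.  */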

/* If REG is a reload pseudo, try to make its class satisfy CL.  */
static void
narrow_reload_pseudo_class (rtx reg, enum reg_class cl)
{
  enum reg_class rclass;

  /* Do not make the class more accurate for generated reloads.  They
     are mostly moves with a lot of constraints.  Making the class more
     accurate may result in a very narrow class and make it impossible
     to find registers for several reloads of one insn.  */
  if (INSN_UID (curr_insn) >= new_insn_uid_start)
    return;
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);
  if (! REG_P (reg) || (int) REGNO (reg) < new_regno_start)
    return;
  if (in_class_p (reg, cl, &rclass) && rclass != cl)
    lra_change_class (REGNO (reg), rclass, " Change to", true);
}

/* Searches X for any reference to a reg with the same value as REGNO,
   returning the rtx of the reference found if any.  Otherwise,
   returns NULL_RTX.  */
static rtx
regno_val_use_in (unsigned int regno, rtx x)
{
  const char *fmt;
  int i, j;
  rtx tem;

  if (REG_P (x) && lra_reg_info[REGNO (x)].val == lra_reg_info[regno].val)
    return x;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if ((tem = regno_val_use_in (regno, XEXP (x, i))))
            return tem;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (x, i) - 1; j >= 0; j--)
          if ((tem = regno_val_use_in (regno, XVECEXP (x, i, j))))
            return tem;
    }

  return NULL_RTX;
}

/* Return true if all current insn non-output operands except INS (it
   has a negative end marker) do not use pseudos with the same value
   as REGNO.  */
static bool
check_conflict_input_operands (int regno, signed char *ins)
{
  int in;
  int n_operands = curr_static_id->n_operands;

  for (int nop = 0; nop < n_operands; nop++)
    if (! curr_static_id->operand[nop].is_operator
        && curr_static_id->operand[nop].type != OP_OUT)
      {
        for (int i = 0; (in = ins[i]) >= 0; i++)
          if (in == nop)
            break;
        if (in < 0
            && regno_val_use_in (regno, *curr_id->operand_loc[nop]) != NULL_RTX)
          return false;
      }
  return true;
}

/* Generate reloads for matching OUT and INS (array of input operand numbers
   with end marker -1) with reg class GOAL_CLASS and EXCLUDE_START_HARD_REGS,
   considering output operands OUTS (similar array to INS) needing to be in
   different registers.  Add input and output reloads correspondingly to the
   lists *BEFORE and *AFTER.  OUT might be negative.  In this case we generate
   input reloads for matched input operands INS.  EARLY_CLOBBER_P is a flag
   that the output operand is early clobbered for the chosen alternative.  */
static void
match_reload (signed char out, signed char *ins, signed char *outs,
              enum reg_class goal_class, HARD_REG_SET *exclude_start_hard_regs,
              rtx_insn **before, rtx_insn **after, bool early_clobber_p)
{
  bool out_conflict;
  int i, in;
  rtx new_in_reg, new_out_reg, reg;
  machine_mode inmode, outmode;
  rtx in_rtx = *curr_id->operand_loc[ins[0]];
  rtx out_rtx = out < 0 ? in_rtx : *curr_id->operand_loc[out];

  inmode = curr_operand_mode[ins[0]];
  outmode = out < 0 ? inmode : curr_operand_mode[out];
  push_to_sequence (*before);
  if (inmode != outmode)
    {
      /* process_alt_operands has already checked that the mode sizes
         are ordered.  */
      if (partial_subreg_p (outmode, inmode))
        {
          reg = new_in_reg
            = lra_create_new_reg_with_unique_value (inmode, in_rtx, goal_class,
                                                    exclude_start_hard_regs,
                                                    "");
          new_out_reg = gen_lowpart_SUBREG (outmode, reg);
          LRA_SUBREG_P (new_out_reg) = 1;
          /* If the input reg is dying here, we can use the same hard
             register for REG and IN_RTX.  We do it only for original
             pseudos as reload pseudos can die although original
             pseudos still live where reload pseudos die.  */
          if (REG_P (in_rtx) && (int) REGNO (in_rtx) < lra_new_regno_start
              && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
              && (!early_clobber_p
                  || check_conflict_input_operands (REGNO (in_rtx), ins)))
            lra_assign_reg_val (REGNO (in_rtx), REGNO (reg));
        }
      else
        {
          reg = new_out_reg
            = lra_create_new_reg_with_unique_value (outmode, out_rtx,
                                                    goal_class,
                                                    exclude_start_hard_regs,
                                                    "");
          new_in_reg = gen_lowpart_SUBREG (inmode, reg);
          /* NEW_IN_REG is a non-paradoxical subreg.  We don't want
             NEW_OUT_REG living above.  We add a clobber clause for
             this.  This is just a temporary clobber.  We can remove
             it at the end of LRA work.  */
          rtx_insn *clobber = emit_clobber (new_out_reg);
          LRA_TEMP_CLOBBER_P (PATTERN (clobber)) = 1;
          LRA_SUBREG_P (new_in_reg) = 1;
          if (GET_CODE (in_rtx) == SUBREG)
            {
              rtx subreg_reg = SUBREG_REG (in_rtx);

              /* If SUBREG_REG is dying here and sub-registers IN_RTX
                 and NEW_IN_REG are similar, we can use the same hard
                 register for REG and SUBREG_REG.  */
              if (REG_P (subreg_reg)
                  && (int) REGNO (subreg_reg) < lra_new_regno_start
                  && GET_MODE (subreg_reg) == outmode
                  && known_eq (SUBREG_BYTE (in_rtx), SUBREG_BYTE (new_in_reg))
                  && find_regno_note (curr_insn, REG_DEAD, REGNO (subreg_reg))
                  && (! early_clobber_p
                      || check_conflict_input_operands (REGNO (subreg_reg),
                                                        ins)))
                lra_assign_reg_val (REGNO (subreg_reg), REGNO (reg));
            }
        }
    }
  else
    {
      /* Pseudos have values -- see comments for lra_reg_info.
         Different pseudos with the same value do not conflict even if
         they live in the same place.  When we create a pseudo we
         assign value of original pseudo (if any) from which we
         created the new pseudo.  If we create the pseudo from the
         input pseudo, the new pseudo will have no conflict with the
         input pseudo which is wrong when the input pseudo lives after
         the insn and as the new pseudo value is changed by the insn
         output.  Therefore we create the new pseudo from the output
         except the case when we have single matched dying input
         pseudo.

         We cannot reuse the current output register because we might
         have a situation like "a <- a op b", where the constraints
         force the second input operand ("b") to match the output
         operand ("a").  "b" must then be copied into a new register
         so that it doesn't clobber the current value of "a".

         We cannot use the same value if the output pseudo is
         early clobbered or the input pseudo is mentioned in the
         output, e.g. as an address part in memory, because
         output reload will actually extend the pseudo liveness.
         We don't care about eliminable hard regs here as we are
         interested only in pseudos.  */

      /* Check whether the matching input's register value is the same
         as that of one of the other output operands.  Output operands
         in a parallel insn must be in different registers.  */
      out_conflict = false;
      if (REG_P (in_rtx))
        {
          for (i = 0; outs[i] >= 0; i++)
            {
              rtx other_out_rtx = *curr_id->operand_loc[outs[i]];
              if (outs[i] != out && REG_P (other_out_rtx)
                  && (regno_val_use_in (REGNO (in_rtx), other_out_rtx)
                      != NULL_RTX))
                {
                  out_conflict = true;
                  break;
                }
            }
        }

      new_in_reg = new_out_reg
        = (! early_clobber_p && ins[1] < 0 && REG_P (in_rtx)
           && (int) REGNO (in_rtx) < lra_new_regno_start
           && find_regno_note (curr_insn, REG_DEAD, REGNO (in_rtx))
           && (! early_clobber_p
               || check_conflict_input_operands (REGNO (in_rtx), ins))
           && (out < 0
               || regno_val_use_in (REGNO (in_rtx), out_rtx) == NULL_RTX)
           && !out_conflict
           ? lra_create_new_reg (inmode, in_rtx, goal_class,
                                 exclude_start_hard_regs, "")
           : lra_create_new_reg_with_unique_value (outmode, out_rtx, goal_class,
                                                   exclude_start_hard_regs,
                                                   ""));
    }
  /* The input operand can come from transformations performed before
     processing the insn constraints.  One example of such a
     transformation is subreg reloading (see function
     simplify_operand_subreg).  The new pseudos created by the
     transformations might have an inaccurate class (ALL_REGS) and we
     should make their classes more accurate.  */
  narrow_reload_pseudo_class (in_rtx, goal_class);
  lra_emit_move (copy_rtx (new_in_reg), in_rtx);
  *before = get_insns ();
  end_sequence ();
  /* Add the new pseudo to consider values of subsequent input reload
     pseudos.  */
  lra_assert (curr_insn_input_reloads_num < LRA_MAX_INSN_RELOADS);
  curr_insn_input_reloads[curr_insn_input_reloads_num].input = in_rtx;
  curr_insn_input_reloads[curr_insn_input_reloads_num].match_p = true;
  curr_insn_input_reloads[curr_insn_input_reloads_num++].reg = new_in_reg;
  for (i = 0; (in = ins[i]) >= 0; i++)
    if (GET_MODE (*curr_id->operand_loc[in]) == VOIDmode
        || GET_MODE (new_in_reg) == GET_MODE (*curr_id->operand_loc[in]))
      *curr_id->operand_loc[in] = new_in_reg;
    else
      {
        lra_assert
          (GET_MODE (new_out_reg) == GET_MODE (*curr_id->operand_loc[in]));
        *curr_id->operand_loc[in] = new_out_reg;
      }
  lra_update_dups (curr_id, ins);
  if (out < 0)
    return;
  /* See a comment for the input operand above.  */
  narrow_reload_pseudo_class (out_rtx, goal_class);
  if (find_reg_note (curr_insn, REG_UNUSED, out_rtx) == NULL_RTX)
    {
      reg = SUBREG_P (out_rtx) ? SUBREG_REG (out_rtx) : out_rtx;
      start_sequence ();
      /* If we had strict_low_part, use it also in reload to keep other
         parts unchanged but do it only for regs as strict_low_part
         has no sense for memory and probably there is no insn pattern
         to match the reload insn in memory case.  */
      if (out >= 0 && curr_static_id->operand[out].strict_low && REG_P (reg))
        out_rtx = gen_rtx_STRICT_LOW_PART (VOIDmode, out_rtx);
      lra_emit_move (out_rtx, copy_rtx (new_out_reg));
      emit_insn (*after);
      *after = get_insns ();
      end_sequence ();
    }
  *curr_id->operand_loc[out] = new_out_reg;
  lra_update_dup (curr_id, out);
}
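
/* A sketch of what match_reload emits in the equal-mode case, using
   the "a <- a op b" situation from the comment above (operand numbers
   are hypothetical: out = 0, ins = {2, -1}, and b does not die here):
   a fresh reload pseudo r is created from the output, and we get

       r <- b          (input reload, appended to *BEFORE)
       r <- a op r     (the insn, operands 0 and 2 both replaced by r)
       a <- r          (output reload, appended to *AFTER)

   so b is copied into r and cannot be clobbered by the write to the
   output.  */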

/* Return register class which is union of all reg classes in insn
   constraint alternative string starting with P.  */
static enum reg_class
reg_class_from_constraints (const char *p)
{
  int c, len;
  enum reg_class op_class = NO_REGS;

  do
    switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
      {
      case '#':
      case ',':
        return op_class;

      case 'g':
        op_class = reg_class_subunion[op_class][GENERAL_REGS];
        break;

      default:
        enum constraint_num cn = lookup_constraint (p);
        enum reg_class cl = reg_class_for_constraint (cn);
        if (cl == NO_REGS)
          {
            if (insn_extra_address_constraint (cn))
              op_class
                = (reg_class_subunion
                   [op_class][base_reg_class (VOIDmode, ADDR_SPACE_GENERIC,
                                              ADDRESS, SCRATCH)]);
            break;
          }

        op_class = reg_class_subunion[op_class][cl];
        break;
      }
  while ((p += len), c);
  return op_class;
}

/* If OP is a register, return the class of the register as per
   get_reg_class, otherwise return NO_REGS.  */
static inline enum reg_class
get_op_class (rtx op)
{
  return REG_P (op) ? get_reg_class (REGNO (op)) : NO_REGS;
}
1232 | |
1233 | /* Return a generated insn mem_pseudo := val if TO_P, otherwise
1234 |    val := mem_pseudo.  If the modes of MEM_PSEUDO and VAL differ, use a
1235 |    SUBREG for VAL to make them equal.  */
1236 | static rtx_insn *
1237 | emit_spill_move (bool to_p, rtx mem_pseudo, rtx val)
1238 | {
1239 |   if (GET_MODE (mem_pseudo) != GET_MODE (val))
1240 |     {
1241 |       /* Usually the size of MEM_PSEUDO is greater than VAL's size, but
1242 | 	 in rare cases it can be less, as the mode can be defined by the
1243 | 	 target dependent macro HARD_REGNO_CALLER_SAVE_MODE.  */
1244 |       if (! MEM_P (val))
1245 | 	{
1246 | 	  val = gen_lowpart_SUBREG (GET_MODE (mem_pseudo),
1247 | 				    GET_CODE (val) == SUBREG
1248 | 				    ? SUBREG_REG (val) : val);
1249 | 	  LRA_SUBREG_P (val) = 1;
1250 | 	}
1251 |       else
1252 | 	{
1253 | 	  mem_pseudo = gen_lowpart_SUBREG (GET_MODE (val), mem_pseudo);
1254 | 	  LRA_SUBREG_P (mem_pseudo) = 1;
1255 | 	}
1256 |     }
1257 |   return to_p ? gen_move_insn (mem_pseudo, val)
1258 | 	      : gen_move_insn (val, mem_pseudo);
1259 | }
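     | /* A sketch of the mode-mismatch case above (illustrative only): if a
     |    HImode pseudo must be saved through an SImode slot chosen by
     |    HARD_REGNO_CALLER_SAVE_MODE, the generated store becomes
     |
     |      (set (mem:SI ...) (subreg:SI (reg:HI p) 0))
     |
     |    with LRA_SUBREG_P set on the subreg so later LRA passes can
     |    recognize the subreg as one of their own.  */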
1260 | |
1261 | /* Process a special case insn (register move), return true if we
1262 |    don't need to process it anymore.  INSN should be a single set
1263 |    insn.  Report through CHANGE_P that the RTL was changed and
1264 |    through SEC_MEM_P that hook TARGET_SECONDARY_MEMORY_NEEDED says
1265 |    to use secondary memory.  */
1266 | static bool
1267 | check_and_process_move (bool *change_p, bool *sec_mem_p ATTRIBUTE_UNUSED)
1268 | {
1269 |   int sregno, dregno;
1270 |   rtx dest, src, dreg, sreg, new_reg, scratch_reg;
1271 |   rtx_insn *before;
1272 |   enum reg_class dclass, sclass, secondary_class;
1273 |   secondary_reload_info sri;
1274 |
1275 |   lra_assert (curr_insn_set != NULL_RTX);
1276 |   dreg = dest = SET_DEST (curr_insn_set);
1277 |   sreg = src = SET_SRC (curr_insn_set);
1278 |   if (GET_CODE (dest) == SUBREG)
1279 |     dreg = SUBREG_REG (dest);
1280 |   if (GET_CODE (src) == SUBREG)
1281 |     sreg = SUBREG_REG (src);
1282 |   if (! (REG_P (dreg) || MEM_P (dreg)) || ! (REG_P (sreg) || MEM_P (sreg)))
1283 |     return false;
1284 |   sclass = dclass = NO_REGS;
1285 |   if (REG_P (dreg))
1286 |     dclass = get_reg_class (REGNO (dreg));
1287 |   gcc_assert (dclass < LIM_REG_CLASSES && dclass >= NO_REGS);
1288 |   if (dclass == ALL_REGS)
1289 |     /* ALL_REGS is used for new pseudos created by transformations
1290 |        like reload of SUBREG_REG (see function
1291 |        simplify_operand_subreg).  We don't know their class yet.  We
1292 |        should figure out the class from processing the insn
1293 |        constraints not in this fast path function.  Even if ALL_REGS
1294 |        were a right class for the pseudo, secondary_... hooks usually
1295 |        are not defined for ALL_REGS.  */
1296 |     return false;
1297 |   if (REG_P (sreg))
1298 |     sclass = get_reg_class (REGNO (sreg));
1299 |   gcc_assert (sclass < LIM_REG_CLASSES && sclass >= NO_REGS);
1300 |   if (sclass == ALL_REGS)
1301 |     /* See comments above.  */
1302 |     return false;
1303 |   if (sclass == NO_REGS && dclass == NO_REGS)
1304 |     return false;
1305 |   if (targetm.secondary_memory_needed (GET_MODE (src), sclass, dclass)
1306 |       && ((sclass != NO_REGS && dclass != NO_REGS)
1307 | 	  || (GET_MODE (src)
1308 | 	      != targetm.secondary_memory_needed_mode (GET_MODE (src)))))
1309 |     {
1310 |       *sec_mem_p = true;
1311 |       return false;
1312 |     }
1313 |   if (! REG_P (dreg) || ! REG_P (sreg))
1314 |     return false;
1315 |   sri.prev_sri = NULL;
1316 |   sri.icode = CODE_FOR_nothing;
1317 |   sri.extra_cost = 0;
1318 |   secondary_class = NO_REGS;
1319 |   /* Set up hard register for a reload pseudo for hook
1320 |      secondary_reload because some targets just ignore unassigned
1321 |      pseudos in the hook.  */
1322 |   if (dclass != NO_REGS && lra_get_regno_hard_regno (REGNO (dreg)) < 0)
1323 |     {
1324 |       dregno = REGNO (dreg);
1325 |       reg_renumber[dregno] = ira_class_hard_regs[dclass][0];
1326 |     }
1327 |   else
1328 |     dregno = -1;
1329 |   if (sclass != NO_REGS && lra_get_regno_hard_regno (REGNO (sreg)) < 0)
1330 |     {
1331 |       sregno = REGNO (sreg);
1332 |       reg_renumber[sregno] = ira_class_hard_regs[sclass][0];
1333 |     }
1334 |   else
1335 |     sregno = -1;
1336 |   if (sclass != NO_REGS)
1337 |     secondary_class
1338 |       = (enum reg_class) targetm.secondary_reload (false, dest,
1339 | 						   (reg_class_t) sclass,
1340 | 						   GET_MODE (src), &sri);
1341 |   if (sclass == NO_REGS
1342 |       || ((secondary_class != NO_REGS || sri.icode != CODE_FOR_nothing)
1343 | 	  && dclass != NO_REGS))
1344 |     {
1345 |       enum reg_class old_sclass = secondary_class;
1346 |       secondary_reload_info old_sri = sri;
1347 |
1348 |       sri.prev_sri = NULL;
1349 |       sri.icode = CODE_FOR_nothing;
1350 |       sri.extra_cost = 0;
1351 |       secondary_class
1352 | 	= (enum reg_class) targetm.secondary_reload (true, src,
1353 | 						     (reg_class_t) dclass,
1354 | 						     GET_MODE (src), &sri);
1355 |       /* Check the target hook consistency.  */
1356 |       lra_assert
1357 | 	((secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
1358 | 	 || (old_sclass == NO_REGS && old_sri.icode == CODE_FOR_nothing)
1359 | 	 || (secondary_class == old_sclass && sri.icode == old_sri.icode));
1360 |     }
1361 |   if (sregno >= 0)
1362 |     reg_renumber [sregno] = -1;
1363 |   if (dregno >= 0)
1364 |     reg_renumber [dregno] = -1;
1365 |   if (secondary_class == NO_REGS && sri.icode == CODE_FOR_nothing)
1366 |     return false;
1367 |   *change_p = true;
1368 |   new_reg = NULL_RTX;
1369 |   if (secondary_class != NO_REGS)
1370 |     new_reg = lra_create_new_reg_with_unique_value (GET_MODE (src), NULL_RTX,
1371 | 						    secondary_class, NULL,
1372 | 						    "secondary");
1373 |   start_sequence ();
1374 |   if (sri.icode == CODE_FOR_nothing)
1375 |     lra_emit_move (new_reg, src);
1376 |   else
1377 |     {
1378 |       enum reg_class scratch_class;
1379 |
1380 |       scratch_class = (reg_class_from_constraints
1381 | 		       (insn_data[sri.icode].operand[2].constraint));
1382 |       scratch_reg = (lra_create_new_reg_with_unique_value
1383 | 		     (insn_data[sri.icode].operand[2].mode, NULL_RTX,
1384 | 		      scratch_class, NULL, "scratch"));
1385 |       emit_insn (GEN_FCN (sri.icode) (new_reg != NULL_RTX ? new_reg : dest,
1386 | 				      src, scratch_reg));
1387 |     }
1388 |   before = get_insns ();
1389 |   end_sequence ();
1390 |   lra_process_new_insns (curr_insn, before, NULL, "Inserting the move");
1391 |   if (new_reg != NULL_RTX)
1392 |     SET_SRC (curr_insn_set) = new_reg;
1393 |   else
1394 |     {
1395 |       if (lra_dump_file != NULL)
1396 | 	{
1397 | 	  fprintf (lra_dump_file, "Deleting move %u\n", INSN_UID (curr_insn));
1398 | 	  dump_insn_slim (lra_dump_file, curr_insn);
1399 | 	}
1400 |       lra_set_insn_deleted (curr_insn);
1401 |       return true;
1402 |     }
1403 |   return false;
1404 | }
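     | /* E.g. for a move between two register files that cannot be done
     |    directly, the target's secondary_reload hook may return an
     |    intermediate class, or an icode whose operand 2 is a scratch; in
     |    the latter case the code above emits
     |
     |      GEN_FCN (sri.icode) (dest-or-new_reg, src, scratch_reg)
     |
     |    before the insn.  Purely illustrative; the actual classes and
     |    icodes are defined by the target hook.  */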
1405 | |
1406 | /* The following data describe the result of process_alt_operands. |
1407 | The data are used in curr_insn_transform to generate reloads. */ |
1408 | |
1409 | /* The chosen reg classes which should be used for the corresponding
1410 |    operands.  */
1411 | static enum reg_class goal_alt[MAX_RECOG_OPERANDS];
1412 | /* Hard registers which cannot be a start hard register for the corresponding
1413 |    operands.  */
1414 | static HARD_REG_SET goal_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS];
1415 | /* True if the operand should be the same as another operand and that
1416 |    other operand does not need a reload.  */
1417 | static bool goal_alt_match_win[MAX_RECOG_OPERANDS];
1418 | /* True if the operand does not need a reload.  */
1419 | static bool goal_alt_win[MAX_RECOG_OPERANDS];
1420 | /* True if the operand can be offsetable memory.  */
1421 | static bool goal_alt_offmemok[MAX_RECOG_OPERANDS];
1422 | /* The number of the operand to which a given operand can be matched.  */
1423 | static int goal_alt_matches[MAX_RECOG_OPERANDS];
1424 | /* The number of elements in the following array.  */
1425 | static int goal_alt_dont_inherit_ops_num;
1426 | /* Numbers of operands whose reload pseudos should not be inherited.  */
1427 | static int goal_alt_dont_inherit_ops[MAX_RECOG_OPERANDS];
1428 | /* True if the insn commutative operands should be swapped.  */
1429 | static bool goal_alt_swapped;
1430 | /* The chosen insn alternative.  */
1431 | static int goal_alt_number;
1432 |
1433 | /* True if the corresponding operand is the result of an equivalence
1434 |    substitution.  */
1435 | static bool equiv_substition_p[MAX_RECOG_OPERANDS];
1436 |
1437 | /* The following five variables are used to choose the best insn
1438 |    alternative.  They reflect final characteristics of the best
1439 |    alternative.  */
1440 |
1441 | /* Number of necessary reloads and overall cost reflecting the
1442 |    previous value and other unpleasantness of the best alternative.  */
1443 | static int best_losers, best_overall;
1444 | /* Overall number of hard registers used for reloads.  For example, on
1445 |    some targets we need 2 general registers to reload DFmode and only
1446 |    one floating point register.  */
1447 | static int best_reload_nregs;
1448 | /* Overall number reflecting the distances of previous reloads of the
1449 |    same value.  The distances are counted from the current BB start.
1450 |    It is used to improve inheritance chances.  */
1451 | static int best_reload_sum;
1452 |
1453 | /* True if the current insn should have, correspondingly, no input or
1454 |    no output reloads.  */
1455 | static bool no_input_reloads_p, no_output_reloads_p;
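     | /* For instance (illustrative), for an output operand constrained
     |    "=r,m": choosing alternative 0 would leave goal_alt[nop] as
     |    GENERAL_REGS, while choosing alternative 1 would leave it NO_REGS
     |    with goal_alt_offmemok[nop] set, and goal_alt_number records which
     |    alternative won.  */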
1456 | |
1457 | /* True if we swapped the commutative operands in the current |
1458 | insn. */ |
1459 | static int curr_swapped; |
1460 | |
1461 | /* If CHECK_ONLY_P is false, arrange for address element *LOC to be a
1462 |    register of class CL.  Add any input reloads to list BEFORE.  AFTER
1463 |    is nonnull if *LOC is an automodified value; handle that case by
1464 |    adding the required output reloads to list AFTER.  Return true if
1465 |    the RTL was changed.
1466 |
1467 |    If CHECK_ONLY_P is true, check that *LOC is a correct address
1468 |    register.  Return false if the address register is correct.  */
1469 | static bool
1470 | process_addr_reg (rtx *loc, bool check_only_p, rtx_insn **before, rtx_insn **after,
1471 | 		  enum reg_class cl)
1472 | {
1473 |   int regno;
1474 |   enum reg_class rclass, new_class;
1475 |   rtx reg;
1476 |   rtx new_reg;
1477 |   machine_mode mode;
1478 |   bool subreg_p, before_p = false;
1479 |
1480 |   subreg_p = GET_CODE (*loc) == SUBREG;
1481 |   if (subreg_p)
1482 |     {
1483 |       reg = SUBREG_REG (*loc);
1484 |       mode = GET_MODE (reg);
1485 |
1486 |       /* For modes with size bigger than ptr_mode, there is unlikely to be
1487 | 	 a "mov" between two registers of different classes, but there
1488 | 	 normally will be a "mov" which transfers an element of a vector
1489 | 	 register into a general register, and this normally will be a
1490 | 	 subreg which should be reloaded as a whole.  This is particularly
1491 | 	 likely to be triggered when -fno-split-wide-types is specified.  */
1492 |       if (!REG_P (reg)
1493 | 	  || in_class_p (reg, cl, &new_class)
1494 | 	  || known_le (GET_MODE_SIZE (mode), GET_MODE_SIZE (ptr_mode)))
1495 | 	loc = &SUBREG_REG (*loc);
1496 |     }
1497 |
1498 |   reg = *loc;
1499 |   mode = GET_MODE (reg);
1500 |   if (! REG_P (reg))
1501 |     {
1502 |       if (check_only_p)
1503 | 	return true;
1504 |       /* Always reload memory in an address even if the target supports
1505 | 	 such addresses.  */
1506 |       new_reg = lra_create_new_reg_with_unique_value (mode, reg, cl, NULL,
1507 | 						      "address");
1508 |       before_p = true;
1509 |     }
1510 |   else
1511 |     {
1512 |       regno = REGNO (reg);
1513 |       rclass = get_reg_class (regno);
1514 |       if (! check_only_p
1515 | 	  && (*loc = get_equiv_with_elimination (reg, curr_insn)) != reg)
1516 | 	{
1517 | 	  if (lra_dump_file != NULL)
1518 | 	    {
1519 | 	      fprintf (lra_dump_file,
1520 | 		       "Changing pseudo %d in address of insn %u on equiv ",
1521 | 		       REGNO (reg), INSN_UID (curr_insn));
1522 | 	      dump_value_slim (lra_dump_file, *loc, 1);
1523 | 	      fprintf (lra_dump_file, "\n");
1524 | 	    }
1525 | 	  *loc = copy_rtx (*loc);
1526 | 	}
1527 |       if (*loc != reg || ! in_class_p (reg, cl, &new_class))
1528 | 	{
1529 | 	  if (check_only_p)
1530 | 	    return true;
1531 | 	  reg = *loc;
1532 | 	  if (get_reload_reg (after == NULL ? OP_IN : OP_INOUT,
1533 | 			      mode, reg, cl, NULL,
1534 | 			      subreg_p, "address", &new_reg))
1535 | 	    before_p = true;
1536 | 	}
1537 |       else if (new_class != NO_REGS && rclass != new_class)
1538 | 	{
1539 | 	  if (check_only_p)
1540 | 	    return true;
1541 | 	  lra_change_class (regno, new_class, "	   Change to", true);
1542 | 	  return false;
1543 | 	}
1544 |       else
1545 | 	return false;
1546 |     }
1547 |   if (before_p)
1548 |     {
1549 |       push_to_sequence (*before);
1550 |       lra_emit_move (new_reg, reg);
1551 |       *before = get_insns ();
1552 |       end_sequence ();
1553 |     }
1554 |   *loc = new_reg;
1555 |   if (after != NULL)
1556 |     {
1557 |       start_sequence ();
1558 |       lra_emit_move (before_p ? copy_rtx (reg) : reg, new_reg);
1559 |       emit_insn (*after);
1560 |       *after = get_insns ();
1561 |       end_sequence ();
1562 |     }
1563 |   return true;
1564 | }
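     | /* For an automodified address element, e.g. X in
     |    (mem:SI (post_inc:SI (reg X))), AFTER is nonnull: the new register
     |    is loaded from X before the insn and the updated value is copied
     |    back to X after it, which is why both a BEFORE and an AFTER move
     |    can be emitted above.  (Illustrative example.)  */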
1565 | |
1566 | /* Insert move insn in simplify_operand_subreg.  BEFORE returns
1567 |    the insn to be inserted before curr insn.  AFTER returns the
1568 |    insn to be inserted after curr insn.  ORIGREG and NEWREG
1569 |    are the original reg and new reg for reload.  */
1570 | static void |
1571 | insert_move_for_subreg (rtx_insn **before, rtx_insn **after, rtx origreg, |
1572 | rtx newreg) |
1573 | { |
1574 | if (before) |
1575 | { |
1576 | push_to_sequence (*before); |
1577 | lra_emit_move (newreg, origreg); |
1578 | *before = get_insns (); |
1579 | end_sequence (); |
1580 | } |
1581 | if (after) |
1582 | { |
1583 | start_sequence (); |
1584 | lra_emit_move (origreg, newreg); |
1585 | emit_insn (*after); |
1586 | *after = get_insns (); |
1587 | end_sequence (); |
1588 | } |
1589 | } |
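     | /* E.g. an input reload (OP_IN) passes only BEFORE, producing
     |    "newreg <- origreg" ahead of the insn; an output reload (OP_OUT)
     |    passes only AFTER, producing "origreg <- newreg" behind it; an
     |    OP_INOUT operand gets both moves.  */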
1590 | |
1591 | static int valid_address_p (machine_mode mode, rtx addr, addr_space_t as); |
1592 | static bool process_address (int, bool, rtx_insn **, rtx_insn **); |
1593 | |
1594 | /* Make reloads for subreg in operand NOP with internal subreg mode |
1595 | REG_MODE, add new reloads for further processing. Return true if |
1596 | any change was done. */ |
1597 | static bool |
1598 | simplify_operand_subreg (int nop, machine_mode reg_mode) |
1599 | { |
1600 | int hard_regno, inner_hard_regno; |
1601 | rtx_insn *before, *after; |
1602 | machine_mode mode, innermode; |
1603 | rtx reg, new_reg; |
1604 | rtx operand = *curr_id->operand_loc[nop]; |
1605 | enum reg_class regclass; |
1606 | enum op_type type; |
1607 | |
1608 |   before = after = NULL;
1609 |
1610 |   if (GET_CODE (operand) != SUBREG)
1611 |     return false;
1612 |
1613 |   mode = GET_MODE (operand);
1614 |   reg = SUBREG_REG (operand);
1615 |   innermode = GET_MODE (reg);
1616 |   type = curr_static_id->operand[nop].type;
1617 |   if (MEM_P (reg))
1618 |     {
1619 |       const bool addr_was_valid
1620 | 	= valid_address_p (innermode, XEXP (reg, 0), MEM_ADDR_SPACE (reg));
1621 |       alter_subreg (curr_id->operand_loc[nop], false);
1622 |       rtx subst = *curr_id->operand_loc[nop];
1623 |       lra_assert (MEM_P (subst));
1624 |       const bool addr_is_valid = valid_address_p (GET_MODE (subst),
1625 | 						  XEXP (subst, 0),
1626 | 						  MEM_ADDR_SPACE (subst));
1627 |       if (!addr_was_valid
1628 | 	  || addr_is_valid
1629 | 	  || ((get_constraint_type (lookup_constraint
1630 | 				    (curr_static_id->operand[nop].constraint))
1631 | 	       != CT_SPECIAL_MEMORY)
1632 | 	      /* We can still reload the address, and if the address is
1633 | 		 valid, we can remove the subreg without reloading its
1634 | 		 inner memory.  */
1635 | 	      && valid_address_p (GET_MODE (subst),
1636 | 				  regno_reg_rtx
1637 | 				  [ira_class_hard_regs
1638 | 				   [base_reg_class (GET_MODE (subst),
1639 | 						    MEM_ADDR_SPACE (subst),
1640 | 						    ADDRESS, SCRATCH)][0]],
1641 | 				  MEM_ADDR_SPACE (subst))))
1642 | { |
1643 | /* If we change the address for a paradoxical subreg of memory, the |
1644 | new address might violate the necessary alignment or the access |
1645 | might be slow; take this into consideration. We need not worry |
1646 | about accesses beyond allocated memory for paradoxical memory |
1647 | subregs as we don't substitute such equiv memory (see processing |
1648 | equivalences in function lra_constraints) and because for spilled |
1649 | pseudos we allocate stack memory enough for the biggest |
1650 | corresponding paradoxical subreg. |
1651 | |
1652 | However, do not blindly simplify a (subreg (mem ...)) for |
1653 | WORD_REGISTER_OPERATIONS targets as this may lead to loading junk |
1654 | data into a register when the inner is narrower than outer or |
1655 | missing important data from memory when the inner is wider than |
1656 | outer. This rule only applies to modes that are no wider than |
1657 | a word. |
1658 | |
1659 | 	     If valid memory becomes invalid after subreg elimination
1660 | 	     and the address might be different, we still have to
1661 | 	     reload the memory.
1662 | 	  */
1663 | 	  if ((! addr_was_valid
1664 | 	       || addr_is_valid
1665 | 	       || known_eq (GET_MODE_SIZE (mode), GET_MODE_SIZE (innermode)))
1666 | 	      && !(maybe_ne (GET_MODE_PRECISION (mode),
1667 | 			     GET_MODE_PRECISION (innermode))
1668 | 		   && known_le (GET_MODE_SIZE (mode), UNITS_PER_WORD)
1669 | 		   && known_le (GET_MODE_SIZE (innermode), UNITS_PER_WORD)
1670 | 		   && WORD_REGISTER_OPERATIONS)
1671 | 	      && (!(MEM_ALIGN (subst) < GET_MODE_ALIGNMENT (mode)
1672 | 		    && targetm.slow_unaligned_access (mode, MEM_ALIGN (subst)))
1673 | 		  || (MEM_ALIGN (reg) < GET_MODE_ALIGNMENT (innermode)
1674 | 		      && targetm.slow_unaligned_access (innermode,
1675 | 							MEM_ALIGN (reg)))))
1676 | 	    return true;
1677 |
1678 | 	  *curr_id->operand_loc[nop] = operand;
1679 |
1680 | 	  /* But if the address was not valid, we cannot reload the MEM without
1681 | 	     reloading the address first.  */
1682 | 	  if (!addr_was_valid)
1683 | 	    process_address (nop, false, &before, &after);
1684 |
1685 | 	  /* INNERMODE is fast, MODE slow.  Reload the mem in INNERMODE.  */
1686 | 	  enum reg_class rclass
1687 | 	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1688 | 	  if (get_reload_reg (curr_static_id->operand[nop].type, innermode,
1689 | 			      reg, rclass, NULL,
1690 | 			      TRUE, "slow/invalid mem", &new_reg))
1691 | 	    {
1692 | 	      bool insert_before, insert_after;
1693 | 	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1694 |
1695 | 	      insert_before = (type != OP_OUT
1696 | 			       || partial_subreg_p (mode, innermode));
1697 | 	      insert_after = type != OP_IN;
1698 | 	      insert_move_for_subreg (insert_before ? &before : NULL,
1699 | 				      insert_after ? &after : NULL,
1700 | 				      reg, new_reg);
1701 | 	    }
1702 | 	  SUBREG_REG (operand) = new_reg;
1703 |
1704 | 	  /* Convert to MODE.  */
1705 | 	  reg = operand;
1706 | 	  rclass
1707 | 	    = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1708 | 	  if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
1709 | 			      rclass, NULL,
1710 | 			      TRUE, "slow/invalid mem", &new_reg))
1711 | 	    {
1712 | 	      bool insert_before, insert_after;
1713 | 	      bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1714 |
1715 | 	      insert_before = type != OP_OUT;
1716 | 	      insert_after = type != OP_IN;
1717 | 	      insert_move_for_subreg (insert_before ? &before : NULL,
1718 | 				      insert_after ? &after : NULL,
1719 | 				      reg, new_reg);
1720 | 	    }
1721 | 	  *curr_id->operand_loc[nop] = new_reg;
1722 | 	  lra_process_new_insns (curr_insn, before, after,
1723 | 				 "Inserting slow/invalid mem reload");
1724 | 	  return true;
1725 | 	}
1726 |
1727 |       /* If the address was valid and became invalid, prefer to reload
1728 | 	 the memory.  Typical case is when the index scale should
1729 | 	 correspond to the memory.  */
1730 |       *curr_id->operand_loc[nop] = operand;
1731 |       /* Do not return false here as the MEM_P (reg) will be processed
1732 | 	 later in this function.  */
1733 |     }
1734 |   else if (REG_P (reg) && REGNO (reg) < FIRST_PSEUDO_REGISTER)
1735 |     {
1736 |       alter_subreg (curr_id->operand_loc[nop], false);
1737 |       return true;
1738 |     }
1739 |   else if (CONSTANT_P (reg))
1740 |     {
1741 |       /* Try to simplify subreg of constant.  It is usually result of
1742 | 	 equivalence substitution.  */
1743 |       if (innermode == VOIDmode
1744 | 	  && (innermode = original_subreg_reg_mode[nop]) == VOIDmode)
1745 | 	innermode = curr_static_id->operand[nop].mode;
1746 |       if ((new_reg = simplify_subreg (mode, reg, innermode,
1747 | 				      SUBREG_BYTE (operand))) != NULL_RTX)
1748 | 	{
1749 | 	  *curr_id->operand_loc[nop] = new_reg;
1750 | 	  return true;
1751 | 	}
1752 |     }
1753 |   /* Put constant into memory when we have mixed modes.  It generates
1754 |      better code in most cases as it does not need secondary reload
1755 |      memory.  It also prevents LRA looping when LRA is using
1756 |      secondary reload memory again and again.  */
1757 |   if (CONSTANT_P (reg) && CONST_POOL_OK_P (reg_mode, reg)
1758 |       && SCALAR_INT_MODE_P (reg_mode) != SCALAR_INT_MODE_P (mode))
1759 |     {
1760 |       SUBREG_REG (operand) = force_const_mem (reg_mode, reg);
1761 |       alter_subreg (curr_id->operand_loc[nop], false);
1762 |       return true;
1763 |     }
1764 |   /* Force a reload of the SUBREG_REG if this is a constant or PLUS or
1765 |      if there may be a problem accessing OPERAND in the outer
1766 |      mode.  */
1767 |   if ((REG_P (reg)
1768 |        && REGNO (reg) >= FIRST_PSEUDO_REGISTER
1769 |        && (hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
1770 |        /* Don't reload paradoxical subregs because we could end up looping,
1771 | 	  with the final regno repeatedly out of the hard regs range.  */
1772 |        && (hard_regno_nregs (hard_regno, innermode)
1773 | 	   >= hard_regno_nregs (hard_regno, mode))
1774 |        && simplify_subreg_regno (hard_regno, innermode,
1775 | 				 SUBREG_BYTE (operand), mode) < 0
1776 |        /* Don't reload subreg for matching reload.  It is actually
1777 | 	  valid subreg in LRA.  */
1778 |        && ! LRA_SUBREG_P (operand))
1779 |       || CONSTANT_P (reg) || GET_CODE (reg) == PLUS || MEM_P (reg))
1780 |     {
1781 |       enum reg_class rclass;
1782 |
1783 |       if (REG_P (reg))
1784 | 	/* There is a big probability that we will get the same class
1785 | 	   for the new pseudo and we will get the same insn which
1786 | 	   means infinite looping.  So spill the new pseudo.  */
1787 | 	rclass = NO_REGS;
1788 |       else
1789 | 	/* The class will be defined later in curr_insn_transform.  */
1790 | 	rclass
1791 | 	  = (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1792 |
1793 |       if (get_reload_reg (curr_static_id->operand[nop].type, reg_mode, reg,
1794 | 			  rclass, NULL,
1795 | 			  TRUE, "subreg reg", &new_reg))
1796 | 	{
1797 | 	  bool insert_before, insert_after;
1798 | 	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1799 |
1800 | 	  insert_before = (type != OP_OUT
1801 | 			   || read_modify_subreg_p (operand));
1802 | 	  insert_after = (type != OP_IN);
1803 | 	  insert_move_for_subreg (insert_before ? &before : NULL,
1804 | 				  insert_after ? &after : NULL,
1805 | 				  reg, new_reg);
1806 | 	}
1807 |       SUBREG_REG (operand) = new_reg;
1808 |       lra_process_new_insns (curr_insn, before, after,
1809 | 			     "Inserting subreg reload");
1810 |       return true;
1811 |     }
1812 | /* Force a reload for a paradoxical subreg. For paradoxical subreg, |
1813 | IRA allocates hardreg to the inner pseudo reg according to its mode |
1814 | instead of the outermode, so the size of the hardreg may not be enough |
1815 | to contain the outermode operand, in that case we may need to insert |
1816 | reload for the reg. For the following two types of paradoxical subreg, |
1817 | we need to insert reload: |
1818 | 1. If the op_type is OP_IN, and the hardreg could not be paired with |
1819 | other hardreg to contain the outermode operand |
1820 | (checked by in_hard_reg_set_p), we need to insert the reload. |
1821 | 2. If the op_type is OP_OUT or OP_INOUT. |
1822 | |
1823 | Here is a paradoxical subreg example showing how the reload is generated: |
1824 | |
1825 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1826 | (subreg:TI (reg:DI 107 [ __comp ]) 0)) {*movti_internal_rex64} |
1827 | |
1828 |    In IRA, reg107 is allocated to a DImode hardreg.  We use x86-64 as an
1829 |    example here: if reg107 is assigned to hardreg R15, then because R15 is
1830 |    the last hardreg, the compiler cannot find another hardreg to pair with
1831 |    R15 to contain TImode data.  So we insert a TImode reload reg180 for it.
1832 | After reload is inserted: |
1833 | |
1834 | (insn 283 0 0 (set (subreg:DI (reg:TI 180 [orig:107 __comp ] [107]) 0) |
1835 | (reg:DI 107 [ __comp ])) -1 |
1836 | (insn 5 4 7 2 (set (reg:TI 106 [ __comp ]) |
1837 | (subreg:TI (reg:TI 180 [orig:107 __comp ] [107]) 0)) {*movti_internal_rex64} |
1838 | |
1839 | Two reload hard registers will be allocated to reg180 to save TImode data |
1840 | in LRA_assign. |
1841 | |
1842 | For LRA pseudos this should normally be handled by the biggest_mode |
1843 | mechanism. However, it's possible for new uses of an LRA pseudo |
1844 | to be introduced after we've allocated it, such as when undoing |
1845 | inheritance, and the allocated register might not then be appropriate |
1846 | for the new uses. */ |
1847 |   else if (REG_P (reg)
1848 | 	   && REGNO (reg) >= FIRST_PSEUDO_REGISTER
1849 | 	   && paradoxical_subreg_p (operand)
1850 | 	   && (inner_hard_regno = lra_get_regno_hard_regno (REGNO (reg))) >= 0
1851 | 	   && ((hard_regno
1852 | 		= simplify_subreg_regno (inner_hard_regno, innermode,
1853 | 					 SUBREG_BYTE (operand), mode)) < 0
1854 | 	       || ((hard_regno_nregs (inner_hard_regno, innermode)
1855 | 		    < hard_regno_nregs (hard_regno, mode))
1856 | 		   && (regclass = lra_get_allocno_class (REGNO (reg)))
1857 | 		   && (type != OP_IN
1858 | 		       || !in_hard_reg_set_p (reg_class_contents[regclass],
1859 | 					      mode, hard_regno)
1860 | 		       || overlaps_hard_reg_set_p (lra_no_alloc_regs,
1861 | 						   mode, hard_regno)))))
1862 |     {
1863 |       /* The class will be defined later in curr_insn_transform.  */
1864 |       enum reg_class rclass
1865 | 	= (enum reg_class) targetm.preferred_reload_class (reg, ALL_REGS);
1866 |
1867 |       if (get_reload_reg (curr_static_id->operand[nop].type, mode, reg,
1868 | 			  rclass, NULL,
1869 | 			  TRUE, "paradoxical subreg", &new_reg))
1870 | 	{
1871 | 	  rtx subreg;
1872 | 	  bool insert_before, insert_after;
1873 |
1874 | 	  PUT_MODE (new_reg, mode);
1875 | 	  subreg = gen_lowpart_SUBREG (innermode, new_reg);
1876 | 	  bitmap_set_bit (&lra_subreg_reload_pseudos, REGNO (new_reg));
1877 |
1878 | 	  insert_before = (type != OP_OUT);
1879 | 	  insert_after = (type != OP_IN);
1880 | 	  insert_move_for_subreg (insert_before ? &before : NULL,
1881 | 				  insert_after ? &after : NULL,
1882 | 				  reg, subreg);
1883 | 	}
1884 |       SUBREG_REG (operand) = new_reg;
1885 |       lra_process_new_insns (curr_insn, before, after,
1886 | 			     "Inserting paradoxical subreg reload");
1887 |       return true;
1888 |     }
1889 |   return false;
1890 | }
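     | /* A small example of the constant case handled above (illustrative):
     |    an equivalence substitution can leave (subreg:QI (const_int 300) 0),
     |    which simplify_subreg folds directly to (const_int 44) on a
     |    little-endian target, so no reload is needed at all.  */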
1891 | |
1892 | /* Return TRUE if X refers to a hard register from SET.  */
1893 | static bool |
1894 | uses_hard_regs_p (rtx x, HARD_REG_SET set) |
1895 | { |
1896 | int i, j, x_hard_regno; |
1897 | machine_mode mode; |
1898 | const char *fmt; |
1899 | enum rtx_code code; |
1900 | |
1901 |   if (x == NULL_RTX)
1902 |     return false;
1903 |   code = GET_CODE (x);
1904 |   mode = GET_MODE (x);
1905 |
1906 |   if (code == SUBREG)
1907 |     {
1908 |       /* For all SUBREGs we want to check whether the full multi-register
1909 | 	 overlaps the set.  For normal SUBREGs this means 'get_hard_regno' of
1910 | 	 the inner register, for paradoxical SUBREGs this means the
1911 | 	 'get_hard_regno' of the full SUBREG and for complete SUBREGs either is
1912 | 	 fine.  Use the wider mode for all cases.  */
1913 |       rtx subreg = SUBREG_REG (x);
1914 |       mode = wider_subreg_mode (x);
1915 |       if (mode == GET_MODE (subreg))
1916 | 	{
1917 | 	  x = subreg;
1918 | 	  code = GET_CODE (x);
1919 | 	}
1920 |     }
1921 |
1922 |   if (REG_P (x) || SUBREG_P (x))
1923 |     {
1924 |       x_hard_regno = get_hard_regno (x);
1925 |       return (x_hard_regno >= 0
1926 | 	      && overlaps_hard_reg_set_p (set, mode, x_hard_regno));
1927 |     }
1928 |   fmt = GET_RTX_FORMAT (code);
1929 |   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
1930 |     {
1931 |       if (fmt[i] == 'e')
1932 | 	{
1933 | 	  if (uses_hard_regs_p (XEXP (x, i), set))
1934 | 	    return true;
1935 | 	}
1936 |       else if (fmt[i] == 'E')
1937 | 	{
1938 | 	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
1939 | 	    if (uses_hard_regs_p (XVECEXP (x, i, j), set))
1940 | 	      return true;
1941 | 	}
1942 |     }
1943 |   return false;
1944 | }
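     | /* Note the SUBREG handling above: for a paradoxical subreg like
     |    (subreg:TI (reg:DI r) 0) the overlap test is done in TImode, so it
     |    covers every hard register the full access would touch, not just
     |    the DImode inner register.  (Illustrative.)  */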
1945 | |
1946 | /* Return true if OP is a spilled pseudo. */ |
1947 | static inline bool |
1948 | spilled_pseudo_p (rtx op) |
1949 | { |
1950 |   return (REG_P (op)
1951 | 	  && REGNO (op) >= FIRST_PSEUDO_REGISTER && in_mem_p (REGNO (op)));
1952 | }
1953 |
1954 | /* Return true if X is a general constant.  */
1955 | static inline bool
1956 | general_constant_p (rtx x)
1957 | {
1958 |   return CONSTANT_P (x) && (! flag_pic || LEGITIMATE_PIC_OPERAND_P (x));
1959 | }
1960 |
1961 | static bool
1962 | reg_in_class_p (rtx reg, enum reg_class cl)
1963 | {
1964 |   if (cl == NO_REGS)
1965 |     return get_reg_class (REGNO (reg)) == NO_REGS;
1966 |   return in_class_p (reg, cl, NULL);
1967 | }
1968 |
1969 | /* Return true if SET of RCLASS contains no hard regs which can be
1970 |    used in MODE.  */
1971 | static bool
1972 | prohibited_class_reg_set_mode_p (enum reg_class rclass,
1973 | 				 HARD_REG_SET &set,
1974 | 				 machine_mode mode)
1975 | {
1976 |   HARD_REG_SET temp;
1977 |
1978 |   lra_assert (hard_reg_set_subset_p (reg_class_contents[rclass], set));
1979 |   temp = set & ~lra_no_alloc_regs;
1980 |   return (hard_reg_set_subset_p
1981 | 	  (temp, ira_prohibited_class_mode_regs[rclass][mode]));
1982 | }
1983 | |
1984 | |
1985 | /* Used to check validity info about small class input operands. It |
1986 | should be incremented at start of processing an insn |
1987 | alternative. */ |
1988 | static unsigned int curr_small_class_check = 0; |
1989 | |
1990 | /* Update number of used inputs of class OP_CLASS for operand NOP |
1991 | of alternative NALT. Return true if we have more such class operands |
1992 | than the number of available regs. */ |
1993 | static bool |
1994 | update_and_check_small_class_inputs (int nop, int nalt, |
1995 | enum reg_class op_class) |
1996 | { |
1997 | static unsigned int small_class_check[LIM_REG_CLASSES]; |
1998 | static int small_class_input_nums[LIM_REG_CLASSES]; |
1999 | |
2000 |   if (SMALL_REGISTER_CLASS_P (op_class)
2001 |       /* We are interested in classes that became small because of
2002 | 	 fixing some hard regs, e.g. by a user through GCC options.  */
2003 |       && hard_reg_set_intersect_p (reg_class_contents[op_class],
2004 | 				   ira_no_alloc_regs)
2005 |       && (curr_static_id->operand[nop].type != OP_OUT
2006 | 	  || TEST_BIT (curr_static_id->operand[nop].early_clobber_alts, nalt)))
2007 |     {
2008 |       if (small_class_check[op_class] == curr_small_class_check)
2009 | 	small_class_input_nums[op_class]++;
2010 |       else
2011 | 	{
2012 | 	  small_class_check[op_class] = curr_small_class_check;
2013 | 	  small_class_input_nums[op_class] = 1;
2014 | 	}
2015 |       if (small_class_input_nums[op_class] > ira_class_hard_regs_num[op_class])
2016 | 	return true;
2017 |     }
2018 |   return false;
2019 | }
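     | /* Example (illustrative): suppose a two-register class became
     |    single-register because the user fixed one register with
     |    -ffixed-<reg>.  Two inputs constrained to that class in the same
     |    alternative give small_class_input_nums == 2, which exceeds
     |    ira_class_hard_regs_num == 1, so the alternative is rejected.  */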
2020 | |
2021 | /* Major function to choose the current insn alternative and what |
2022 | operands should be reloaded and how. If ONLY_ALTERNATIVE is not |
2023 | negative we should consider only this alternative. Return false if |
2024 | we cannot choose the alternative or find how to reload the |
2025 | operands. */ |
2026 | static bool |
2027 | process_alt_operands (int only_alternative) |
2028 | { |
2029 | bool ok_p = false; |
2030 | int nop, overall, nalt; |
2031 | int n_alternatives = curr_static_id->n_alternatives; |
2032 | int n_operands = curr_static_id->n_operands; |
2033 | /* LOSERS counts the operands that don't fit this alternative and |
2034 | would require loading. */ |
2035 | int losers; |
2036 | int addr_losers; |
2037 | /* REJECT is a count of how undesirable this alternative says it is |
2038 | if any reloading is required. If the alternative matches exactly |
2039 | then REJECT is ignored, but otherwise it gets this much counted |
2040 | against it in addition to the reloading needed. */ |
2041 | int reject; |
2042 | /* This is defined by '!' or '?' alternative constraint and added to |
2043 | reject. But in some cases it can be ignored. */ |
2044 | int static_reject; |
2045 | int op_reject; |
2046 | /* The number of elements in the following array. */ |
2047 | int early_clobbered_regs_num; |
2048 | /* Numbers of operands which are early clobber registers. */ |
2049 |   int early_clobbered_nops[MAX_RECOG_OPERANDS];
2050 |   enum reg_class curr_alt[MAX_RECOG_OPERANDS];
2051 |   HARD_REG_SET curr_alt_set[MAX_RECOG_OPERANDS];
2052 |   HARD_REG_SET curr_alt_exclude_start_hard_regs[MAX_RECOG_OPERANDS];
2053 |   bool curr_alt_match_win[MAX_RECOG_OPERANDS];
2054 |   bool curr_alt_win[MAX_RECOG_OPERANDS];
2055 |   bool curr_alt_offmemok[MAX_RECOG_OPERANDS];
2056 |   int curr_alt_matches[MAX_RECOG_OPERANDS];
2057 |   /* The number of elements in the following array.  */
2058 |   int curr_alt_dont_inherit_ops_num;
2059 |   /* Numbers of operands whose reload pseudos should not be inherited.  */
2060 |   int curr_alt_dont_inherit_ops[MAX_RECOG_OPERANDS];
2061 |   rtx op;
2062 |   /* The register when the operand is a subreg of register, otherwise the
2063 |      operand itself.  */
2064 |   rtx no_subreg_reg_operand[MAX_RECOG_OPERANDS];
2065 |   /* The register if the operand is a register or subreg of register,
2066 |      otherwise NULL.  */
2067 |   rtx operand_reg[MAX_RECOG_OPERANDS];
2068 |   int hard_regno[MAX_RECOG_OPERANDS];
2069 |   machine_mode biggest_mode[MAX_RECOG_OPERANDS];
2070 | int reload_nregs, reload_sum; |
2071 | bool costly_p; |
2072 | enum reg_class cl; |
2073 | |
2074 | /* Calculate some data common for all alternatives to speed up the |
2075 | function. */ |
2076 | for (nop = 0; nop < n_operands; nop++) |
2077 | { |
2078 | rtx reg; |
2079 | |
2080 | op = no_subreg_reg_operand[nop] = *curr_id->operand_loc[nop]; |
2081 | /* The real hard regno of the operand after the allocation. */ |
2082 | hard_regno[nop] = get_hard_regno (op); |
2083 | |
2084 | operand_reg[nop] = reg = op; |
2085 |       biggest_mode[nop] = GET_MODE (op);
2086 |       if (GET_CODE (op) == SUBREG)
2087 | 	{
2088 | 	  biggest_mode[nop] = wider_subreg_mode (op);
2089 | 	  operand_reg[nop] = reg = SUBREG_REG (op);
2090 | 	}
2091 |       if (! REG_P (reg))
2092 | 	operand_reg[nop] = NULL_RTX;
2093 |       else if (REGNO (reg) >= FIRST_PSEUDO_REGISTER
2094 | 	       || ((int) REGNO (reg)
2095 | 		   == lra_get_elimination_hard_regno (REGNO (reg))))
2096 | 	no_subreg_reg_operand[nop] = reg;
2097 |       else
2098 | 	operand_reg[nop] = no_subreg_reg_operand[nop]
2099 | 	  /* Just use natural mode for elimination result.  It should
2100 | 	     be enough for extra constraints hooks.  */
2101 | 	  = regno_reg_rtx[hard_regno[nop]];
2102 |     }
2103 | |
2104 | /* The constraints are made of several alternatives. Each operand's |
2105 | constraint looks like foo,bar,... with commas separating the |
2106 | alternatives. The first alternatives for all operands go |
2107 | together, the second alternatives go together, etc. |
2108 | |
2109 | First loop over alternatives. */ |
2110 | alternative_mask preferred = curr_id->preferred_alternatives; |
2111 | if (only_alternative >= 0) |
2112 |     preferred &= ALTERNATIVE_BIT (only_alternative);
2113 |
2114 |   for (nalt = 0; nalt < n_alternatives; nalt++)
2115 |     {
2116 |       /* Loop over operands for one constraint alternative.  */
2117 |       if (!TEST_BIT (preferred, nalt))
2118 | 	continue;
2119 |
2120 |       bool matching_early_clobber[MAX_RECOG_OPERANDS];
2121 |       curr_small_class_check++;
2122 |       overall = losers = addr_losers = 0;
2123 |       static_reject = reject = reload_nregs = reload_sum = 0;
2124 |       for (nop = 0; nop < n_operands; nop++)
2125 | 	{
2126 | 	  int inc = (curr_static_id
2127 | 		     ->operand_alternative[nalt * n_operands + nop].reject);
2128 | 	  if (lra_dump_file != NULL && inc != 0)
2129 | 	    fprintf (lra_dump_file,
2130 | 		     "            Staticly defined alt reject+=%d\n", inc);
2131 | static_reject += inc; |
2132 | matching_early_clobber[nop] = 0; |
2133 | } |
2134 | reject += static_reject; |
2135 | early_clobbered_regs_num = 0; |
2136 | |
2137 | for (nop = 0; nop < n_operands; nop++) |
2138 | { |
2139 | const char *p; |
2140 | char *end; |
2141 | int len, c, m, i, opalt_num, this_alternative_matches; |
2142 | bool win, did_match, offmemok, early_clobber_p; |
2143 | /* false => this operand can be reloaded somehow for this |
2144 | alternative. */ |
2145 | bool badop; |
2146 | /* true => this operand can be reloaded if the alternative |
2147 | allows regs. */ |
2148 | bool winreg; |
2149 | /* True if a constant forced into memory would be OK for |
2150 | this operand. */ |
2151 | bool constmemok; |
2152 | enum reg_class this_alternative, this_costly_alternative; |
2153 | HARD_REG_SET this_alternative_set, this_costly_alternative_set; |
2154 | HARD_REG_SET this_alternative_exclude_start_hard_regs; |
2155 | bool this_alternative_match_win, this_alternative_win; |
2156 | bool this_alternative_offmemok; |
2157 | bool scratch_p; |
2158 | machine_mode mode; |
2159 | enum constraint_num cn; |
2160 | |
2161 | opalt_num = nalt * n_operands + nop; |
2162 | if (curr_static_id->operand_alternative[opalt_num].anything_ok) |
2163 | { |
2164 | /* Fast track for no constraints at all. */ |
2165 | curr_alt[nop] = NO_REGS; |
2166 | CLEAR_HARD_REG_SET (curr_alt_set[nop]); |
2167 | curr_alt_win[nop] = true; |
2168 | curr_alt_match_win[nop] = false; |
2169 | curr_alt_offmemok[nop] = false; |
2170 | curr_alt_matches[nop] = -1; |
2171 | continue; |
2172 | } |
2173 | |
2174 | op = no_subreg_reg_operand[nop]; |
2175 | mode = curr_operand_mode[nop]; |
2176 | |
2177 | win = did_match = winreg = offmemok = constmemok = false; |
2178 | badop = true; |
2179 | |
2180 | early_clobber_p = false; |
2181 | p = curr_static_id->operand_alternative[opalt_num].constraint; |
2182 | |
2183 | this_costly_alternative = this_alternative = NO_REGS; |
2184 | /* We update set of possible hard regs besides its class |
2185 | because reg class might be inaccurate. For example, |
2186 | union of LO_REGS (l), HI_REGS(h), and STACK_REG(k) in ARM |
2187 | is translated in HI_REGS because classes are merged by |
2188 | pairs and there is no accurate intermediate class. */ |
2189 | CLEAR_HARD_REG_SET (this_alternative_set); |
2190 | CLEAR_HARD_REG_SET (this_costly_alternative_set); |
2191 | CLEAR_HARD_REG_SET (this_alternative_exclude_start_hard_regs); |
2192 | this_alternative_win = false; |
2193 | this_alternative_match_win = false; |
2194 | this_alternative_offmemok = false; |
2195 | this_alternative_matches = -1; |
2196 | |
2197 | /* An empty constraint should be excluded by the fast |
2198 | track. */ |
2199 | 	  lra_assert (*p != 0 && *p != ',');
2200 |
2201 | 	  op_reject = 0;
2202 | 	  /* Scan this alternative's specs for this operand; set WIN
2203 | 	     if the operand fits any letter in this alternative.
2204 | 	     Otherwise, clear BADOP if this operand could fit some
2205 | 	     letter after reloads, or set WINREG if this operand could
2206 | 	     fit after reloads provided the constraint allows some
2207 | 	     registers.  */
2208 | 	  costly_p = false;
2209 | 	  do
2210 | 	    {
2211 | 	      switch ((c = *p, len = CONSTRAINT_LEN (c, p)), c)
2212 | 		{
2213 | 		case '\0':
2214 | 		  len = 0;
2215 | 		  break;
2216 | 		case ',':
2217 | 		  c = '\0';
2218 | 		  break;
2219 |
2220 | 		case '&':
2221 | 		  early_clobber_p = true;
2222 | 		  break;
2223 |
2224 | 		case '$':
2225 | 		  op_reject += LRA_MAX_REJECT;
2226 | 		  break;
2227 | 		case '^':
2228 | 		  op_reject += LRA_LOSER_COST_FACTOR;
2229 | 		  break;
2230 |
2231 | 		case '#':
2232 | 		  /* Ignore rest of this alternative.  */
2233 | 		  c = '\0';
2234 | 		  break;
2235 |
2236 | 		case '0': case '1': case '2': case '3': case '4':
2237 | 		case '5': case '6': case '7': case '8': case '9':
2238 | 		  {
2239 | 		    int m_hregno;
2240 | 		    bool match_p;
2241 |
2242 | 		    m = strtoul (p, &end, 10);
2243 | 		    p = end;
2244 | 		    len = 0;
2245 | 		    lra_assert (nop > m);
2246 |
2247 | 		    /* Reject matches if we don't know which operand is
2248 | 		       bigger.  This situation would arguably be a bug in
2249 | 		       an .md pattern, but could also occur in a user asm.  */
2250 | 		    if (!ordered_p (GET_MODE_SIZE (biggest_mode[m]),
2251 | 				    GET_MODE_SIZE (biggest_mode[nop])))
2252 | 		      break;
2253 |
2254 | 		    /* Don't match wrong asm insn operands for proper
2255 | 		       diagnostic later.  */
2256 | 		    if (INSN_CODE (curr_insn) < 0
2257 | 			&& (curr_operand_mode[m] == BLKmode
2258 | 			    || curr_operand_mode[nop] == BLKmode)
2259 | && curr_operand_mode[m] != curr_operand_mode[nop]) |
2260 | break; |
2261 | |
2262 | m_hregno = get_hard_regno (*curr_id->operand_loc[m]); |
2263 | /* We are supposed to match a previous operand. |
2264 | If we do, we win if that one did. If we do |
2265 | not, count both of the operands as losers. |
2266 | (This is too conservative, since most of the |
2267 | time only a single reload insn will be needed |
2268 | to make the two operands win. As a result, |
2269 | this alternative may be rejected when it is |
2270 | actually desirable.) */ |
2271 | match_p = false; |
2272 | if (operands_match_p (*curr_id->operand_loc[nop], |
2273 | *curr_id->operand_loc[m], m_hregno)) |
2274 | { |
2275 | /* We should reject matching of an early |
2276 | clobber operand if the matching operand is |
2277 | not dying in the insn. */ |
2278 | 			if (!TEST_BIT (curr_static_id->operand[m]
2279 | 				       .early_clobber_alts, nalt)
2280 | 			    || operand_reg[nop] == NULL_RTX
2281 | 			    || (find_regno_note (curr_insn, REG_DEAD,
2282 | 						 REGNO (op))
2283 | 				|| REGNO (op) == REGNO (operand_reg[m])))
2284 | match_p = true; |
2285 | } |
2286 | if (match_p) |
2287 | { |
2288 | /* If we are matching a non-offsettable |
2289 | address where an offsettable address was |
2290 | expected, then we must reject this |
2291 | combination, because we can't reload |
2292 | it. */ |
2293 | if (curr_alt_offmemok[m] |
2294 | 			    && MEM_P (*curr_id->operand_loc[m])
2295 | && curr_alt[m] == NO_REGS && ! curr_alt_win[m]) |
2296 | continue; |
2297 | } |
2298 | else |
2299 | { |
2300 | /* If the operands do not match and one |
2301 | 			   operand is INOUT, we cannot match them.
2302 | Try other possibilities, e.g. other |
2303 | alternatives or commutative operand |
2304 | exchange. */ |
2305 | if (curr_static_id->operand[nop].type == OP_INOUT |
2306 | || curr_static_id->operand[m].type == OP_INOUT) |
2307 | break; |
2308 | /* Operands don't match. If the operands are |
2309 | different user defined explicit hard |
2310 | registers, then we cannot make them match |
2311 | when one is early clobber operand. */ |
2312 | 			if ((REG_P (*curr_id->operand_loc[nop])
2313 | 			     || SUBREG_P (*curr_id->operand_loc[nop]))
2314 | 			    && (REG_P (*curr_id->operand_loc[m])
2315 | 				|| SUBREG_P (*curr_id->operand_loc[m])))
2316 | 			  {
2317 | 			    rtx nop_reg = *curr_id->operand_loc[nop];
2318 | 			    if (SUBREG_P (nop_reg))
2319 | 			      nop_reg = SUBREG_REG (nop_reg);
2320 | 			    rtx m_reg = *curr_id->operand_loc[m];
2321 | 			    if (SUBREG_P (m_reg))
2322 | 			      m_reg = SUBREG_REG (m_reg);
2323 |
2324 | 			    if (REG_P (nop_reg)
2325 | 				&& HARD_REGISTER_P (nop_reg)
2326 | 				&& REG_USERVAR_P (nop_reg)
2327 | 				&& REG_P (m_reg)
2328 | 				&& HARD_REGISTER_P (m_reg)
2329 | 				&& REG_USERVAR_P (m_reg))
2330 | { |
2331 | int i; |
2332 | |
2333 | for (i = 0; i < early_clobbered_regs_num; i++) |
2334 | if (m == early_clobbered_nops[i]) |
2335 | break; |
2336 | if (i < early_clobbered_regs_num |
2337 | || early_clobber_p) |
2338 | break; |
2339 | } |
2340 | } |
2341 | /* Both operands must allow a reload register, |
2342 | otherwise we cannot make them match. */ |
2343 | if (curr_alt[m] == NO_REGS) |
2344 | break; |
2345 | /* Retroactively mark the operand we had to |
2346 | match as a loser, if it wasn't already and |
2347 | it wasn't matched to a register constraint |
2348 | 			   (e.g. it might be matched by memory).  */
2349 | if (curr_alt_win[m] |
2350 | 			    && (operand_reg[m] == NULL_RTX
2351 | 				|| hard_regno[m] < 0))
2352 | 			  {
2353 | 			    losers++;
2354 | 			    reload_nregs
2355 | 			      += (ira_reg_class_max_nregs[curr_alt[m]]
2356 | 				  [GET_MODE (*curr_id->operand_loc[m])]);
2357 | 			  }
2358 |
2359 | 			/* Prefer matching earlyclobber alternative as
2360 | 			   it results in fewer hard regs required for
2361 | 			   the insn than a non-matching earlyclobber
2362 | 			   alternative.  */
2363 | 			if (TEST_BIT (curr_static_id->operand[m]
2364 | 				      .early_clobber_alts, nalt))
2365 | 			  {
2366 | 			    if (lra_dump_file != NULL)
2367 | 			      fprintf
2368 | 				(lra_dump_file,
2369 | 				 "            %d Matching earlyclobber alt:"
2370 | 				 " reject--\n",
2371 | 				 nop);
2372 | 			    if (!matching_early_clobber[m])
2373 | 			      {
2374 | 				reject--;
2375 | 				matching_early_clobber[m] = 1;
2376 | 			      }
2377 | 			  }
2378 | 			/* Otherwise we prefer no matching
2379 | 			   alternatives because it gives more freedom
2380 | 			   in RA.  */
2381 | 			else if (operand_reg[nop] == NULL_RTX
2382 | 				 || (find_regno_note (curr_insn, REG_DEAD,
2383 | 						      REGNO (operand_reg[nop]))
2384 | 				     == NULL_RTX))
2385 | 			  {
2386 | 			    if (lra_dump_file != NULL)
2387 | 			      fprintf
2388 | 				(lra_dump_file,
2389 | 				 "            %d Matching alt: reject+=2\n",
2390 | 				 nop);
2391 | reject += 2; |
2392 | } |
2393 | } |
2394 | /* If we have to reload this operand and some |
2395 | previous operand also had to match the same |
2396 | thing as this operand, we don't know how to do |
2397 | that. */ |
2398 | if (!match_p || !curr_alt_win[m]) |
2399 | { |
2400 | for (i = 0; i < nop; i++) |
2401 | if (curr_alt_matches[i] == m) |
2402 | break; |
2403 | if (i < nop) |
2404 | break; |
2405 | } |
2406 | else |
2407 | did_match = true; |
2408 | |
2409 | this_alternative_matches = m; |
2410 | /* This can be fixed with reloads if the operand |
2411 | we are supposed to match can be fixed with |
2412 | reloads. */ |
2413 | badop = false; |
2414 | this_alternative = curr_alt[m]; |
2415 | this_alternative_set = curr_alt_set[m]; |
2416 | this_alternative_exclude_start_hard_regs |
2417 | = curr_alt_exclude_start_hard_regs[m]; |
2418 | winreg = this_alternative != NO_REGS; |
2419 | break; |
2420 | } |
2421 | |
2422 | case 'g': |
2423 | 		  if (MEM_P (op)
2424 | || general_constant_p (op) |
2425 | || spilled_pseudo_p (op)) |
2426 | win = true; |
2427 | cl = GENERAL_REGS; |
2428 | goto reg; |
2429 | |
2430 | default: |
2431 | cn = lookup_constraint (p); |
2432 | switch (get_constraint_type (cn)) |
2433 | { |
2434 | case CT_REGISTER: |
2435 | cl = reg_class_for_constraint (cn); |
2436 | if (cl != NO_REGS) |
2437 | goto reg; |
2438 | break; |
2439 | |
2440 | case CT_CONST_INT: |
2441 | 		      if (CONST_INT_P (op)
2442 | 			  && insn_const_int_ok_for_constraint (INTVAL (op), cn))
2443 | win = true; |
2444 | break; |
2445 | |
2446 | case CT_MEMORY: |
2447 | case CT_RELAXED_MEMORY: |
2448 | 		      if (MEM_P (op)
2449 | && satisfies_memory_constraint_p (op, cn)) |
2450 | win = true; |
2451 | else if (spilled_pseudo_p (op)) |
2452 | win = true; |
2453 | |
2454 | /* If we didn't already win, we can reload constants |
2455 | via force_const_mem or put the pseudo value into |
2456 | memory, or make other memory by reloading the |
2457 | address like for 'o'. */ |
2458 | if (CONST_POOL_OK_P (mode, op) |
2459 | || MEM_P (op) || REG_P (op) |
2460 | /* We can restore the equiv insn by a |
2461 | reload. */ |
2462 | || equiv_substition_p[nop]) |
2463 | badop = false; |
2464 | constmemok = true; |
2465 | offmemok = true; |
2466 | break; |
2467 | |
2468 | case CT_ADDRESS: |
2469 | /* An asm operand with an address constraint |
2470 | that doesn't satisfy address_operand has |
2471 | is_address cleared, so that we don't try to |
2472 | make a non-address fit. */ |
2473 | if (!curr_static_id->operand[nop].is_address) |
2474 | break; |
2475 | /* If we didn't already win, we can reload the address |
2476 | into a base register. */ |
2477 | if (satisfies_address_constraint_p (op, cn)) |
2478 | win = true; |
2479 | cl = base_reg_class (VOIDmode, ADDR_SPACE_GENERIC, |
2480 | ADDRESS, SCRATCH); |
2481 | badop = false; |
2482 | goto reg; |
2483 | |
2484 | case CT_FIXED_FORM: |
2485 | if (constraint_satisfied_p (op, cn)) |
2486 | win = true; |
2487 | break; |
2488 | |
2489 | case CT_SPECIAL_MEMORY: |
2490 | if (satisfies_memory_constraint_p (op, cn)) |
2491 | win = true; |
2492 | else if (spilled_pseudo_p (op)) |
2493 | win = true; |
2494 | break; |
2495 | } |
2496 | break; |
2497 | |
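| /* [Editor's note -- explanatory sketch, not part of the original |
| source.] Every register-accepting constraint funnels into the reg: |
| label below with CL holding the candidate class (GENERAL_REGS for |
| 'g' and 'r', reg_class_for_constraint (cn) otherwise). The label |
| merges CL into this_alternative via reg_class_subunion and then |
| decides "win" from the operand's current hard register or class. */ |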
2498 | reg: |
2499 | if (mode == BLKmode) |
2500 | break; |
2501 | this_alternative = reg_class_subunion[this_alternative][cl]; |
2502 | if (hard_reg_set_subset_p (this_alternative_set, |
2503 | reg_class_contents[cl])) |
2504 | this_alternative_exclude_start_hard_regs |
2505 | = ira_exclude_class_mode_regs[cl][mode]; |
2506 | else if (!hard_reg_set_subset_p (reg_class_contents[cl], |
2507 | this_alternative_set)) |
2508 | this_alternative_exclude_start_hard_regs |
2509 | |= ira_exclude_class_mode_regs[cl][mode]; |
2510 | this_alternative_set |= reg_class_contents[cl]; |
2511 | if (costly_p) |
2512 | { |
2513 | this_costly_alternative |
2514 | = reg_class_subunion[this_costly_alternative][cl]; |
2515 | this_costly_alternative_set |= reg_class_contents[cl]; |
2516 | } |
2517 | winreg = true; |
2518 | if (REG_P (op)) |
2519 | { |
2520 | if (hard_regno[nop] >= 0 |
2521 | && in_hard_reg_set_p (this_alternative_set, |
2522 | mode, hard_regno[nop]) |
2523 | && !TEST_HARD_REG_BIT |
2524 | (this_alternative_exclude_start_hard_regs, |
2525 | hard_regno[nop])) |
2526 | win = true; |
2527 | else if (hard_regno[nop] < 0 |
2528 | && in_class_p (op, this_alternative, NULL)) |
2529 | win = true; |
2530 | } |
2531 | break; |
2532 | } |
2533 | if (c != ' ' && c != '\t') |
2534 | costly_p = c == '*'; |
2535 | } |
2536 | while ((p += len), c); |
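| /* [Editor's note -- illustrative sketch, not part of the original |
| source.] The do-while above walks one alternative's constraint |
| string; C is the current character and LEN its CONSTRAINT_LEN. Per |
| the two lines above it, a '*' marks the *next* constraint as costly, |
| so for "*r,m" the 'r' choice can still win but feeds |
| this_costly_alternative, which adds reject points further down. */ |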
2537 | |
2538 | scratch_p = (operand_reg[nop] != NULL_RTX |
2539 | && ira_former_scratch_p (REGNO (operand_reg[nop]))); |
2540 | /* Record which operands fit this alternative. */ |
2541 | if (win) |
2542 | { |
2543 | this_alternative_win = true; |
2544 | if (operand_reg[nop] != NULL_RTX) |
2545 | { |
2546 | if (hard_regno[nop] >= 0) |
2547 | { |
2548 | if (in_hard_reg_set_p (this_costly_alternative_set, |
2549 | mode, hard_regno[nop])) |
2550 | { |
2551 | if (lra_dump_file != NULL) |
2552 | fprintf (lra_dump_file, |
2553 | " %d Costly set: reject++\n", |
2554 | nop); |
2555 | reject++; |
2556 | } |
2557 | } |
2558 | else |
2559 | { |
2560 | /* Prefer won reg to spilled pseudo under otherwise |
2561 | equal conditions for possible inheritance. */ |
2562 | if (! scratch_p) |
2563 | { |
2564 | if (lra_dump_file != NULL) |
2565 | fprintf |
2566 | (lra_dump_file, |
2567 | " %d Non pseudo reload: reject++\n", |
2568 | nop); |
2569 | reject++; |
2570 | } |
2571 | if (in_class_p (operand_reg[nop], |
2572 | this_costly_alternative, NULL)) |
2573 | { |
2574 | if (lra_dump_file != NULL) |
2575 | fprintf |
2576 | (lra_dump_file, |
2577 | " %d Non pseudo costly reload:" |
2578 | " reject++\n", |
2579 | nop); |
2580 | reject++; |
2581 | } |
2582 | } |
2583 | /* We simulate the behavior of old reload here. |
2584 | Although scratches need hard registers and it |
2585 | might result in spilling other pseudos, no reload |
2586 | insns are generated for the scratches. So it |
2587 | might cost something but probably less than the |
2588 | old reload pass believes. */ |
2589 | if (scratch_p) |
2590 | { |
2591 | if (lra_dump_file != NULL) |
2592 | fprintf (lra_dump_file, |
2593 | " %d Scratch win: reject+=2\n", |
2594 | nop); |
2595 | reject += 2; |
2596 | } |
2597 | } |
2598 | } |
2599 | else if (did_match) |
2600 | this_alternative_match_win = true; |
2601 | else |
2602 | { |
2603 | int const_to_mem = 0; |
2604 | bool no_regs_p; |
2605 | |
2606 | reject += op_reject; |
2607 | /* Never do an output reload of the stack pointer. It makes |
2608 | it impossible to do elimination when SP is changed in |
2609 | RTL. */ |
2610 | if (op == stack_pointer_rtx && ! frame_pointer_needed |
2611 | && curr_static_id->operand[nop].type != OP_IN) |
2612 | goto fail; |
2613 | |
2614 | /* If this alternative asks for a specific reg class, see if there |
2615 | is at least one allocatable register in that class. */ |
2616 | no_regs_p |
2617 | = (this_alternative == NO_REGS |
2618 | || (hard_reg_set_subset_p |
2619 | (reg_class_contents[this_alternative], |
2620 | lra_no_alloc_regs))); |
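| /* [Editor's note -- explanatory sketch, not part of the original |
| source.] no_regs_p also covers classes that exist but are entirely |
| unallocatable: if every member of reg_class_contents[this_alternative] |
| is in lra_no_alloc_regs (e.g. a class containing only a fixed frame |
| pointer), the subset test above is true and the alternative is |
| treated as registerless. */ |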
2621 | |
2622 | /* For asms, verify that the class for this alternative is possible |
2623 | for the mode that is specified. */ |
2624 | if (!no_regs_p && INSN_CODE (curr_insn) < 0) |
2625 | { |
2626 | int i; |
2627 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
2628 | if (targetm.hard_regno_mode_ok (i, mode) |
2629 | && in_hard_reg_set_p (reg_class_contents[this_alternative], |
2630 | mode, i)) |
2631 | break; |
2632 | if (i == FIRST_PSEUDO_REGISTER) |
2633 | winreg = false; |
2634 | } |
2635 | |
2636 | /* If this operand accepts a register, and if the |
2637 | register class has at least one allocatable register, |
2638 | then this operand can be reloaded. */ |
2639 | if (winreg && !no_regs_p) |
2640 | badop = false; |
2641 | |
2642 | if (badop) |
2643 | { |
2644 | if (lra_dump_file != NULL) |
2645 | fprintf (lra_dump_file, |
2646 | " alt=%d: Bad operand -- refuse\n", |
2647 | nalt); |
2648 | goto fail; |
2649 | } |
2650 | |
2651 | if (this_alternative != NO_REGS) |
2652 | { |
2653 | HARD_REG_SET available_regs |
2654 | = (reg_class_contents[this_alternative] |
2655 | & ~((ira_prohibited_class_mode_regs |
2656 | [this_alternative][mode]) |
2657 | | lra_no_alloc_regs)); |
2658 | if (hard_reg_set_empty_p (available_regs)) |
2659 | { |
2660 | /* There are no hard regs holding a value of given |
2661 | mode. */ |
2662 | if (offmemok) |
2663 | { |
2664 | this_alternative = NO_REGS; |
2665 | if (lra_dump_file != NULL) |
2666 | fprintf (lra_dump_file, |
2667 | " %d Using memory because of" |
2668 | " a bad mode: reject+=2\n", |
2669 | nop); |
2670 | reject += 2; |
2671 | } |
2672 | else |
2673 | { |
2674 | if (lra_dump_file != NULL) |
2675 | fprintf (lra_dump_file, |
2676 | " alt=%d: Wrong mode -- refuse\n", |
2677 | nalt); |
2678 | goto fail; |
2679 | } |
2680 | } |
2681 | } |
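| /* [Editor's note -- illustrative sketch, not part of the original |
| source.] Example for the block above: if this_alternative has no |
| allocatable hard reg that can hold the operand's mode (say a DImode |
| operand against such a class), the operand is either pushed to |
| memory (when offmemok, at reject += 2) or the whole alternative is |
| refused. */ |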
2682 | |
2683 | /* If a not yet assigned pseudo has a class which is a |
2684 | subset of the required reg class, it is a less costly |
2685 | alternative, as the pseudo can still get a hard reg of |
2686 | the necessary class. */ |
2687 | if (! no_regs_p && REG_P (op) && hard_regno[nop] < 0 |
2688 | && (cl = get_reg_class (REGNO (op))) != NO_REGS |
2689 | && ira_class_subset_p[this_alternative][cl]) |
2690 | { |
2691 | if (lra_dump_file != NULL) |
2692 | fprintf |
2693 | (lra_dump_file, |
2694 | " %d Super set class reg: reject-=3\n", nop); |
2695 | reject -= 3; |
2696 | } |
2697 | |
2698 | this_alternative_offmemok = offmemok; |
2699 | if (this_costly_alternative != NO_REGS) |
2700 | { |
2701 | if (lra_dump_file != NULL) |
2702 | fprintf (lra_dump_file, |
2703 | " %d Costly loser: reject++\n", nop); |
2704 | reject++; |
2705 | } |
2706 | /* If the operand is dying, has a matching constraint, |
2707 | and satisfies constraints of the matched operand |
2708 | which failed to satisfy its own constraints, most probably |
2709 | the reload for this operand will be gone. */ |
2710 | if (this_alternative_matches >= 0 |
2711 | && !curr_alt_win[this_alternative_matches] |
2712 | && REG_P (op) |
2713 | && find_regno_note (curr_insn, REG_DEAD, REGNO (op)) |
2714 | && (hard_regno[nop] >= 0 |
2715 | ? in_hard_reg_set_p (this_alternative_set, |
2716 | mode, hard_regno[nop]) |
2717 | : in_class_p (op, this_alternative, NULL))) |
2718 | { |
2719 | if (lra_dump_file != NULL) |
2720 | fprintf |
2721 | (lra_dump_file, |
2722 | " %d Dying matched operand reload: reject++\n", |
2723 | nop); |
2724 | reject++; |
2725 | } |
2726 | else |
2727 | { |
2728 | /* Strict_low_part requires reloading the register, |
2729 | not the sub-register. In this case we should |
2730 | check that a final reload hard reg can hold the |
2731 | value mode. */ |
2732 | if (curr_static_id->operand[nop].strict_low |
2733 | && REG_P (op) |
2734 | && hard_regno[nop] < 0 |
2735 | && GET_CODE (*curr_id->operand_loc[nop]) == SUBREG |
2736 | && ira_class_hard_regs_num[this_alternative] > 0 |
2737 | && (!targetm.hard_regno_mode_ok |
2738 | (ira_class_hard_regs[this_alternative][0], |
2739 | GET_MODE (*curr_id->operand_loc[nop])))) |
2740 | { |
2741 | if (lra_dump_file != NULL) |
2742 | fprintf |
2743 | (lra_dump_file, |
2744 | " alt=%d: Strict low subreg reload -- refuse\n", |
2745 | nalt); |
2746 | goto fail; |
2747 | } |
2748 | losers++; |
2749 | } |
2750 | if (operand_reg[nop] != NULL_RTX |
2751 | /* Output operands and matched input operands are |
2752 | not inherited. The following conditions do not |
2753 | exactly describe the previous statement but they |
2754 | are pretty close. */ |
2755 | && curr_static_id->operand[nop].type != OP_OUT |
2756 | && (this_alternative_matches < 0 |
2757 | || curr_static_id->operand[nop].type != OP_IN)) |
2758 | { |
2759 | int last_reload = (lra_reg_info[ORIGINAL_REGNO |
2760 | (operand_reg[nop])] |
2761 | .last_reload); |
2762 | |
2763 | /* The value of reload_sum makes sense only if we |
2764 | process insns in their order. It happens only on |
2765 | the first constraints sub-pass when we do most of |
2766 | the reload work. */ |
2767 | if (lra_constraint_iter == 1 && last_reload > bb_reload_num) |
2768 | reload_sum += last_reload - bb_reload_num; |
2769 | } |
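| /* [Editor's note -- worked example, not part of the original |
| source.] last_reload is the ordinal of the last reload insn emitted |
| for the operand's original pseudo, and bb_reload_num marks the start |
| of the current BB. With bb_reload_num == 100 and last_reload == 120, |
| reload_sum grows by 20; ties between alternatives are later broken |
| in favor of the larger reload_sum (see the selection code below). */ |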
2770 | /* If this is a constant that is reloaded into the |
2771 | desired class by copying it to memory first, count |
2772 | that as another reload. This is consistent with |
2773 | other code and is required to avoid choosing another |
2774 | alternative when the constant is moved into memory. |
2775 | Note that the test here is precisely the same as in |
2776 | the code below that calls force_const_mem. */ |
2777 | if (CONST_POOL_OK_P (mode, op) |
2778 | && ((targetm.preferred_reload_class |
2779 | (op, this_alternative) == NO_REGS) |
2780 | || no_input_reloads_p)) |
2781 | { |
2782 | const_to_mem = 1; |
2783 | if (! no_regs_p) |
2784 | losers++; |
2785 | } |
2786 | |
2787 | /* Alternative loses if it requires a type of reload not |
2788 | permitted for this insn. We can always reload |
2789 | objects with a REG_UNUSED note. */ |
2790 | if ((curr_static_id->operand[nop].type != OP_IN |
2791 | && no_output_reloads_p |
2792 | && ! find_reg_note (curr_insn, REG_UNUSED, op)) |
2793 | || (curr_static_id->operand[nop].type != OP_OUT |
2794 | && no_input_reloads_p && ! const_to_mem) |
2795 | || (this_alternative_matches >= 0 |
2796 | && (no_input_reloads_p |
2797 | || (no_output_reloads_p |
2798 | && (curr_static_id->operand |
2799 | [this_alternative_matches].type != OP_IN) |
2800 | && ! find_reg_note (curr_insn, REG_UNUSED, |
2801 | no_subreg_reg_operand |
2802 | [this_alternative_matches]))))) |
2803 | { |
2804 | if (lra_dump_file != NULL) |
2805 | fprintf |
2806 | (lra_dump_file, |
2807 | " alt=%d: No input/output reload -- refuse\n", |
2808 | nalt); |
2809 | goto fail; |
2810 | } |
2811 | |
2812 | /* Alternative loses if its required class pseudo cannot |
2813 | hold a value of the required mode. Such insns can be |
2814 | described by insn definitions with mode iterators. */ |
2815 | if (GET_MODE (*curr_id->operand_loc[nop]) != VOIDmode |
2816 | && ! hard_reg_set_empty_p (this_alternative_set) |
2817 | /* It is common practice for constraints to use a |
2818 | class which does not actually have enough regs to |
2819 | hold the value (e.g. x86 AREG for a mode requiring |
2820 | more than one general reg). Therefore we have 2 |
2821 | conditions to check that the reload pseudo cannot |
2822 | hold the mode value. */ |
2823 | && (!targetm.hard_regno_mode_ok |
2824 | (ira_class_hard_regs[this_alternative][0], |
2825 | GET_MODE (*curr_id->operand_loc[nop]))) |
2826 | /* The above condition is not enough as the first |
2827 | reg in ira_class_hard_regs may not be aligned for |
2828 | multi-word mode values. */ |
2829 | && (prohibited_class_reg_set_mode_p |
2830 | (this_alternative, this_alternative_set, |
2831 | GET_MODE (*curr_id->operand_loc[nop])))) |
2832 | { |
2833 | if (lra_dump_file != NULL) |
2834 | fprintf (lra_dump_file, |
2835 | " alt=%d: reload pseudo for op %d " |
2836 | "cannot hold the mode value -- refuse\n", |
2837 | nalt, nop); |
2838 | goto fail; |
2839 | } |
2840 | |
2841 | /* Check strong discouragement of reload of non-constant |
2842 | into class THIS_ALTERNATIVE. */ |
2843 | if (! CONSTANT_P (op) && ! no_regs_p |
2844 | && (targetm.preferred_reload_class |
2845 | (op, this_alternative) == NO_REGS |
2846 | || (curr_static_id->operand[nop].type == OP_OUT |
2847 | && (targetm.preferred_output_reload_class |
2848 | (op, this_alternative) == NO_REGS)))) |
2849 | { |
2850 | if (offmemok && REG_P (op)) |
2851 | { |
2852 | if (lra_dump_file != NULL) |
2853 | fprintf |
2854 | (lra_dump_file, |
2855 | " %d Spill pseudo into memory: reject+=3\n", |
2856 | nop); |
2857 | reject += 3; |
2858 | } |
2859 | else |
2860 | { |
2861 | if (lra_dump_file != NULL) |
2862 | fprintf |
2863 | (lra_dump_file, |
2864 | " %d Non-preferred reload: reject+=%d\n", |
2865 | nop, LRA_MAX_REJECT); |
2866 | reject += LRA_MAX_REJECT; |
2867 | } |
2868 | } |
2869 | |
2870 | if (! (MEM_P (op) && offmemok) |
2871 | && ! (const_to_mem && constmemok)) |
2872 | { |
2873 | /* We prefer to reload pseudos over reloading other |
2874 | things, since such reloads may be able to be |
2875 | eliminated later. So bump REJECT in other cases. |
2876 | Don't do this in the case where we are forcing a |
2877 | constant into memory and it will then win since |
2878 | we don't want to have a different alternative |
2879 | match then. */ |
2880 | if (! (REG_P (op) && REGNO (op) >= FIRST_PSEUDO_REGISTER)) |
2881 | { |
2882 | if (lra_dump_file != NULL) |
2883 | fprintf |
2884 | (lra_dump_file, |
2885 | " %d Non-pseudo reload: reject+=2\n", |
2886 | nop); |
2887 | reject += 2; |
2888 | } |
2889 | |
2890 | if (! no_regs_p) |
2891 | reload_nregs |
2892 | += ira_reg_class_max_nregs[this_alternative][mode]; |
2893 | |
2894 | if (SMALL_REGISTER_CLASS_P (this_alternative)) |
2895 | { |
2896 | if (lra_dump_file != NULL) |
2897 | fprintf |
2898 | (lra_dump_file, |
2899 | " %d Small class reload: reject+=%d\n", |
2900 | nop, LRA_LOSER_COST_FACTOR / 2); |
2901 | reject += LRA_LOSER_COST_FACTOR / 2; |
2902 | } |
2903 | } |
2904 | |
2905 | /* We are trying to spill a pseudo into memory. It is |
2906 | usually more costly than moving to a hard register |
2907 | although it might take the same number of |
2908 | reloads. |
2909 | |
2910 | A non-pseudo spill may also happen. Suppose a target allows both |
2911 | register and memory in the operand constraint alternatives, |
2912 | then it's typical that an eliminable register has a substitution |
2913 | of "base + offset" which can either be reloaded by a simple |
2914 | "new_reg <= base + offset" which will match the register |
2915 | constraint, or a similar reg addition followed by a further spill |
2916 | to and reload from memory which will match the memory |
2917 | constraint, but this memory spill will usually be much more |
2918 | costly. |
2919 | |
2920 | The code below increases the reject for both pseudo and non-pseudo |
2921 | spill. */ |
2922 | if (no_regs_p |
2923 | && !(MEM_P (op) && offmemok) |
2924 | && !(REG_P (op) && hard_regno[nop] < 0)) |
2925 | { |
2926 | if (lra_dump_file != NULL) |
2927 | fprintf |
2928 | (lra_dump_file, |
2929 | " %d Spill %spseudo into memory: reject+=3\n", |
2930 | nop, REG_P (op) ? "" : "Non-"); |
2931 | reject += 3; |
2932 | if (VECTOR_MODE_P (mode)) |
2933 | { |
2934 | /* Spilling vectors into memory is usually more |
2935 | costly as they contain big values. */ |
2936 | if (lra_dump_file != NULL) |
2937 | fprintf |
2938 | (lra_dump_file, |
2939 | " %d Spill vector pseudo: reject+=2\n", |
2940 | nop); |
2941 | reject += 2; |
2942 | } |
2943 | } |
2944 | |
2945 | /* When we use an operand requiring memory in a given |
2946 | alternative, the insn must write *and* read the |
2947 | value to/from memory, which is costly in comparison with |
2948 | an insn alternative which does not use memory |
2949 | (e.g. a register or immediate operand). We exclude a |
2950 | memory operand for such a case as we can satisfy the |
2951 | memory constraints by reloading the address. */ |
2952 | if (no_regs_p && offmemok && !MEM_P (op)) |
2953 | { |
2954 | if (lra_dump_file != NULL) |
2955 | fprintf |
2956 | (lra_dump_file, |
2957 | " Using memory insn operand %d: reject+=3\n", |
2958 | nop); |
2959 | reject += 3; |
2960 | } |
2961 | |
2962 | /* If reload requires moving value through secondary |
2963 | memory, it will need one more insn at least. */ |
2964 | if (this_alternative != NO_REGS |
2965 | && REG_P (op) && (cl = get_reg_class (REGNO (op))) != NO_REGS |
2966 | && ((curr_static_id->operand[nop].type != OP_OUT |
2967 | && targetm.secondary_memory_needed (GET_MODE (op), cl, |
2968 | this_alternative)) |
2969 | || (curr_static_id->operand[nop].type != OP_IN |
2970 | && (targetm.secondary_memory_needed |
2971 | (GET_MODE (op), this_alternative, cl))))) |
2972 | losers++; |
2973 | |
2974 | if (MEM_P (op) && offmemok) |
2975 | addr_losers++; |
2976 | else |
2977 | { |
2978 | /* Input reloads can be inherited more often than |
2979 | output reloads can be removed, so penalize output |
2980 | reloads. */ |
2981 | if (!REG_P (op) || curr_static_id->operand[nop].type != OP_IN) |
2982 | { |
2983 | if (lra_dump_file != NULL) |
2984 | fprintf |
2985 | (lra_dump_file, |
2986 | " %d Non input pseudo reload: reject++\n", |
2987 | nop); |
2988 | reject++; |
2989 | } |
2990 | |
2991 | if (curr_static_id->operand[nop].type == OP_INOUT) |
2992 | { |
2993 | if (lra_dump_file != NULL) |
2994 | fprintf |
2995 | (lra_dump_file, |
2996 | " %d Input/Output reload: reject+=%d\n", |
2997 | nop, LRA_LOSER_COST_FACTOR); |
2998 | reject += LRA_LOSER_COST_FACTOR; |
2999 | } |
3000 | } |
3001 | } |
3002 | |
3003 | if (early_clobber_p && ! scratch_p) |
3004 | { |
3005 | if (lra_dump_file != NULL) |
3006 | fprintf (lra_dump_file, |
3007 | " %d Early clobber: reject++\n", nop); |
3008 | reject++; |
3009 | } |
3010 | /* ??? We check early clobbers after processing all operands |
3011 | (see loop below) and there we update the costs more. |
3012 | Should we update the cost (maybe approximately) here |
3013 | because of early clobber register reloads, or is this too |
3014 | rare or unimportant to be worth doing? */ |
3015 | overall = (losers * LRA_LOSER_COST_FACTOR + reject |
3016 | - (addr_losers == losers ? static_reject : 0)); |
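| /* [Editor's note -- worked example, not part of the original |
| source.] With LRA_LOSER_COST_FACTOR == 6, an alternative with 2 |
| losers and reject == 5 gets overall = 2 * 6 + 5 = 17; and when all |
| losers are address reloads (addr_losers == losers), static_reject |
| is subtracted back out of the total. */ |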
3017 | if ((best_losers == 0 || losers != 0) && best_overall < overall) |
3018 | { |
3019 | if (lra_dump_file != NULL) |
3020 | fprintf (lra_dump_file, |
3021 | " alt=%d,overall=%d,losers=%d -- refuse\n", |
3022 | nalt, overall, losers); |
3023 | goto fail; |
3024 | } |
3025 | |
3026 | if (update_and_check_small_class_inputs (nop, nalt, |
3027 | this_alternative)) |
3028 | { |
3029 | if (lra_dump_file != NULL) |
3030 | fprintf (lra_dump_file, |
3031 | " alt=%d, not enough small class regs -- refuse\n", |
3032 | nalt); |
3033 | goto fail; |
3034 | } |
3035 | curr_alt[nop] = this_alternative; |
3036 | curr_alt_set[nop] = this_alternative_set; |
3037 | curr_alt_exclude_start_hard_regs[nop] |
3038 | = this_alternative_exclude_start_hard_regs; |
3039 | curr_alt_win[nop] = this_alternative_win; |
3040 | curr_alt_match_win[nop] = this_alternative_match_win; |
3041 | curr_alt_offmemok[nop] = this_alternative_offmemok; |
3042 | curr_alt_matches[nop] = this_alternative_matches; |
3043 | |
3044 | if (this_alternative_matches >= 0 |
3045 | && !did_match && !this_alternative_win) |
3046 | curr_alt_win[this_alternative_matches] = false; |
3047 | |
3048 | if (early_clobber_p && operand_reg[nop] != NULL_RTX) |
3049 | early_clobbered_nops[early_clobbered_regs_num++] = nop; |
3050 | } |
3051 | |
3052 | if (curr_insn_set != NULL_RTX && n_operands == 2 |
3053 | /* Prevent processing non-move insns. */ |
3054 | && (GET_CODE (SET_SRC (curr_insn_set)) == SUBREG |
3055 | || SET_SRC (curr_insn_set) == no_subreg_reg_operand[1]) |
3056 | && ((! curr_alt_win[0] && ! curr_alt_win[1] |
3057 | && REG_P (no_subreg_reg_operand[0]) |
3058 | && REG_P (no_subreg_reg_operand[1]) |
3059 | && (reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1]) |
3060 | || reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0]))) |
3061 | || (! curr_alt_win[0] && curr_alt_win[1] |
3062 | && REG_P (no_subreg_reg_operand[1]) |
3063 | /* Check that we reload memory not the memory |
3064 | address. */ |
3065 | && ! (curr_alt_offmemok[0] |
3066 | && MEM_P (no_subreg_reg_operand[0])) |
3067 | && reg_in_class_p (no_subreg_reg_operand[1], curr_alt[0])) |
3068 | || (curr_alt_win[0] && ! curr_alt_win[1] |
3069 | && REG_P (no_subreg_reg_operand[0]) |
3070 | /* Check that we reload memory not the memory |
3071 | address. */ |
3072 | && ! (curr_alt_offmemok[1] |
3073 | && MEM_P (no_subreg_reg_operand[1])) |
3074 | && reg_in_class_p (no_subreg_reg_operand[0], curr_alt[1]) |
3075 | && (! CONST_POOL_OK_P (curr_operand_mode[1], |
3076 | no_subreg_reg_operand[1]) |
3077 | || (targetm.preferred_reload_class |
3078 | (no_subreg_reg_operand[1], |
3079 | (enum reg_class) curr_alt[1]) != NO_REGS)) |
3080 | /* If it is a result of recent elimination in a move |
3081 | insn we can still transform it into an add by |
3082 | using this alternative. */ |
3083 | && GET_CODE (no_subreg_reg_operand[1]) != PLUS |
3084 | /* Likewise if the source has been replaced with an |
3085 | equivalent value. This only happens once -- the reload |
3086 | will use the equivalent value instead of the register it |
3087 | replaces -- so there should be no danger of cycling. */ |
3088 | && !equiv_substition_p[1]))) |
3089 | { |
3090 | /* We have a move insn and a new reload insn will be similar |
3091 | to the current insn. We should avoid such a situation as |
3092 | it results in LRA cycling. */ |
3093 | if (lra_dump_file != NULL) |
3094 | fprintf (lra_dump_file, |
3095 | " Cycle danger: overall += LRA_MAX_REJECT\n"); |
3096 | overall += LRA_MAX_REJECT; |
3097 | } |
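| /* [Editor's note -- illustrative sketch, not part of the original |
| source.] The cycling danger: for a move "p0 <- p1" whose chosen |
| alternative reloads p1 into a fresh pseudo of the same class, the |
| emitted reload sequence "new_p <- p1; p0 <- new_p" contains another |
| move of the same shape, which the next constraint sub-pass would |
| reload again, and so on. Inflating OVERALL steers the choice away. */ |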
3098 | ok_p = true; |
3099 | curr_alt_dont_inherit_ops_num = 0; |
3100 | for (nop = 0; nop < early_clobbered_regs_num; nop++) |
3101 | { |
3102 | int i, j, clobbered_hard_regno, first_conflict_j, last_conflict_j; |
3103 | HARD_REG_SET temp_set; |
3104 | |
3105 | i = early_clobbered_nops[nop]; |
3106 | if ((! curr_alt_win[i] && ! curr_alt_match_win[i]) |
3107 | || hard_regno[i] < 0) |
3108 | continue; |
3109 | lra_assert (operand_reg[i] != NULL_RTX); |
3110 | clobbered_hard_regno = hard_regno[i]; |
3111 | CLEAR_HARD_REG_SET (temp_set); |
3112 | add_to_hard_reg_set (&temp_set, GET_MODE (*curr_id->operand_loc[i]), |
3113 | clobbered_hard_regno); |
3114 | first_conflict_j = last_conflict_j = -1; |
3115 | for (j = 0; j < n_operands; j++) |
3116 | if (j == i |
3117 | /* We don't want to process the insides of match_operator and |
3118 | match_parallel because otherwise we would process |
3119 | their operands once again, generating wrong |
3120 | code. */ |
3121 | || curr_static_id->operand[j].is_operator) |
3122 | continue; |
3123 | else if ((curr_alt_matches[j] == i && curr_alt_match_win[j]) |
3124 | || (curr_alt_matches[i] == j && curr_alt_match_win[i])) |
3125 | continue; |
3126 | /* If we don't reload j-th operand, check conflicts. */ |
3127 | else if ((curr_alt_win[j] || curr_alt_match_win[j]) |
3128 | && uses_hard_regs_p (*curr_id->operand_loc[j], temp_set)) |
3129 | { |
3130 | if (first_conflict_j < 0) |
3131 | first_conflict_j = j; |
3132 | last_conflict_j = j; |
3133 | /* The earlyclobber operand and the conflicting operand |
3134 | cannot both be user-defined hard registers. */ |
3135 | if (HARD_REGISTER_P (operand_reg[i]) |
3136 | && REG_USERVAR_P (operand_reg[i]) |
3137 | && operand_reg[j] != NULL_RTX |
3138 | && HARD_REGISTER_P (operand_reg[j]) |
3139 | && REG_USERVAR_P (operand_reg[j])) |
3140 | { |
3141 | /* For asm, let curr_insn_transform diagnose it. */ |
3142 | if (INSN_CODE (curr_insn) < 0) |
3143 | return false; |
3144 | fatal_insn ("unable to generate reloads for " |
3145 | "impossible constraints:", curr_insn); |
3146 | } |
3147 | } |
3148 | if (last_conflict_j < 0) |
3149 | continue; |
3150 | |
3151 | /* If an earlyclobber operand conflicts with another non-matching |
3152 | operand (i.e., they have been assigned the same hard register), |
3153 | then it is better to reload the other operand, as there may |
3154 | exist yet another operand with a matching constraint associated |
3155 | with the earlyclobber operand. However, if one of the operands |
3156 | is an explicit use of a hard register, then we must reload the |
3157 | other non-hard register operand. */ |
3158 | if (HARD_REGISTER_P (operand_reg[i]) |
3159 | || (first_conflict_j == last_conflict_j |
3160 | && operand_reg[last_conflict_j] != NULL_RTX |
3161 | && !curr_alt_match_win[last_conflict_j] |
3162 | && !HARD_REGISTER_P (operand_reg[last_conflict_j]))) |
3163 | { |
3164 | curr_alt_win[last_conflict_j] = false; |
3165 | curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] |
3166 | = last_conflict_j; |
3167 | losers++; |
3168 | if (lra_dump_file != NULL) |
3169 | fprintf |
3170 | (lra_dump_file, |
3171 | " %d Conflict early clobber reload: reject--\n", |
3172 | i); |
3173 | } |
3174 | else |
3175 | { |
3176 | /* We need to reload early clobbered register and the |
3177 | matched registers. */ |
3178 | for (j = 0; j < n_operands; j++) |
3179 | if (curr_alt_matches[j] == i) |
3180 | { |
3181 | curr_alt_match_win[j] = false; |
3182 | losers++; |
3183 | overall += LRA_LOSER_COST_FACTOR; |
3184 | } |
3185 | if (! curr_alt_match_win[i]) |
3186 | curr_alt_dont_inherit_ops[curr_alt_dont_inherit_ops_num++] = i; |
3187 | else |
3188 | { |
3189 | /* Remember that pseudos used for match reloads are never |
3190 | inherited. */ |
3191 | lra_assert (curr_alt_matches[i] >= 0); |
3192 | curr_alt_win[curr_alt_matches[i]] = false; |
3193 | } |
3194 | curr_alt_win[i] = curr_alt_match_win[i] = false; |
3195 | losers++; |
3196 | if (lra_dump_file != NULL) |
3197 | fprintf |
3198 | (lra_dump_file, |
3199 | " %d Matched conflict early clobber reloads: " |
3200 | "reject--\n", |
3201 | i); |
3202 | } |
3203 | /* Early clobber was already reflected in REJECT. */ |
3204 | if (!matching_early_clobber[i]) |
3205 | { |
3206 | lra_assert (reject > 0); |
3207 | reject--; |
3208 | matching_early_clobber[i] = 1; |
3209 | } |
3210 | overall += LRA_LOSER_COST_FACTOR - 1; |
3211 | } |
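| /* [Editor's note -- illustrative sketch, not part of the original |
| source.] Typical conflict handled above: operand 0 is "=&r" (early |
| clobber) and an input operand was assigned the same hard register. |
| Because the output is written before all inputs are read, one of the |
| two must be moved, so the loop forces a reload of the conflicting or |
| matched operand and re-charges its cost via LRA_LOSER_COST_FACTOR. */ |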
3212 | if (lra_dump_file != NULL) |
3213 | fprintf (lra_dump_file, " alt=%d,overall=%d,losers=%d,rld_nregs=%d\n", |
3214 | nalt, overall, losers, reload_nregs); |
3215 | |
3216 | /* If this alternative can be made to work by reloading, and it |
3217 | needs less reloading than the others checked so far, record |
3218 | it as the chosen goal for reloading. */ |
3219 | if ((best_losers != 0 && losers == 0) |
3220 | || (((best_losers == 0 && losers == 0) |
3221 | || (best_losers != 0 && losers != 0)) |
3222 | && (best_overall > overall |
3223 | || (best_overall == overall |
3224 | /* If the cost of the reloads is the same, |
3225 | prefer alternative which requires minimal |
3226 | number of reload regs. */ |
3227 | && (reload_nregs < best_reload_nregs |
3228 | || (reload_nregs == best_reload_nregs |
3229 | && (best_reload_sum < reload_sum |
3230 | || (best_reload_sum == reload_sum |
3231 | && nalt < goal_alt_number)))))))) |
3232 | { |
3233 | for (nop = 0; nop < n_operands; nop++) |
3234 | { |
3235 | goal_alt_win[nop] = curr_alt_win[nop]; |
3236 | goal_alt_match_win[nop] = curr_alt_match_win[nop]; |
3237 | goal_alt_matches[nop] = curr_alt_matches[nop]; |
3238 | goal_alt[nop] = curr_alt[nop]; |
3239 | goal_alt_exclude_start_hard_regs[nop] |
3240 | = curr_alt_exclude_start_hard_regs[nop]; |
3241 | goal_alt_offmemok[nop] = curr_alt_offmemok[nop]; |
3242 | } |
3243 | goal_alt_dont_inherit_ops_num = curr_alt_dont_inherit_ops_num; |
3244 | for (nop = 0; nop < curr_alt_dont_inherit_ops_num; nop++) |
3245 | goal_alt_dont_inherit_ops[nop] = curr_alt_dont_inherit_ops[nop]; |
3246 | goal_alt_swapped = curr_swapped; |
3247 | best_overall = overall; |
3248 | best_losers = losers; |
3249 | best_reload_nregs = reload_nregs; |
3250 | best_reload_sum = reload_sum; |
3251 | goal_alt_number = nalt; |
3252 | } |
3253 | if (losers == 0) |
3254 | /* Everything is satisfied. Do not process alternatives |
3255 | anymore. */ |
3256 | break; |
3257 | fail: |
3258 | ; |
3259 | } |
3260 | return ok_p; |
3261 | } |
3262 | |
3263 | /* Make reload base reg from address AD. */ |
3264 | static rtx |
3265 | base_to_reg (struct address_info *ad) |
3266 | { |
3267 | enum reg_class cl; |
3268 | int code = -1; |
3269 | rtx new_inner = NULL_RTX; |
3270 | rtx new_reg = NULL_RTX; |
3271 | rtx_insn *insn; |
3272 | rtx_insn *last_insn = get_last_insn(); |
3273 | |
3274 | lra_assert (ad->disp == ad->disp_term); |
3275 | cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code, |
3276 | get_index_code (ad)); |
3277 | new_reg = lra_create_new_reg (GET_MODE (*ad->base), NULL_RTX, cl, NULL, |
3278 | "base"); |
3279 | new_inner = simplify_gen_binary (PLUS, GET_MODE (new_reg), new_reg, |
3280 | ad->disp_term == NULL |
3281 | ? const0_rtx |
3282 | : *ad->disp_term); |
3283 | if (!valid_address_p (ad->mode, new_inner, ad->as)) |
3284 | return NULL_RTX; |
3285 | insn = emit_insn (gen_rtx_SET (new_reg, *ad->base)); |
3286 | code = recog_memoized (insn); |
3287 | if (code < 0) |
3288 | { |
3289 | delete_insns_since (last_insn); |
3290 | return NULL_RTX; |
3291 | } |
3292 | |
3293 | return new_inner; |
3294 | } |
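| /* [Editor's note -- usage sketch, not part of the original source.] |
| process_address_1 below uses this roughly as |
| |
| new_reg = base_to_reg (&ad); |
| ... |
| *ad.inner = new_reg; |
| |
| i.e. on success the returned "new_reg + disp" expression replaces the |
| inner address; on failure (move not recognized, or the rebuilt |
| address still invalid) the emitted insns are deleted and NULL_RTX is |
| returned so the caller falls back to base_plus_disp_to_reg. */ |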
3295 | |
3296 | /* Make reload base reg + DISP from address AD. Return the new pseudo. */ |
3297 | static rtx |
3298 | base_plus_disp_to_reg (struct address_info *ad, rtx disp) |
3299 | { |
3300 | enum reg_class cl; |
3301 | rtx new_reg; |
3302 | |
3303 | lra_assert (ad->base == ad->base_term); |
3304 | cl = base_reg_class (ad->mode, ad->as, ad->base_outer_code, |
3305 | get_index_code (ad)); |
3306 | new_reg = lra_create_new_reg (GET_MODE (*ad->base_term), NULL_RTX, cl, NULL, |
3307 | "base + disp"); |
3308 | lra_emit_add (new_reg, *ad->base_term, disp); |
3309 | return new_reg; |
3310 | } |
3311 | |
3312 | /* Make reload of index part of address AD. Return the new |
3313 | pseudo. */ |
3314 | static rtx |
3315 | index_part_to_reg (struct address_info *ad) |
3316 | { |
3317 | rtx new_reg; |
3318 | |
3319 | new_reg = lra_create_new_reg (GET_MODE (*ad->index), NULL_RTX, |
3320 | INDEX_REG_CLASS, NULL, "index term"); |
3321 | expand_mult (GET_MODE (*ad->index), *ad->index_term, |
3322 | GEN_INT (get_index_scale (ad)), new_reg, 1); |
3323 | return new_reg; |
3324 | } |
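| /* [Editor's note -- illustrative sketch, not part of the original |
| source.] E.g. for an address (plus (mult (reg R) (const_int 4)) |
| (reg B)) the index term is R with scale 4; the expand_mult call |
| emits "new_reg = R * 4", collapsing the scaled index into a single |
| register of INDEX_REG_CLASS. */ |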
3325 | |
3326 | /* Return true if we can add a displacement to address AD, even if that |
3327 | makes the address invalid. The fix-up code requires any new address |
3328 | to be the sum of the BASE_TERM, INDEX and DISP_TERM fields. */ |
3329 | static bool |
3330 | can_add_disp_p (struct address_info *ad) |
3331 | { |
3332 | return (!ad->autoinc_p |
3333 | && ad->segment == NULL |
3334 | && ad->base == ad->base_term |
3335 | && ad->disp == ad->disp_term); |
3336 | } |
3337 | |
3338 | /* Make equiv substitution in address AD. Return true if a substitution |
3339 | was made. */ |
3340 | static bool |
3341 | equiv_address_substitution (struct address_info *ad) |
3342 | { |
3343 | rtx base_reg, new_base_reg, index_reg, new_index_reg, *base_term, *index_term; |
3344 | poly_int64 disp; |
3345 | HOST_WIDE_INT scale; |
3346 | bool change_p; |
3347 | |
3348 | base_term = strip_subreg (ad->base_term); |
3349 | if (base_term == NULL) |
3350 | base_reg = new_base_reg = NULL_RTX; |
3351 | else |
3352 | { |
3353 | base_reg = *base_term; |
3354 | new_base_reg = get_equiv_with_elimination (base_reg, curr_insn); |
3355 | } |
3356 | index_term = strip_subreg (ad->index_term); |
3357 | if (index_term == NULL) |
3358 | index_reg = new_index_reg = NULL_RTX; |
3359 | else |
3360 | { |
3361 | index_reg = *index_term; |
3362 | new_index_reg = get_equiv_with_elimination (index_reg, curr_insn); |
3363 | } |
3364 | if (base_reg == new_base_reg && index_reg == new_index_reg) |
3365 | return false; |
3366 | disp = 0; |
3367 | change_p = false; |
3368 | if (lra_dump_file != NULL) |
3369 | { |
3370 | fprintf (lra_dump_file, "Changing address in insn %d ", |
3371 | INSN_UID (curr_insn)); |
3372 | dump_value_slim (lra_dump_file, *ad->outer, 1); |
3373 | } |
3374 | if (base_reg != new_base_reg) |
3375 | { |
3376 | poly_int64 offset; |
3377 | if (REG_P (new_base_reg)) |
3378 | { |
3379 | *base_term = new_base_reg; |
3380 | change_p = true; |
3381 | } |
3382 | else if (GET_CODE (new_base_reg) == PLUS |
3383 | && REG_P (XEXP (new_base_reg, 0)) |
3384 | && poly_int_rtx_p (XEXP (new_base_reg, 1), &offset) |
3385 | && can_add_disp_p (ad)) |
3386 | { |
3387 | disp += offset; |
3388 | *base_term = XEXP (new_base_reg, 0); |
3389 | change_p = true; |
3390 | } |
3391 | if (ad->base_term2 != NULL) |
3392 | *ad->base_term2 = *ad->base_term; |
3393 | } |
3394 | if (index_reg != new_index_reg) |
3395 | { |
3396 | poly_int64 offset; |
3397 | if (REG_P (new_index_reg)) |
3398 | { |
3399 | *index_term = new_index_reg; |
3400 | change_p = true; |
3401 | } |
3402 | else if (GET_CODE (new_index_reg) == PLUS |
3403 | && REG_P (XEXP (new_index_reg, 0)) |
3404 | && poly_int_rtx_p (XEXP (new_index_reg, 1), &offset) |
3405 | && can_add_disp_p (ad) |
3406 | && (scale = get_index_scale (ad))) |
3407 | { |
3408 | disp += offset * scale; |
3409 | *index_term = XEXP (new_index_reg, 0); |
3410 | change_p = true; |
3411 | } |
3412 | } |
3413 | if (maybe_ne (disp, 0)) |
3414 | { |
3415 | if (ad->disp != NULL) |
3416 | *ad->disp = plus_constant (GET_MODE (*ad->inner), *ad->disp, disp); |
3417 | else |
3418 | { |
3419 | *ad->inner = plus_constant (GET_MODE (*ad->inner), *ad->inner, disp); |
3420 | update_address (ad); |
3421 | } |
3422 | change_p = true; |
3423 | } |
3424 | if (lra_dump_file != NULL) |
3425 | { |
3426 | if (! change_p) |
3427 | fprintf (lra_dump_file, " -- no change\n"); |
3428 | else |
3429 | { |
3430 | fprintf (lra_dump_file, " on equiv "); |
3431 | dump_value_slim (lra_dump_file, *ad->outer, 1); |
3432 | fprintf (lra_dump_file, "\n"); |
3433 | } |
3434 | } |
3435 | return change_p; |
3436 | } |
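| /* [Editor's note -- worked example, not part of the original |
| source.] Suppose the base pseudo B has equivalence |
| (plus (reg fp) (const_int 8)) and the address is |
| (plus B (const_int 4)): the PLUS branch above rewrites the base term |
| to fp and accumulates disp = 8, and the final maybe_ne block folds |
| it into the displacement, yielding (plus (reg fp) (const_int 12)). */ |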
3437 | |
3438 | /* Skip all modifiers and whitespace in constraint STR and return the |
3439 | result. */ |
3440 | static const char * |
3441 | skip_constraint_modifiers (const char *str) |
3442 | { |
3443 | for (;;str++) |
3444 | switch (*str) |
3445 | { |
3446 | case '+': case '&' : case '=': case '*': case ' ': case '\t': |
3447 | case '$': case '^' : case '%': case '?': case '!': |
3448 | break; |
3449 | default: return str; |
3450 | } |
3451 | } |
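| /* [Editor's note -- usage sketch, not part of the original source.] |
| E.g. skip_constraint_modifiers ("=&r,m") returns "r,m": only the |
| leading modifier characters listed above are skipped, never the |
| constraint letters themselves. */ |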
3452 | |
3453 | /* Major function to make reloads for an address in operand NOP or |
3454 | check its correctness (if CHECK_ONLY_P is true). The supported |
3455 | cases are: |
3456 | |
3457 | 1) an address that existed before LRA started, at which point it |
3458 | must have been valid. These addresses are subject to elimination |
3459 | and may have become invalid due to the elimination offset being out |
3460 | of range. |
3461 | |
3462 | 2) an address created by forcing a constant to memory |
3463 | (force_const_to_mem). The initial form of these addresses might |
3464 | not be valid, and it is this function's job to make them valid. |
3465 | |
3466 | 3) a frame address formed from a register and a (possibly zero) |
3467 | constant offset. As above, these addresses might not be valid and |
3468 | this function must make them so. |
3469 | |
3470 | Add reloads to the lists *BEFORE and *AFTER. We might need to add |
3471 | reloads to *AFTER because of inc/dec, {pre, post} modify in the |
3472 | address. Return true for any RTL change. |
3473 | |
3474 | The function is a helper function which does not produce all |
3475 | transformations (when CHECK_ONLY_P is false) which can be |
3476 | necessary. It does just basic steps. To do all necessary |
3477 | transformations use function process_address. */ |
3478 | static bool |
3479 | process_address_1 (int nop, bool check_only_p, |
3480 | rtx_insn **before, rtx_insn **after) |
3481 | { |
3482 | struct address_info ad; |
3483 | rtx new_reg; |
3484 | HOST_WIDE_INT scale; |
3485 | rtx op = *curr_id->operand_loc[nop]; |
3486 | rtx mem = extract_mem_from_operand (op); |
3487 | const char *constraint; |
3488 | enum constraint_num cn; |
3489 | bool change_p = false; |
3490 | |
3491 | if (MEM_P (mem) |
3492 | && GET_MODE (mem) == BLKmode |
3493 | && GET_CODE (XEXP (mem, 0)) == SCRATCH) |
3494 | return false; |
3495 | |
3496 | constraint |
3497 | = skip_constraint_modifiers (curr_static_id->operand[nop].constraint); |
3498 | if (IN_RANGE (constraint[0], '0', '9')) |
3499 | { |
3500 | char *end; |
3501 | unsigned long dup = strtoul (constraint, &end, 10); |
3502 | constraint |
3503 | = skip_constraint_modifiers (curr_static_id->operand[dup].constraint); |
3504 | } |
3505 | cn = lookup_constraint (*constraint == '\0' ? "X" : constraint); |
3506 | /* If we have several alternatives and/or several constraints in an |
3507 | alternative and we cannot say at this stage which constraint will be used, |
3508 | use the unknown constraint. The exception is an address constraint: if |
3509 | the operand has one address constraint, probably all other constraints are |
3510 | address ones. */ |
3511 | if (constraint[0] != '\0' && get_constraint_type (cn) != CT_ADDRESS |
3512 | && *skip_constraint_modifiers (constraint |
3513 | + CONSTRAINT_LEN (constraint[0], |
3514 | constraint)) != '\0') |
3515 | cn = CONSTRAINT__UNKNOWN; |
3516 | if (insn_extra_address_constraint (cn) |
3517 | /* When we find an asm operand with an address constraint that |
3518 | doesn't satisfy address_operand to begin with, we clear |
3519 | is_address, so that we don't try to make a non-address fit. |
3520 | If the asm statement got this far, it's because other |
3521 | constraints are available, and we'll use them, disregarding |
3522 | the unsatisfiable address ones. */ |
3523 | && curr_static_id->operand[nop].is_address) |
3524 | decompose_lea_address (&ad, curr_id->operand_loc[nop]); |
3525 | /* Do not attempt to decompose arbitrary addresses generated by combine |
3526 | for asm operands with loose constraints, e.g. 'X'. |
3527 | Need to extract memory from op for special memory constraint, |
3528 | i.e. bcst_mem_operand in the i386 backend. */ |
3529 | else if (MEM_P (mem) |
3530 | && !(INSN_CODE (curr_insn) < 0 |
3531 | && get_constraint_type (cn) == CT_FIXED_FORM |
3532 | && constraint_satisfied_p (op, cn))) |
3533 | decompose_mem_address (&ad, mem); |
3534 | else if (GET_CODE (op) == SUBREG |
3535 | && MEM_P (SUBREG_REG (op))) |
3536 | decompose_mem_address (&ad, SUBREG_REG (op)); |
3537 | else |
3538 | return false; |
3539 | /* If INDEX_REG_CLASS is assigned to base_term already and isn't to |
3540 | index_term, swap them so as to avoid assigning INDEX_REG_CLASS to both |
3541 | when INDEX_REG_CLASS is a single register class. */ |
3542 | if (ad.base_term != NULL |
3543 | && ad.index_term != NULL |
3544 | && ira_class_hard_regs_num[INDEX_REG_CLASS] == 1 |
3545 | && REG_P (*ad.base_term) |
3546 | && REG_P (*ad.index_term) |
3547 | && in_class_p (*ad.base_term, INDEX_REG_CLASS, NULL) |
3548 | && ! in_class_p (*ad.index_term, INDEX_REG_CLASS, NULL)) |
3549 | { |
3550 | std::swap (ad.base, ad.index); |
3551 | std::swap (ad.base_term, ad.index_term); |
3552 | } |
3553 | if (! check_only_p) |
3554 | change_p = equiv_address_substitution (&ad); |
3555 | if (ad.base_term != NULL |
3556 | && (process_addr_reg |
3557 | (ad.base_term, check_only_p, before, |
3558 | (ad.autoinc_p |
3559 | && !(REG_P (*ad.base_term) |
3560 | && find_regno_note (curr_insn, REG_DEAD, |
3561 | REGNO (*ad.base_term)) != NULL_RTX) |
3562 | ? after : NULL), |
3563 | base_reg_class (ad.mode, ad.as, ad.base_outer_code, |
3564 | get_index_code (&ad))))) |
3565 | { |
3566 | change_p = true; |
3567 | if (ad.base_term2 != NULL) |
3568 | *ad.base_term2 = *ad.base_term; |
3569 | } |
3570 | if (ad.index_term != NULL |
3571 | && process_addr_reg (ad.index_term, check_only_p, |
3572 | before, NULL, INDEX_REG_CLASS)) |
3573 | change_p = true; |
3574 | |
3575 | /* Target hooks sometimes don't treat extra-constraint addresses as |
3576 | legitimate address_operands, so handle them specially. */ |
3577 | if (insn_extra_address_constraint (cn) |
3578 | && satisfies_address_constraint_p (&ad, cn)) |
3579 | return change_p; |
3580 | |
3581 | if (check_only_p) |
3582 | return change_p; |
3583 | |
3584 | /* There are four cases where the shape of *AD.INNER may now be invalid: |
3585 | |
3586 | 1) the original address was valid, but either elimination or |
3587 | equiv_address_substitution was applied and that made |
3588 | the address invalid. |
3589 | |
3590 | 2) the address is an invalid symbolic address created by |
3591 | force_const_to_mem. |
3592 | |
3593 | 3) the address is a frame address with an invalid offset. |
3594 | |
3595 | 4) the address is a frame address with an invalid base. |
3596 | |
3597 | All these cases involve a non-autoinc address, so there is no |
3598 | point revalidating other types. */ |
3599 | if (ad.autoinc_p || valid_address_p (op, &ad, cn)) |
3600 | return change_p; |
3601 | |
3602 | /* Any index existed before LRA started, so we can assume that the |
3603 | presence and shape of the index is valid. */ |
3604 | push_to_sequence (*before); |
3605 | lra_assert (ad.disp == ad.disp_term); |
3606 | if (ad.base == NULL) |
3607 | { |
3608 | if (ad.index == NULL) |
3609 | { |
3610 | rtx_insn *insn; |
3611 | rtx_insn *last = get_last_insn (); |
3612 | int code = -1; |
3613 | enum reg_class cl = base_reg_class (ad.mode, ad.as, |
3614 | SCRATCH, SCRATCH); |
3615 | rtx addr = *ad.inner; |
3616 | |
3617 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr"); |
3618 | if (HAVE_lo_sum) |
3619 | { |
3620 | /* addr => lo_sum (new_base, addr), case (2) above. */ |
3621 | insn = emit_insn (gen_rtx_SET |
3622 | (new_reg, |
3623 | gen_rtx_HIGH (Pmode, copy_rtx (addr)))); |
3624 | code = recog_memoized (insn); |
3625 | if (code >= 0) |
3626 | { |
3627 | *ad.inner = gen_rtx_LO_SUM (Pmode, new_reg, addr); |
3628 | if (!valid_address_p (op, &ad, cn)) |
3629 | { |
3630 | /* Try to put lo_sum into register. */ |
3631 | insn = emit_insn (gen_rtx_SET |
3632 | (new_reg, |
3633 | gen_rtx_LO_SUM (Pmode, new_reg, addr))); |
3634 | code = recog_memoized (insn); |
3635 | if (code >= 0) |
3636 | { |
3637 | *ad.inner = new_reg; |
3638 | if (!valid_address_p (op, &ad, cn)) |
3639 | { |
3640 | *ad.inner = addr; |
3641 | code = -1; |
3642 | } |
3643 | } |
3644 | |
3645 | } |
3646 | } |
3647 | if (code < 0) |
3648 | delete_insns_since (last); |
3649 | } |
3650 | |
3651 | if (code < 0) |
3652 | { |
3653 | /* addr => new_base, case (2) above. */ |
3654 | lra_emit_move (new_reg, addr); |
3655 | |
3656 | for (insn = last == NULL_RTX ? get_insns () : NEXT_INSN (last); |
3657 | insn != NULL_RTX; |
3658 | insn = NEXT_INSN (insn)) |
3659 | if (recog_memoized (insn) < 0) |
3660 | break; |
3661 | if (insn != NULL_RTX) |
3662 | { |
3663 | /* Do nothing if we cannot generate right insns. |
3664 | This is analogous to reload pass behavior. */ |
3665 | delete_insns_since (last); |
3666 | end_sequence (); |
3667 | return false; |
3668 | } |
3669 | *ad.inner = new_reg; |
3670 | } |
3671 | } |
3672 | else |
3673 | { |
3674 | /* index * scale + disp => new base + index * scale, |
3675 | case (1) above. */ |
3676 | enum reg_class cl = base_reg_class (ad.mode, ad.as, PLUS, |
3677 | GET_CODE (*ad.index)); |
3678 | |
3679 | lra_assert (INDEX_REG_CLASS != NO_REGS); |
3680 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "disp"); |
3681 | lra_emit_move (new_reg, *ad.disp); |
3682 | *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg), |
3683 | new_reg, *ad.index); |
3684 | } |
3685 | } |
3686 | else if (ad.index == NULL) |
3687 | { |
3688 | int regno; |
3689 | enum reg_class cl; |
3690 | rtx set; |
3691 | rtx_insn *insns, *last_insn; |
3692 | /* Try to reload the base into a register only if the base is
3693 | invalid for the address but the offset is valid, case (4) above. */
3694 | start_sequence (); |
3695 | new_reg = base_to_reg (&ad); |
3696 | |
3697 | /* base + disp => new base, cases (1) and (3) above. */ |
3698 | /* Another option would be to reload the displacement into an |
3699 | index register. However, postreload has code to optimize |
3700 | address reloads that have the same base and different |
3701 | displacements, so reloading into an index register would |
3702 | not necessarily be a win. */ |
3703 | if (new_reg == NULL_RTX)
3704 | { |
3705 | /* See if the target can split the displacement into a |
3706 | legitimate new displacement from a local anchor. */ |
3707 | gcc_assert (ad.disp == ad.disp_term);
3708 | poly_int64 orig_offset; |
3709 | rtx offset1, offset2; |
3710 | if (poly_int_rtx_p (*ad.disp, &orig_offset) |
3711 | && targetm.legitimize_address_displacement (&offset1, &offset2, |
3712 | orig_offset, |
3713 | ad.mode)) |
3714 | { |
3715 | new_reg = base_plus_disp_to_reg (&ad, offset1); |
3716 | new_reg = gen_rtx_PLUS (GET_MODE (new_reg), new_reg, offset2);
3717 | } |
3718 | else |
3719 | new_reg = base_plus_disp_to_reg (&ad, *ad.disp); |
3720 | } |
3721 | insns = get_insns (); |
3722 | last_insn = get_last_insn (); |
3723 | /* If we generated at least two insns, try last insn source as |
3724 | an address. If we succeed, we generate one less insn. */ |
3725 | if (REG_P (new_reg)
3726 | && last_insn != insns
3727 | && (set = single_set (last_insn)) != NULL_RTX
3728 | && GET_CODE (SET_SRC (set)) == PLUS
3729 | && REG_P (XEXP (SET_SRC (set), 0))
3730 | && CONSTANT_P (XEXP (SET_SRC (set), 1)))
3731 | {
3732 | *ad.inner = SET_SRC (set);
3733 | if (valid_address_p (op, &ad, cn))
3734 | {
3735 | *ad.base_term = XEXP (SET_SRC (set), 0);
3736 | *ad.disp_term = XEXP (SET_SRC (set), 1);
3737 | cl = base_reg_class (ad.mode, ad.as, ad.base_outer_code,
3738 | get_index_code (&ad));
3739 | regno = REGNO (*ad.base_term);
3740 | if (regno >= FIRST_PSEUDO_REGISTER
3741 | && cl != lra_get_allocno_class (regno))
3742 | lra_change_class (regno, cl, " Change to", true);
3743 | new_reg = SET_SRC (set);
3744 | delete_insns_since (PREV_INSN (last_insn)); |
3745 | } |
3746 | } |
3747 | end_sequence (); |
3748 | emit_insn (insns); |
3749 | *ad.inner = new_reg; |
3750 | } |
3751 | else if (ad.disp_term != NULL)
3752 | { |
3753 | /* base + scale * index + disp => new base + scale * index, |
3754 | case (1) above. */ |
3755 | gcc_assert (ad.disp == ad.disp_term);
3756 | new_reg = base_plus_disp_to_reg (&ad, *ad.disp);
3757 | *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
3758 | new_reg, *ad.index); |
3759 | } |
3760 | else if ((scale = get_index_scale (&ad)) == 1) |
3761 | { |
3762 | /* The last transformation to one reg will be made in the
3763 | curr_insn_transform function. */
3764 | end_sequence (); |
3765 | return false; |
3766 | } |
3767 | else if (scale != 0) |
3768 | { |
3769 | /* base + scale * index => base + new_reg, |
3770 | case (1) above. |
3771 | The index part of the address may become invalid. For example,
3772 | we may have replaced a pseudo by its equivalent memory, or a
3773 | subreg of the pseudo by memory of a different mode for which
3774 | the scale is prohibited. */
3775 | new_reg = index_part_to_reg (&ad); |
3776 | *ad.inner = simplify_gen_binary (PLUS, GET_MODE (new_reg),
3777 | *ad.base_term, new_reg); |
3778 | } |
3779 | else |
3780 | { |
3781 | enum reg_class cl = base_reg_class (ad.mode, ad.as, |
3782 | SCRATCH, SCRATCH); |
3783 | rtx addr = *ad.inner; |
3784 | |
3785 | new_reg = lra_create_new_reg (Pmode, NULL_RTX, cl, NULL, "addr");
3786 | /* addr => new_base. */ |
3787 | lra_emit_move (new_reg, addr); |
3788 | *ad.inner = new_reg; |
3789 | } |
3790 | *before = get_insns (); |
3791 | end_sequence (); |
3792 | return true; |
3793 | } |
3794 | |
3795 | /* If CHECK_ONLY_P is false, do address reloads as long as they are
3796 | necessary. Use process_address_1 as a helper function. Return
3797 | true for any RTL changes.
3798 | 
3799 | If CHECK_ONLY_P is true, just check address correctness. Return
3800 | false if the address is correct. */
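 | /* Reloading one part of an address can make another part invalid or
 | reloadable in turn (e.g. a reloaded displacement can expose a base
 | that itself needs a reload), hence the loop below re-runs
 | process_address_1 until it reports no more changes. */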
3801 | static bool |
3802 | process_address (int nop, bool check_only_p, |
3803 | rtx_insn **before, rtx_insn **after) |
3804 | { |
3805 | bool res = false; |
3806 | |
3807 | while (process_address_1 (nop, check_only_p, before, after)) |
3808 | { |
3809 | if (check_only_p) |
3810 | return true; |
3811 | res = true; |
3812 | } |
3813 | return res; |
3814 | } |
3815 | |
3816 | /* Emit insns to reload VALUE into a new register. VALUE is an
3817 | auto-increment or auto-decrement RTX whose operand is a register or
3818 | memory location; so reloading involves incrementing that location.
3819 | IN is either identical to VALUE, or some cheaper place to reload
3820 | the value being incremented/decremented from.
3821 | 
3822 | INC_AMOUNT is the number to increment or decrement by (always
3823 | positive and ignored for POST_MODIFY/PRE_MODIFY).
3824 | 
3825 | Return the pseudo containing the result. */
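 | /* For example (an illustration derived from the code below): for
 | VALUE == IN == (post_inc:SI (reg:SI 100)) and INC_AMOUNT 4, when
 | the direct add into (reg 100) is not recognized, the emitted
 | sequence is: r = (reg 100); r = r + 4; (reg 100) = r; r = r - 4;
 | and r, holding the pre-increment value, is returned. */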
3826 | static rtx |
3827 | emit_inc (enum reg_class new_rclass, rtx in, rtx value, poly_int64 inc_amount) |
3828 | { |
3829 | /* REG or MEM to be copied and incremented. */ |
3830 | rtx incloc = XEXP (value, 0);
3831 | /* Nonzero if increment after copying. */
3832 | int post = (GET_CODE (value) == POST_DEC || GET_CODE (value) == POST_INC
3833 | || GET_CODE (value) == POST_MODIFY);
3834 | rtx_insn *last;
3835 | rtx inc;
3836 | rtx_insn *add_insn;
3837 | int code;
3838 | rtx real_in = in == value ? incloc : in;
3839 | rtx result;
3840 | bool plus_p = true;
3841 | 
3842 | if (GET_CODE (value) == PRE_MODIFY || GET_CODE (value) == POST_MODIFY)
3843 | {
3844 | lra_assert (GET_CODE (XEXP (value, 1)) == PLUS
3845 | || GET_CODE (XEXP (value, 1)) == MINUS);
3846 | lra_assert (rtx_equal_p (XEXP (XEXP (value, 1), 0), XEXP (value, 0)));
3847 | plus_p = GET_CODE (XEXP (value, 1)) == PLUS;
3848 | inc = XEXP (XEXP (value, 1), 1);
3849 | }
3850 | else
3851 | {
3852 | if (GET_CODE (value) == PRE_DEC || GET_CODE (value) == POST_DEC)
3853 | inc_amount = -inc_amount;
3854 | 
3855 | inc = gen_int_mode (inc_amount, GET_MODE (value));
3856 | } |
3857 | |
3858 | if (! post && REG_P (incloc))
3859 | result = incloc;
3860 | else
3861 | result = lra_create_new_reg (GET_MODE (value), value, new_rclass, NULL,
3862 | "INC/DEC result");
3863 | 
3864 | if (real_in != result)
3865 | {
3866 | /* First copy the location to the result register. */
3867 | lra_assert (REG_P (result));
3868 | emit_insn (gen_move_insn (result, real_in)); |
3869 | } |
3870 | |
3871 | /* We suppose that there are insns to add/sub with the constant
3872 | increment permitted in {PRE,POST}_{DEC,INC,MODIFY}. At least the
3873 | old reload pass worked with this assumption. If the assumption
3874 | becomes wrong, we should use the approach in function
3875 | base_plus_disp_to_reg. */
3876 | if (in == value) |
3877 | { |
3878 | /* See if we can directly increment INCLOC. */ |
3879 | last = get_last_insn (); |
3880 | add_insn = emit_insn (plus_p |
3881 | ? gen_add2_insn (incloc, inc) |
3882 | : gen_sub2_insn (incloc, inc)); |
3883 | |
3884 | code = recog_memoized (add_insn); |
3885 | if (code >= 0) |
3886 | { |
3887 | if (! post && result != incloc) |
3888 | emit_insn (gen_move_insn (result, incloc)); |
3889 | return result; |
3890 | } |
3891 | delete_insns_since (last); |
3892 | } |
3893 | |
3894 | /* If we couldn't do the increment directly, we must increment in
3895 | RESULT. The way we do this depends on whether this is pre- or
3896 | post-increment. For pre-increment, copy INCLOC to the reload
3897 | register, increment it there, then save back. */
3898 | if (! post) |
3899 | { |
3900 | if (real_in != result) |
3901 | emit_insn (gen_move_insn (result, real_in)); |
3902 | if (plus_p) |
3903 | emit_insn (gen_add2_insn (result, inc)); |
3904 | else |
3905 | emit_insn (gen_sub2_insn (result, inc)); |
3906 | if (result != incloc) |
3907 | emit_insn (gen_move_insn (incloc, result)); |
3908 | } |
3909 | else |
3910 | { |
3911 | /* Post-increment. |
3912 | |
3913 | Because this might be a jump insn or a compare, and because |
3914 | RESULT may not be available after the insn in an input |
3915 | reload, we must do the incrementing before the insn for
3916 | which the reload is being done.
3917 | |
3918 | We have already copied IN to RESULT. Increment the copy in |
3919 | RESULT, save that back, then decrement RESULT so it has |
3920 | the original value. */ |
3921 | if (plus_p) |
3922 | emit_insn (gen_add2_insn (result, inc)); |
3923 | else |
3924 | emit_insn (gen_sub2_insn (result, inc)); |
3925 | emit_insn (gen_move_insn (incloc, result)); |
3926 | /* Restore non-modified value for the result. We prefer this |
3927 | way because it does not require an additional hard |
3928 | register. */ |
3929 | if (plus_p) |
3930 | { |
3931 | poly_int64 offset; |
3932 | if (poly_int_rtx_p (inc, &offset)) |
3933 | emit_insn (gen_add2_insn (result, |
3934 | gen_int_mode (-offset, |
3935 | GET_MODE (result))));
3936 | else |
3937 | emit_insn (gen_sub2_insn (result, inc)); |
3938 | } |
3939 | else |
3940 | emit_insn (gen_add2_insn (result, inc)); |
3941 | } |
3942 | return result; |
3943 | } |
3944 | |
3945 | /* Return true if the current move insn does not need processing as we |
3946 | already know that it satisfies its constraints. */ |
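 | /* E.g. (illustrative) (set (reg:SI p1) (reg:SI p2)) where both
 | pseudos get classes between which the target reports the minimal
 | register move cost of 2 needs no further processing. */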
3947 | static bool |
3948 | simple_move_p (void) |
3949 | { |
3950 | rtx dest, src; |
3951 | enum reg_class dclass, sclass; |
3952 | |
3953 | lra_assert (curr_insn_set != NULL_RTX);
3954 | dest = SET_DEST (curr_insn_set);
3955 | src = SET_SRC (curr_insn_set);
3956 | |
3957 | /* If the instruction has multiple sets we need to process it even if it |
3958 | is single_set. This can happen if one or more of the SETs are dead. |
3959 | See PR73650. */ |
3960 | if (multiple_sets (curr_insn)) |
3961 | return false; |
3962 | |
3963 | return ((dclass = get_op_class (dest)) != NO_REGS |
3964 | && (sclass = get_op_class (src)) != NO_REGS |
3965 | /* The backend guarantees that register moves of cost 2 |
3966 | never need reloads. */ |
3967 | && targetm.register_move_cost (GET_MODE (src), sclass, dclass) == 2);
3968 | } |
3969 | |
3970 | /* Swap operands NOP and NOP + 1. */ |
3971 | static inline void |
3972 | swap_operands (int nop) |
3973 | { |
3974 | std::swap (curr_operand_mode[nop], curr_operand_mode[nop + 1]); |
3975 | std::swap (original_subreg_reg_mode[nop], original_subreg_reg_mode[nop + 1]); |
3976 | std::swap (*curr_id->operand_loc[nop], *curr_id->operand_loc[nop + 1]); |
3977 | std::swap (equiv_substition_p[nop], equiv_substition_p[nop + 1]); |
3978 | /* Swap the duplicates too. */ |
3979 | lra_update_dup (curr_id, nop); |
3980 | lra_update_dup (curr_id, nop + 1); |
3981 | } |
3982 | |
3983 | /* Main entry point of the constraint code: search the body of the
3984 | current insn to choose the best alternative. It mimics the insn
3985 | alternative cost calculation model of the former reload pass
3986 | because machine descriptions were written to use this model. The
3987 | model can be changed in the future. Make the commutative operand
3988 | exchange if it is chosen.
3989 | 
3990 | If CHECK_ONLY_P is false, do RTL changes to satisfy the
3991 | constraints. Return true if any change happened during the
3992 | function call.
3993 | 
3994 | If CHECK_ONLY_P is true then don't do any transformation. Just
3995 | check that the insn satisfies all constraints. If the insn does
3996 | not satisfy any constraint, return true. */
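 | /* An outline of the steps below, derived from the code: equivalence
 | substitution and memory subreg elimination, address reloads,
 | alternative selection (possibly repeated with commutative operands
 | swapped), a secondary-memory fallback when no alternative works
 | directly, and finally generation of the reloads for each operand. */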
3997 | static bool |
3998 | curr_insn_transform (bool check_only_p) |
3999 | { |
4000 | int i, j, k; |
4001 | int n_operands; |
4002 | int n_alternatives; |
4003 | int n_outputs; |
4004 | int commutative; |
4005 | signed char goal_alt_matched[MAX_RECOG_OPERANDS][MAX_RECOG_OPERANDS];
4006 | signed char match_inputs[MAX_RECOG_OPERANDS + 1];
4007 | signed char outputs[MAX_RECOG_OPERANDS + 1];
4008 | rtx_insn *before, *after; |
4009 | bool alt_p = false; |
4010 | /* Flag that the insn has been changed through a transformation. */ |
4011 | bool change_p; |
4012 | bool sec_mem_p; |
4013 | bool use_sec_mem_p; |
4014 | int max_regno_before; |
4015 | int reused_alternative_num; |
4016 | |
4017 | curr_insn_set = single_set (curr_insn); |
4018 | if (curr_insn_set != NULL_RTX && simple_move_p ())
4019 | { |
4020 | /* We assume that the corresponding insn alternative has no
4021 | early clobbers. If that is not the case, don't define the move
4022 | cost equal to 2 for the corresponding register classes. */
4023 | lra_set_used_insn_alternative (curr_insn, LRA_NON_CLOBBERED_ALT);
4024 | return false; |
4025 | } |
4026 | |
4027 | no_input_reloads_p = no_output_reloads_p = false; |
4028 | goal_alt_number = -1; |
4029 | change_p = sec_mem_p = false; |
4030 | |
4031 | /* CALL_INSNs are not allowed to have any output reloads. */ |
4032 | if (CALL_P (curr_insn))
4033 | no_output_reloads_p = true; |
4034 | |
4035 | n_operands = curr_static_id->n_operands; |
4036 | n_alternatives = curr_static_id->n_alternatives; |
4037 | |
4038 | /* Just return "no reloads" if insn has no operands with |
4039 | constraints. */ |
4040 | if (n_operands == 0 || n_alternatives == 0) |
4041 | return false; |
4042 | |
4043 | max_regno_before = max_reg_num (); |
4044 | |
4045 | for (i = 0; i < n_operands; i++) |
4046 | { |
4047 | goal_alt_matched[i][0] = -1; |
4048 | goal_alt_matches[i] = -1; |
4049 | } |
4050 | |
4051 | commutative = curr_static_id->commutative; |
4052 | |
4053 | /* Now see what we need for pseudos that didn't get hard regs or got |
4054 | the wrong kind of hard reg. For this, we must consider all the |
4055 | operands together against the register constraints. */ |
4056 | |
4057 | best_losers = best_overall = INT_MAX;
4058 | best_reload_sum = 0; |
4059 | |
4060 | curr_swapped = false; |
4061 | goal_alt_swapped = false; |
4062 | |
4063 | if (! check_only_p) |
4064 | /* Make equivalence substitution and memory subreg elimination
4065 | before address processing because address legitimacy can
4066 | depend on the memory mode. */
4067 | for (i = 0; i < n_operands; i++) |
4068 | { |
4069 | rtx op, subst, old; |
4070 | bool op_change_p = false; |
4071 | |
4072 | if (curr_static_id->operand[i].is_operator) |
4073 | continue; |
4074 | |
4075 | old = op = *curr_id->operand_loc[i]; |
4076 | if (GET_CODE (old) == SUBREG)
4077 | old = SUBREG_REG (old);
4078 | subst = get_equiv_with_elimination (old, curr_insn);
4079 | original_subreg_reg_mode[i] = VOIDmode;
4080 | equiv_substition_p[i] = false; |
4081 | if (subst != old) |
4082 | { |
4083 | equiv_substition_p[i] = true; |
4084 | subst = copy_rtx (subst); |
4085 | lra_assert (REG_P (old));
4086 | if (GET_CODE (op) != SUBREG)
4087 | *curr_id->operand_loc[i] = subst;
4088 | else
4089 | {
4090 | SUBREG_REG (op) = subst;
4091 | if (GET_MODE (subst) == VOIDmode)
4092 | original_subreg_reg_mode[i] = GET_MODE (old);
4093 | }
4094 | if (lra_dump_file != NULL)
4095 | { |
4096 | fprintf (lra_dump_file, |
4097 | "Changing pseudo %d in operand %i of insn %u on equiv ", |
4098 | REGNO (old), i, INSN_UID (curr_insn));
4099 | dump_value_slim (lra_dump_file, subst, 1); |
4100 | fprintf (lra_dump_file, "\n"); |
4101 | } |
4102 | op_change_p = change_p = true; |
4103 | } |
4104 | if (simplify_operand_subreg (i, GET_MODE (old)) || op_change_p)
4105 | { |
4106 | change_p = true; |
4107 | lra_update_dup (curr_id, i); |
4108 | } |
4109 | } |
4110 | |
4111 | /* Reload address registers and displacements. We do it before |
4112 | finding an alternative because of memory constraints. */ |
4113 | before = after = NULL;
4114 | for (i = 0; i < n_operands; i++) |
4115 | if (! curr_static_id->operand[i].is_operator |
4116 | && process_address (i, check_only_p, &before, &after)) |
4117 | { |
4118 | if (check_only_p) |
4119 | return true; |
4120 | change_p = true; |
4121 | lra_update_dup (curr_id, i); |
4122 | } |
4123 | |
4124 | if (change_p) |
4125 | /* If we've changed the instruction then any alternative that |
4126 | we chose previously may no longer be valid. */ |
4127 | lra_set_used_insn_alternative (curr_insn, LRA_UNKNOWN_ALT);
4128 | |
4129 | if (! check_only_p && curr_insn_set != NULL_RTX
4130 | && check_and_process_move (&change_p, &sec_mem_p)) |
4131 | return change_p; |
4132 | |
4133 | try_swapped: |
4134 | |
4135 | reused_alternative_num = check_only_p ? LRA_UNKNOWN_ALT : curr_id->used_insn_alternative;
4136 | if (lra_dump_file != NULL && reused_alternative_num >= 0)
4137 | fprintf (lra_dump_file, "Reusing alternative %d for insn #%u\n", |
4138 | reused_alternative_num, INSN_UID (curr_insn)); |
4139 | |
4140 | if (process_alt_operands (reused_alternative_num)) |
4141 | alt_p = true; |
4142 | |
4143 | if (check_only_p) |
4144 | return ! alt_p || best_losers != 0; |
4145 | |
4146 | /* If insn is commutative (it's safe to exchange a certain pair of |
4147 | operands) then we need to try each alternative twice, the second |
4148 | time matching those two operands as if we had exchanged them. To |
4149 | do this, really exchange them in operands. |
4150 | |
4151 | If we have just tried the alternatives the second time, return |
4152 | operands to normal and drop through. */ |
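 | /* E.g. (illustrative) for (set (reg 0) (plus (reg 1) (reg 2))) with
 | the plus operands marked commutative ("%" in the machine
 | description), each alternative is costed once as written and once
 | with (reg 1) and (reg 2) exchanged. */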
4153 | |
4154 | if (reused_alternative_num < 0 && commutative >= 0) |
4155 | { |
4156 | curr_swapped = !curr_swapped; |
4157 | if (curr_swapped) |
4158 | { |
4159 | swap_operands (commutative); |
4160 | goto try_swapped; |
4161 | } |
4162 | else |
4163 | swap_operands (commutative); |
4164 | } |
4165 | |
4166 | if (! alt_p && ! sec_mem_p) |
4167 | { |
4168 | /* No alternative works with reloads?? */ |
4169 | if (INSN_CODE (curr_insn) >= 0)
4170 | fatal_insn ("unable to generate reloads for:", curr_insn);
4171 | error_for_asm (curr_insn, |
4172 | "inconsistent operand constraints in an %<asm%>"); |
4173 | lra_asm_error_p = true; |
4174 | if (! JUMP_P (curr_insn))
4175 | { |
4176 | /* Avoid further trouble with this insn. Don't generate use |
4177 | pattern here as we could use the insn SP offset. */ |
4178 | lra_set_insn_deleted (curr_insn); |
4179 | } |
4180 | else |
4181 | { |
4182 | lra_invalidate_insn_data (curr_insn); |
4183 | ira_nullify_asm_goto (curr_insn); |
4184 | lra_update_insn_regno_info (curr_insn); |
4185 | } |
4186 | return true; |
4187 | } |
4188 | |
4189 | /* If the best alternative is with operands 1 and 2 swapped, swap |
4190 | them. Update the operand numbers of any reloads already |
4191 | pushed. */ |
4192 | |
4193 | if (goal_alt_swapped) |
4194 | { |
4195 | if (lra_dump_file != NULL)
4196 | fprintf (lra_dump_file, " Commutative operand exchange in insn %u\n", |
4197 | INSN_UID (curr_insn)); |
4198 | |
4199 | /* Swap the duplicates too. */ |
4200 | swap_operands (commutative); |
4201 | change_p = true; |
4202 | } |
4203 | |
4204 | /* Some targets' TARGET_SECONDARY_MEMORY_NEEDED (e.g. x86) are defined
4205 | too conservatively. So we use secondary memory only if there
4206 | is no alternative without reloads. */
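 | /* An illustrative case: on some x86 variants a move between a
 | general-purpose and a vector register in certain modes can only be
 | done through memory, so the move below is split into a store and a
 | load through a slot of the secondary memory mode. */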
4207 | use_sec_mem_p = false; |
4208 | if (! alt_p) |
4209 | use_sec_mem_p = true; |
4210 | else if (sec_mem_p) |
4211 | { |
4212 | for (i = 0; i < n_operands; i++) |
4213 | if (! goal_alt_win[i] && ! goal_alt_match_win[i]) |
4214 | break; |
4215 | use_sec_mem_p = i < n_operands; |
4216 | } |
4217 | |
4218 | if (use_sec_mem_p) |
4219 | { |
4220 | int in = -1, out = -1; |
4221 | rtx new_reg, src, dest, rld; |
4222 | machine_mode sec_mode, rld_mode; |
4223 | |
4224 | lra_assert (curr_insn_set != NULL_RTX && sec_mem_p);
4225 | dest = SET_DEST (curr_insn_set);
4226 | src = SET_SRC (curr_insn_set);
4227 | for (i = 0; i < n_operands; i++) |
4228 | if (*curr_id->operand_loc[i] == dest) |
4229 | out = i; |
4230 | else if (*curr_id->operand_loc[i] == src) |
4231 | in = i; |
4232 | for (i = 0; i < curr_static_id->n_dups; i++) |
4233 | if (out < 0 && *curr_id->dup_loc[i] == dest) |
4234 | out = curr_static_id->dup_num[i]; |
4235 | else if (in < 0 && *curr_id->dup_loc[i] == src) |
4236 | in = curr_static_id->dup_num[i]; |
4237 | lra_assert (out >= 0 && in >= 0
4238 | && curr_static_id->operand[out].type == OP_OUT
4239 | && curr_static_id->operand[in].type == OP_IN);
4240 | rld = partial_subreg_p (GET_MODE (src), GET_MODE (dest)) ? src : dest;
4241 | rld_mode = GET_MODE (rld);
4242 | sec_mode = targetm.secondary_memory_needed_mode (rld_mode);
4243 | new_reg = lra_create_new_reg (sec_mode, NULL_RTX, NO_REGS, NULL,
4244 | "secondary"); |
4245 | /* If the mode is changed, it should be wider. */ |
4246 | lra_assert (!partial_subreg_p (sec_mode, rld_mode));
4247 | if (sec_mode != rld_mode) |
4248 | { |
4249 | /* If the target says specifically to use another mode for |
4250 | secondary memory moves we cannot reuse the original |
4251 | insn. */ |
4252 | after = emit_spill_move (false, new_reg, dest); |
4253 | lra_process_new_insns (curr_insn, NULL, after,
4254 | "Inserting the sec. move"); |
4255 | /* We may have non-null BEFORE here (e.g. after address
4256 | processing). */
4257 | push_to_sequence (before); |
4258 | before = emit_spill_move (true, new_reg, src); |
4259 | emit_insn (before); |
4260 | before = get_insns (); |
4261 | end_sequence (); |
4262 | lra_process_new_insns (curr_insn, before, NULL, "Changing on");
4263 | lra_set_insn_deleted (curr_insn); |
4264 | } |
4265 | else if (dest == rld) |
4266 | { |
4267 | *curr_id->operand_loc[out] = new_reg; |
4268 | lra_update_dup (curr_id, out); |
4269 | after = emit_spill_move (false, new_reg, dest); |
4270 | lra_process_new_insns (curr_insn, NULL, after,
4271 | "Inserting the sec. move"); |
4272 | } |
4273 | else |
4274 | { |
4275 | *curr_id->operand_loc[in] = new_reg; |
4276 | lra_update_dup (curr_id, in); |
4277 | /* See comments above. */ |
4278 | push_to_sequence (before); |
4279 | before = emit_spill_move (true, new_reg, src); |
4280 | emit_insn (before); |
4281 | before = get_insns (); |
4282 | end_sequence (); |
4283 | lra_process_new_insns (curr_insn, before, NULL,
4284 | "Inserting the sec. move"); |
4285 | } |
4286 | lra_update_insn_regno_info (curr_insn); |
4287 | return true; |
4288 | } |
4289 | |
4290 | lra_assert (goal_alt_number >= 0);
4291 | lra_set_used_insn_alternative (curr_insn, goal_alt_number); |
4292 | |
4293 | if (lra_dump_file != NULL)
4294 | { |
4295 | const char *p; |
4296 | |
4297 | fprintf (lra_dump_file, " Choosing alt %d in insn %u:", |
4298 | goal_alt_number, INSN_UID (curr_insn)); |
4299 | for (i = 0; i < n_operands; i++) |
4300 | { |
4301 | p = (curr_static_id->operand_alternative |
4302 | [goal_alt_number * n_operands + i].constraint); |
4303 | if (*p == '\0') |
4304 | continue; |
4305 | fprintf (lra_dump_file, " (%d) ", i); |
4306 | for (; *p != '\0' && *p != ',' && *p != '#'; p++) |
4307 | fputc (*p, lra_dump_file); |
4308 | } |
4309 | if (INSN_CODE (curr_insn) >= 0
4310 | && (p = get_insn_name (INSN_CODE (curr_insn))) != NULL)
4311 | fprintf (lra_dump_file, " {%s}", p); |
4312 | if (maybe_ne (curr_id->sp_offset, 0)) |
4313 | { |
4314 | fprintf (lra_dump_file, " (sp_off="); |
4315 | print_dec (curr_id->sp_offset, lra_dump_file); |
4316 | fprintf (lra_dump_file, ")"); |
4317 | } |
4318 | fprintf (lra_dump_file, "\n"); |
4319 | } |
4320 | |
4321 | /* Right now, for any pair of operands I and J that are required to |
4322 | match, with J < I, goal_alt_matches[I] is J. Add I to |
4323 | goal_alt_matched[J]. */ |
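 | /* E.g. if operand 2 must match operand 0 (a "0" constraint),
 | goal_alt_matches[2] is 0 on entry, and the loop below records
 | goal_alt_matched[0] = {2, -1}. */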
4324 | |
4325 | for (i = 0; i < n_operands; i++) |
4326 | if ((j = goal_alt_matches[i]) >= 0) |
4327 | { |
4328 | for (k = 0; goal_alt_matched[j][k] >= 0; k++) |
4329 | ; |
4330 | /* We allow matching one output operand and several input |
4331 | operands. */ |
4332 | lra_assert (k == 0
4333 | || (curr_static_id->operand[j].type == OP_OUT
4334 | && curr_static_id->operand[i].type == OP_IN
4335 | && (curr_static_id->operand
4336 | [goal_alt_matched[j][0]].type == OP_IN)));
4337 | goal_alt_matched[j][k] = i; |
4338 | goal_alt_matched[j][k + 1] = -1; |
4339 | } |
4340 | |
4341 | for (i = 0; i < n_operands; i++) |
4342 | goal_alt_win[i] |= goal_alt_match_win[i]; |
4343 | |
4344 | /* Any constants that aren't allowed and can't be reloaded into |
4345 | registers are here changed into memory references. */ |
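 | /* E.g. (illustrative) a CONST_DOUBLE that no alternative accepts
 | and that cannot be reloaded into the preferred class becomes a
 | constant pool reference via force_const_mem below. */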
4346 | for (i = 0; i < n_operands; i++) |
4347 | if (goal_alt_win[i]) |
4348 | { |
4349 | int regno; |
4350 | enum reg_class new_class; |
4351 | rtx reg = *curr_id->operand_loc[i]; |
4352 | |
4353 | if (GET_CODE (reg) == SUBREG)
4354 | reg = SUBREG_REG (reg);
4355 | 
4356 | if (REG_P (reg) && (regno = REGNO (reg)) >= FIRST_PSEUDO_REGISTER)
4357 | { |
4358 | bool ok_p = in_class_p (reg, goal_alt[i], &new_class); |
4359 | |
4360 | if (new_class != NO_REGS && get_reg_class (regno) != new_class) |
4361 | { |
4362 | lra_assert (ok_p);
4363 | lra_change_class (regno, new_class, " Change to", true); |
4364 | } |
4365 | } |
4366 | } |
4367 | else |
4368 | { |
4369 | const char *constraint; |
4370 | char c; |
4371 | rtx op = *curr_id->operand_loc[i]; |
4372 | rtx subreg = NULL_RTX;
4373 | machine_mode mode = curr_operand_mode[i]; |
4374 | |
4375 | if (GET_CODE (op) == SUBREG)
4376 | {
4377 | subreg = op;
4378 | op = SUBREG_REG (op);
4379 | mode = GET_MODE (op);
4380 | }
4381 | 
4382 | if (CONST_POOL_OK_P (mode, op)
4383 | && ((targetm.preferred_reload_class |
4384 | (op, (enum reg_class) goal_alt[i]) == NO_REGS) |
4385 | || no_input_reloads_p)) |
4386 | { |
4387 | rtx tem = force_const_mem (mode, op); |
4388 | |
4389 | change_p = true; |
4390 | if (subreg != NULL_RTX)
4391 | tem = gen_rtx_SUBREG (mode, tem, SUBREG_BYTE (subreg));
4392 | |
4393 | *curr_id->operand_loc[i] = tem; |
4394 | lra_update_dup (curr_id, i); |
4395 | process_address (i, false, &before, &after); |
4396 | |
4397 | /* If the alternative accepts constant pool refs directly |
4398 | there will be no reload needed at all. */ |
4399 | if (subreg != NULL_RTX)
4400 | continue; |
4401 | /* Skip alternatives before the one requested. */ |
4402 | constraint = (curr_static_id->operand_alternative |
4403 | [goal_alt_number * n_operands + i].constraint); |
4404 | for (; |
4405 | (c = *constraint) && c != ',' && c != '#'; |
4406 | constraint += CONSTRAINT_LEN (c, constraint))
4407 | { |
4408 | enum constraint_num cn = lookup_constraint (constraint); |
4409 | if ((insn_extra_memory_constraint (cn) |
4410 | || insn_extra_special_memory_constraint (cn) |
4411 | || insn_extra_relaxed_memory_constraint (cn)) |
4412 | && satisfies_memory_constraint_p (tem, cn)) |
4413 | break; |
4414 | } |
4415 | if (c == '\0' || c == ',' || c == '#') |
4416 | continue; |
4417 | |
4418 | goal_alt_win[i] = true; |
4419 | } |
4420 | } |
4421 | |
4422 | n_outputs = 0; |
4423 | for (i = 0; i < n_operands; i++) |
4424 | if (curr_static_id->operand[i].type == OP_OUT) |
4425 | outputs[n_outputs++] = i; |
4426 | outputs[n_outputs] = -1; |
4427 | for (i = 0; i < n_operands; i++) |
4428 | { |
4429 | int regno; |
4430 | bool optional_p = false; |
4431 | rtx old, new_reg; |
4432 | rtx op = *curr_id->operand_loc[i]; |
4433 | |
4434 | if (goal_alt_win[i]) |
4435 | { |
4436 | if (goal_alt[i] == NO_REGS |
4437 | && REG_P (op)
4438 | /* When we assign NO_REGS it means that we will not
4439 | assign a hard register to the scratch pseudo by the
4440 | assignment pass and the scratch pseudo will be
4441 | spilled. Spilled scratch pseudos are transformed
4442 | back to scratches at the end of LRA. */
4443 | && ira_former_scratch_operand_p (curr_insn, i)
4444 | && ira_former_scratch_p (REGNO (op)))
4445 | { |
4446 | int regno = REGNO (op);
4447 | lra_change_class (regno, NO_REGS, " Change to", true); |
4448 | if (lra_get_regno_hard_regno (regno) >= 0) |
4449 | /* We don't have to mark all insns affected by the
4450 | spilled pseudo as there is only one such insn, the
4451 | current one. */
4452 | reg_renumber[regno] = -1;
4453 | lra_assert (bitmap_single_bit_set_p
4454 | (&lra_reg_info[REGNO (op)].insn_bitmap));
4455 | } |
4456 | /* We can do an optional reload. If the pseudo got a hard |
4457 | reg, we might improve the code through inheritance. If |
4458 | it does not get a hard register we coalesce memory/memory |
4459 | moves later. Ignore move insns to avoid cycling. */ |
4460 | if (! lra_simple_p |
4461 | && lra_undo_inheritance_iter < LRA_MAX_INHERITANCE_PASSES
4462 | && goal_alt[i] != NO_REGS && REG_P (op)
4463 | && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER
4464 | && regno < new_regno_start |
4465 | && ! ira_former_scratch_p (regno) |
4466 | && reg_renumber[regno] < 0 |
4467 | /* Check that the optional reload pseudo will be able to |
4468 | hold given mode value. */ |
4469 | && ! (prohibited_class_reg_set_mode_p |
4470 | (goal_alt[i], reg_class_contents[goal_alt[i]],
4471 | PSEUDO_REGNO_MODE (regno)))
4472 | && (curr_insn_set == NULL_RTX
4473 | || !((REG_P (SET_SRC (curr_insn_set))
4474 | || MEM_P (SET_SRC (curr_insn_set))
4475 | || GET_CODE (SET_SRC (curr_insn_set)) == SUBREG)
4476 | && (REG_P (SET_DEST (curr_insn_set))
4477 | || MEM_P (SET_DEST (curr_insn_set))
4478 | || GET_CODE (SET_DEST (curr_insn_set)) == SUBREG))))
4479 | optional_p = true; |
4480 | else if (goal_alt_matched[i][0] != -1 |
4481 | && curr_static_id->operand[i].type == OP_OUT |
4482 | && (curr_static_id->operand_alternative |
4483 | [goal_alt_number * n_operands + i].earlyclobber) |
4484 | && REG_P (op))
4485 | { |
4486 | for (j = 0; goal_alt_matched[i][j] != -1; j++) |
4487 | { |
4488 | rtx op2 = *curr_id->operand_loc[goal_alt_matched[i][j]]; |
4489 | |
4490 | if (REG_P (op2) && REGNO (op) != REGNO (op2))
4491 | break; |
4492 | } |
4493 | if (goal_alt_matched[i][j] != -1) |
4494 | { |
4495 | /* Generate reloads for different output and matched |
4496 | input registers. This is the easiest way to avoid |
4497 | creation of non-existing register conflicts in |
4498 | lra-lives.cc. */ |
4499 | match_reload (i, goal_alt_matched[i], outputs, goal_alt[i], |
4500 | &goal_alt_exclude_start_hard_regs[i], &before, |
4501 | &after, TRUE);
4502 | } |
4503 | continue; |
4504 | } |
4505 | else |
4506 | continue; |
4507 | } |
4508 | |
4509 | /* Operands that match previous ones have already been handled. */ |
4510 | if (goal_alt_matches[i] >= 0) |
4511 | continue; |
4512 | |
4513 | /* We should not have an operand with a non-offsettable address
4514 | appearing where an offsettable address will do. It may also be
4515 | the case that the address needs to be special, in other words
4516 | not a general one (e.g. it needs no index reg). */
4517 | if (goal_alt_matched[i][0] == -1 && goal_alt_offmemok[i] && MEM_P (op))
4518 | { |
4519 | enum reg_class rclass; |
4520 | rtx *loc = &XEXP (op, 0);
4521 | enum rtx_code code = GET_CODE (*loc);
4522 | 
4523 | push_to_sequence (before);
4524 | rclass = base_reg_class (GET_MODE (op), MEM_ADDR_SPACE (op),
4525 | MEM, SCRATCH);
4526 | if (GET_RTX_CLASS (code) == RTX_AUTOINC)
4527 | new_reg = emit_inc (rclass, *loc, *loc,
4528 | /* This value does not matter for MODIFY. */
4529 | GET_MODE_SIZE (GET_MODE (op)));
4530 | else if (get_reload_reg (OP_IN, Pmode, *loc, rclass,
4531 | NULL, FALSE,
4532 | "offsetable address", &new_reg)) |
4533 | { |
4534 | rtx addr = *loc; |
4535 | enum rtx_code code = GET_CODE (addr);
4536 | bool align_p = false; |
4537 | |
4538 | if (code == AND && CONST_INT_P (XEXP (addr, 1)))
4539 | { |
4540 | /* (and ... (const_int -X)) is used to align to X bytes. */ |
4541 | align_p = true; |
4542 | addr = XEXP (*loc, 0);
4543 | } |
4544 | else |
4545 | addr = canonicalize_reload_addr (addr); |
4546 | |
4547 | lra_emit_move (new_reg, addr); |
4548 | if (align_p) |
4549 | emit_move_insn (new_reg, gen_rtx_AND (GET_MODE (new_reg), new_reg, XEXP (*loc, 1)));
4550 | } |
4551 | before = get_insns (); |
4552 | end_sequence (); |
4553 | *loc = new_reg; |
4554 | lra_update_dup (curr_id, i); |
4555 | } |
4556 | else if (goal_alt_matched[i][0] == -1) |
4557 | { |
4558 | machine_mode mode; |
4559 | rtx reg, *loc; |
4560 | int hard_regno; |
4561 | enum op_type type = curr_static_id->operand[i].type; |
4562 | |
4563 | loc = curr_id->operand_loc[i]; |
4564 | mode = curr_operand_mode[i]; |
4565 | if (GET_CODE (*loc) == SUBREG)
4566 | {
4567 | reg = SUBREG_REG (*loc);
4568 | poly_int64 byte = SUBREG_BYTE (*loc);
4569 | if (REG_P (reg)
4570 | /* Strict_low_part requires reloading the register and not
4571 | just the subreg. Likewise for a strict subreg no wider
4572 | than a word for WORD_REGISTER_OPERATIONS targets. */
4573 | && (curr_static_id->operand[i].strict_low
4574 | || (!paradoxical_subreg_p (mode, GET_MODE (reg))
4575 | && (hard_regno
4576 | = get_try_hard_regno (REGNO (reg))) >= 0
4577 | && (simplify_subreg_regno
4578 | (hard_regno,
4579 | GET_MODE (reg), byte, mode) < 0)
4580 | && (goal_alt[i] == NO_REGS
4581 | || (simplify_subreg_regno
4582 | (ira_class_hard_regs[goal_alt[i]][0],
4583 | GET_MODE (reg), byte, mode) >= 0)))
4584 | || (partial_subreg_p (mode, GET_MODE (reg))
4585 | && known_le (GET_MODE_SIZE (GET_MODE (reg)),
4586 | UNITS_PER_WORD)
4587 | && WORD_REGISTER_OPERATIONS))
4588 | /* Avoid the situation when there are no available hard regs
4589 | for the pseudo mode but there are ones for the subreg
4590 | mode: */
4591 | && !(goal_alt[i] != NO_REGS
4592 | && REGNO (reg) >= FIRST_PSEUDO_REGISTER
4593 | && (prohibited_class_reg_set_mode_p
4594 | (goal_alt[i], reg_class_contents[goal_alt[i]],
4595 | GET_MODE (reg)))
4596 | && !(prohibited_class_reg_set_mode_p
4597 | (goal_alt[i], reg_class_contents[goal_alt[i]],
4598 | mode))))
4599 | { |
4600 | /* An OP_INOUT is required when reloading a subreg of a |
4601 | mode wider than a word to ensure that data beyond the |
4602 | word being reloaded is preserved. Also automatically |
4603 | ensure that strict_low_part reloads are made into |
4604 | OP_INOUT which should already be true from the backend |
4605 | constraints. */ |
4606 | if (type == OP_OUT |
4607 | && (curr_static_id->operand[i].strict_low |
4608 | || read_modify_subreg_p (*loc))) |
4609 | type = OP_INOUT; |
4610 | loc = &SUBREG_REG (*loc);
4611 | mode = GET_MODE (*loc);
4612 | } |
4613 | } |
4614 | old = *loc; |
4615 | if (get_reload_reg (type, mode, old, goal_alt[i], |
4616 | &goal_alt_exclude_start_hard_regs[i], |
4617 | loc != curr_id->operand_loc[i], "", &new_reg) |
4618 | && type != OP_OUT) |
4619 | { |
4620 | push_to_sequence (before); |
4621 | lra_emit_move (new_reg, old); |
4622 | before = get_insns (); |
4623 | end_sequence (); |
4624 | } |
4625 | *loc = new_reg; |
4626 | if (type != OP_IN |
4627 | && find_reg_note (curr_insn, REG_UNUSED, old) == NULL_RTX)
4628 | { |
4629 | start_sequence (); |
4630 | lra_emit_move (type == OP_INOUT ? copy_rtx (old) : old, new_reg); |
4631 | emit_insn (after); |
4632 | after = get_insns (); |
4633 | end_sequence (); |
4634 | *loc = new_reg; |
4635 | } |
4636 | for (j = 0; j < goal_alt_dont_inherit_ops_num; j++) |
4637 | if (goal_alt_dont_inherit_ops[j] == i) |
4638 | { |
4639 | lra_set_regno_unique_value (REGNO (new_reg));
4640 | break; |
4641 | } |
4642 | lra_update_dup (curr_id, i); |
4643 | } |
4644 | else if (curr_static_id->operand[i].type == OP_IN |
4645 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4646 | == OP_OUT |
4647 | || (curr_static_id->operand[goal_alt_matched[i][0]].type |
4648 | == OP_INOUT |
4649 | && (operands_match_p |
4650 | (*curr_id->operand_loc[i], |
4651 | *curr_id->operand_loc[goal_alt_matched[i][0]], |
4652 | -1))))) |
4653 | { |
4654 | /* Generate reloads for the input and matched outputs. */
4655 | match_inputs[0] = i; |
4656 | match_inputs[1] = -1; |
4657 | match_reload (goal_alt_matched[i][0], match_inputs, outputs, |
4658 | goal_alt[i], &goal_alt_exclude_start_hard_regs[i], |
4659 | &before, &after, |
4660 | curr_static_id->operand_alternative |
4661 | [goal_alt_number * n_operands + goal_alt_matched[i][0]] |
4662 | .earlyclobber); |
4663 | } |
4664 | else if ((curr_static_id->operand[i].type == OP_OUT |
4665 | || (curr_static_id->operand[i].type == OP_INOUT |
4666 | && (operands_match_p |
4667 | (*curr_id->operand_loc[i], |
4668 | *curr_id->operand_loc[goal_alt_matched[i][0]], |
4669 | -1)))) |
4670 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4671 | == OP_IN)) |
4672 | /* Generate reloads for output and matched inputs. */ |
4673 | match_reload (i, goal_alt_matched[i], outputs, goal_alt[i], |
4674 | &goal_alt_exclude_start_hard_regs[i], &before, &after, |
4675 | curr_static_id->operand_alternative |
4676 | [goal_alt_number * n_operands + i].earlyclobber); |
4677 | else if (curr_static_id->operand[i].type == OP_IN |
4678 | && (curr_static_id->operand[goal_alt_matched[i][0]].type |
4679 | == OP_IN)) |
4680 | { |
4681 | /* Generate reloads for matched inputs. */ |
4682 | match_inputs[0] = i; |
4683 | for (j = 0; (k = goal_alt_matched[i][j]) >= 0; j++) |
4684 | match_inputs[j + 1] = k; |
4685 | match_inputs[j + 1] = -1; |
4686 | match_reload (-1, match_inputs, outputs, goal_alt[i], |
4687 | &goal_alt_exclude_start_hard_regs[i], |
4688 | &before, &after, false); |
4689 | } |
4690 | else |
4691 | /* We must generate code in any case when function |
4692 | process_alt_operands decides that it is possible. */ |
4693 | gcc_unreachable ();
4694 | |
4695 | if (optional_p) |
4696 | { |
4697 | rtx reg = op; |
4698 | |
4699 | lra_assert (REG_P (reg));
4700 | regno = REGNO (reg);
4701 | op = *curr_id->operand_loc[i]; /* Substitution. */
4702 | if (GET_CODE (op) == SUBREG)
4703 | op = SUBREG_REG (op);
4704 | gcc_assert (REG_P (op) && (int) REGNO (op) >= new_regno_start);
4705 | bitmap_set_bit (&lra_optional_reload_pseudos, REGNO (op));
4706 | lra_reg_info[REGNO (op)].restore_rtx = reg;
4707 | if (lra_dump_file != NULL)
4708 | fprintf (lra_dump_file,
4709 | " Making reload reg %d for reg %d optional\n",
4710 | REGNO (op), regno);
4711 | } |
4712 | } |
4713 | if (before != NULL_RTX || after != NULL_RTX
4714 | || max_regno_before != max_reg_num ()) |
4715 | change_p = true; |
4716 | if (change_p) |
4717 | { |
4718 | lra_update_operator_dups (curr_id); |
4719 | /* Something changes -- process the insn. */ |
4720 | lra_update_insn_regno_info (curr_insn); |
4721 | } |
4722 | lra_process_new_insns (curr_insn, before, after, "Inserting insn reload"); |
4723 | return change_p; |
4724 | } |
4725 | |
4726 | /* Return true if INSN satisfies all constraints. In other words, no |
4727 | reload insns are needed. */ |
4728 | bool |
4729 | lra_constrain_insn (rtx_insn *insn) |
4730 | { |
4731 | int saved_new_regno_start = new_regno_start; |
4732 | int saved_new_insn_uid_start = new_insn_uid_start; |
4733 | bool change_p; |
4734 | |
4735 | curr_insn = insn; |
4736 | curr_id = lra_get_insn_recog_data (curr_insn); |
4737 | curr_static_id = curr_id->insn_static_data; |
4738 | new_insn_uid_start = get_max_uid (); |
4739 | new_regno_start = max_reg_num (); |
4740 | change_p = curr_insn_transform (true); |
4741 | new_regno_start = saved_new_regno_start; |
4742 | new_insn_uid_start = saved_new_insn_uid_start; |
4743 | return ! change_p; |
4744 | } |
4745 | |
4746 | /* Return true if X is in LIST. */ |
4747 | static bool |
4748 | in_list_p (rtx x, rtx list) |
4749 | { |
4750 | for (; list != NULL_RTX; list = XEXP (list, 1))
4751 | if (XEXP (list, 0) == x)
4752 | return true; |
4753 | return false; |
4754 | } |
4755 | |
4756 | /* Return true if X contains an allocatable hard register (if |
4757 | HARD_REG_P) or a (spilled if SPILLED_P) pseudo. */ |
4758 | static bool |
4759 | contains_reg_p (rtx x, bool hard_reg_p, bool spilled_p) |
4760 | { |
4761 | int i, j; |
4762 | const char *fmt; |
4763 | enum rtx_code code; |
4764 | |
4765 | code = GET_CODE (x);
4766 | if (REG_P (x))
4767 | {
4768 | int regno = REGNO (x);
4769 | HARD_REG_SET alloc_regs; |
4770 | |
4771 | if (hard_reg_p) |
4772 | { |
4773 | if (regno >= FIRST_PSEUDO_REGISTER)
4774 | regno = lra_get_regno_hard_regno (regno); |
4775 | if (regno < 0) |
4776 | return false; |
4777 | alloc_regs = ~lra_no_alloc_regs; |
4778 | return overlaps_hard_reg_set_p (alloc_regs, GET_MODE (x), regno);
4779 | } |
4780 | else |
4781 | { |
4782 | if (regno < FIRST_PSEUDO_REGISTER)
4783 | return false; |
4784 | if (! spilled_p) |
4785 | return true; |
4786 | return lra_get_regno_hard_regno (regno) < 0; |
4787 | } |
4788 | } |
4789 | fmt = GET_RTX_FORMAT (code);
4790 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4791 | { |
4792 | if (fmt[i] == 'e') |
4793 | { |
4794 | if (contains_reg_p (XEXP (x, i), hard_reg_p, spilled_p))
4795 | return true; |
4796 | } |
4797 | else if (fmt[i] == 'E') |
4798 | { |
4799 | for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4800 | if (contains_reg_p (XVECEXP (x, i, j), hard_reg_p, spilled_p))
4801 | return true; |
4802 | } |
4803 | } |
4804 | return false; |
4805 | } |
4806 | |
4807 | /* Process all regs in location *LOC and replace them by their
4808 | equivalent substitutions. Return true if any change was done. */
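 | /* E.g. (illustrative) if pseudo 100 has the equivalence
 | (symbol_ref:SI "x"), each (reg:SI 100) inside *LOC is replaced by
 | that symbol_ref (after applying eliminations). */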
4809 | static bool |
4810 | loc_equivalence_change_p (rtx *loc) |
4811 | { |
4812 | rtx subst, reg, x = *loc; |
4813 | bool result = false; |
4814 | enum rtx_code code = GET_CODE (x);
4815 | const char *fmt; |
4816 | int i, j; |
4817 | |
4818 | if (code == SUBREG) |
4819 | { |
4820 | reg = SUBREG_REG (x);
4821 | if ((subst = get_equiv_with_elimination (reg, curr_insn)) != reg
4822 | && GET_MODE (subst) == VOIDmode)
4823 | { |
4824 | /* We cannot reload debug location. Simplify subreg here |
4825 | while we know the inner mode. */ |
4826 | *loc = simplify_gen_subreg (GET_MODE (x), subst,
4827 | GET_MODE (reg), SUBREG_BYTE (x));
4828 | return true; |
4829 | } |
4830 | } |
4831 | if (code == REG && (subst = get_equiv_with_elimination (x, curr_insn)) != x) |
4832 | { |
4833 | *loc = subst; |
4834 | return true; |
4835 | } |
4836 | |
4837 | /* Scan all the operand sub-expressions. */ |
4838 | fmt = GET_RTX_FORMAT (code);
4839 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4840 | {
4841 | if (fmt[i] == 'e')
4842 | result = loc_equivalence_change_p (&XEXP (x, i)) || result;
4843 | else if (fmt[i] == 'E')
4844 | for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4845 | result
4846 | = loc_equivalence_change_p (&XVECEXP (x, i, j)) || result;
4847 | } |
4848 | return result; |
4849 | } |
4850 | |
4851 | /* Similar to loc_equivalence_change_p, but for use as a
4852 | simplify_replace_fn_rtx callback. DATA is the insn for which the
4853 | elimination is done. If it is null we don't do the elimination. */
4854 | static rtx |
4855 | loc_equivalence_callback (rtx loc, const_rtx, void *data) |
4856 | { |
4857 | if (!REG_P (loc))
4858 | return NULL_RTX;
4859 | |
4860 | rtx subst = (data == NULL
4861 | ? get_equiv (loc) : get_equiv_with_elimination (loc, (rtx_insn *) data)); |
4862 | if (subst != loc) |
4863 | return subst; |
4864 | |
4865 | return NULL_RTX;
4866 | } |
4867 | |
4868 | /* Maximum number of generated reload insns per insn. It is for
4869 | preventing this pass from cycling in case of a bug. */
4870 | #define MAX_RELOAD_INSNS_NUMBER LRA_MAX_INSN_RELOADS
4871 | |
4872 | /* The current iteration number of this LRA pass. */ |
4873 | int lra_constraint_iter; |
4874 | |
4875 | /* True if we should check assignment correctness for all pseudos
4876 | during the assignment sub-pass and spill some of them to resolve
4877 | conflicts. It can be necessary when we substitute an equivalence
4878 | whose value contains allocatable hard registers (which requires
4879 | checking register allocation correctness), when we restore a
4880 | multi-register pseudo, or when we change the insn code and an
4881 | operand becomes an INOUT operand when it was an IN one before. */
4882 | bool check_and_force_assignment_correctness_p; |
4883 | |
4884 | /* Return true if REGNO is referenced in more than one block. */ |
4885 | static bool |
4886 | multi_block_pseudo_p (int regno) |
4887 | { |
4888 | basic_block bb = NULL;
4889 | unsigned int uid; |
4890 | bitmap_iterator bi; |
4891 | |
4892 | if (regno < FIRST_PSEUDO_REGISTER)
4893 | return false; |
4894 | |
4895 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi)
4896 | if (bb == NULL)
4897 | bb = BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn); |
4898 | else if (BLOCK_FOR_INSN (lra_insn_recog_data[uid]->insn) != bb) |
4899 | return true; |
4900 | return false; |
4901 | } |
4902 | |
4903 | /* Return true if LIST contains a deleted insn. */ |
4904 | static bool |
4905 | contains_deleted_insn_p (rtx_insn_list *list) |
4906 | { |
4907 | for (; list != NULL_RTX; list = list->next ())
4908 | if (NOTE_P (list->insn ())
4909 | && NOTE_KIND (list->insn ()) == NOTE_INSN_DELETED)
4910 | return true; |
4911 | return false; |
4912 | } |
4913 | |
4914 | /* Return true if X contains a pseudo dying in INSN.  */
4915 | static bool
4916 | dead_pseudo_p (rtx x, rtx_insn *insn)
4917 | {
4918 |   int i, j;
4919 |   const char *fmt;
4920 |   enum rtx_code code;
4921 | 
4922 |   if (REG_P (x))
4923 |     return (insn != NULL_RTX
4924 |             && find_regno_note (insn, REG_DEAD, REGNO (x)) != NULL_RTX);
4925 |   code = GET_CODE (x);
4926 |   fmt = GET_RTX_FORMAT (code);
4927 |   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
4928 |     {
4929 |       if (fmt[i] == 'e')
4930 |         {
4931 |           if (dead_pseudo_p (XEXP (x, i), insn))
4932 |             return true;
4933 |         }
4934 |       else if (fmt[i] == 'E')
4935 |         {
4936 |           for (j = XVECLEN (x, i) - 1; j >= 0; j--)
4937 |             if (dead_pseudo_p (XVECEXP (x, i, j), insn))
4938 |               return true;
4939 |         }
4940 |     }
4941 |   return false;
4942 | }
4943 | |
4944 | /* Return true if INSN contains a dying pseudo in its right hand
4945 |    side.  */
4946 | static bool
4947 | insn_rhs_dead_pseudo_p (rtx_insn *insn)
4948 | {
4949 |   rtx set = single_set (insn);
4950 | 
4951 |   gcc_assert (set != NULL);
4952 |   return dead_pseudo_p (SET_SRC (set), insn);
4953 | }
4954 | |
4955 | /* Return true if any init insn of REGNO contains a dying pseudo in
4956 |    the insn's right hand side.  */
4957 | static bool
4958 | init_insn_rhs_dead_pseudo_p (int regno)
4959 | {
4960 |   rtx_insn_list *insns = ira_reg_equiv[regno].init_insns;
4961 | 
4962 |   if (insns == NULL)
4963 |     return false;
4964 |   for (; insns != NULL_RTX; insns = insns->next ())
4965 |     if (insn_rhs_dead_pseudo_p (insns->insn ()))
4966 |       return true;
4967 |   return false;
4968 | }
4969 | |
4970 | /* Return TRUE if REGNO has a reverse equivalence.  The equivalence
4971 |    is reverse only if we have one init insn with the given REGNO as
4972 |    a source.  */
4973 | static bool
4974 | reverse_equiv_p (int regno)
4975 | {
4976 |   rtx_insn_list *insns = ira_reg_equiv[regno].init_insns;
4977 |   rtx set;
4978 | 
4979 |   if (insns == NULL)
4980 |     return false;
4981 |   if (! INSN_P (insns->insn ())
4982 |       || insns->next () != NULL)
4983 |     return false;
4984 |   if ((set = single_set (insns->insn ())) == NULL_RTX)
4985 |     return false;
4986 |   return REG_P (SET_SRC (set)) && (int) REGNO (SET_SRC (set)) == regno;
4987 | }
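/* Editorial illustration, not part of the original sources: the
   "reverse" shape tested by reverse_equiv_p above is a single init
   insn whose source is the pseudo itself, e.g. for regno 90 something
   like

       (set (mem/c:SI ...) (reg:SI 90))

   i.e. the equivalent location is initialized *from* the pseudo,
   rather than the pseudo being loaded from its equivalence.  */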
4988 | |
4989 | /* Return TRUE if REGNO was reloaded in an equivalence init insn.  We
4990 |    call this function only for non-reverse equivalences.  */
4991 | static bool
4992 | contains_reloaded_insn_p (int regno)
4993 | {
4994 |   rtx set;
4995 |   rtx_insn_list *list = ira_reg_equiv[regno].init_insns;
4996 | 
4997 |   for (; list != NULL; list = list->next ())
4998 |     if ((set = single_set (list->insn ())) == NULL_RTX
4999 |         || ! REG_P (SET_DEST (set))
5000 |         || (int) REGNO (SET_DEST (set)) != regno)
5001 |       return true;
5002 |   return false;
5003 | }
5004 | |
5005 | /* Try to combine secondary memory reload insn FROM into insn TO.
5006 |    FROM should be a load insn (usually a secondary memory reload
5007 |    insn).  Return TRUE in case of success.  */
5008 | static bool
5009 | combine_reload_insn (rtx_insn *from, rtx_insn *to)
5010 | {
5011 |   bool ok_p;
5012 |   rtx_insn *saved_insn;
5013 |   rtx set, from_reg, to_reg, op;
5014 |   enum reg_class to_class, from_class;
5015 |   int n, nop;
5016 |   signed char changed_nops[MAX_RECOG_OPERANDS + 1];
5017 | 
5018 |   /* Check conditions for the secondary memory reload and the original insn: */
5019 |   if ((targetm.secondary_memory_needed
5020 |        == hook_bool_mode_reg_class_t_reg_class_t_false)
5021 |       || NEXT_INSN (from) != to
5022 |       || !NONDEBUG_INSN_P (to)
5023 |       || CALL_P (to))
5024 |     return false;
5025 | 
5026 |   lra_insn_recog_data_t id = lra_get_insn_recog_data (to);
5027 |   struct lra_static_insn_data *static_id = id->insn_static_data;
5028 | 
5029 |   if (id->used_insn_alternative == LRA_UNKNOWN_ALT
5030 |       || (set = single_set (from)) == NULL_RTX)
5031 |     return false;
5032 |   from_reg = SET_DEST (set);
5033 |   to_reg = SET_SRC (set);
5034 |   /* Ignore optional reloads: */
5035 |   if (! REG_P (from_reg) || ! REG_P (to_reg)
5036 |       || bitmap_bit_p (&lra_optional_reload_pseudos, REGNO (from_reg)))
5037 |     return false;
5038 |   to_class = lra_get_allocno_class (REGNO (to_reg));
5039 |   from_class = lra_get_allocno_class (REGNO (from_reg));
5040 |   /* Check that the reload insn is a load: */
5041 |   if (to_class != NO_REGS || from_class == NO_REGS)
5042 |     return false;
5043 |   for (n = nop = 0; nop < static_id->n_operands; nop++)
5044 |     {
5045 |       if (static_id->operand[nop].type != OP_IN)
5046 |         continue;
5047 |       op = *id->operand_loc[nop];
5048 |       if (!REG_P (op) || REGNO (op) != REGNO (from_reg))
5049 |         continue;
5050 |       *id->operand_loc[nop] = to_reg;
5051 |       changed_nops[n++] = nop;
5052 |     }
5053 |   changed_nops[n] = -1;
5054 |   lra_update_dups (id, changed_nops);
5055 |   lra_update_insn_regno_info (to);
5056 |   ok_p = recog_memoized (to) >= 0;
5057 |   if (ok_p)
5058 |     {
5059 |       /* Check that the combined insn does not need any reloads: */
5060 |       saved_insn = curr_insn;
5061 |       curr_insn = to;
5062 |       curr_id = lra_get_insn_recog_data (curr_insn);
5063 |       curr_static_id = curr_id->insn_static_data;
5064 |       ok_p = !curr_insn_transform (true);
5065 |       curr_insn = saved_insn;
5066 |       curr_id = lra_get_insn_recog_data (curr_insn);
5067 |       curr_static_id = curr_id->insn_static_data;
5068 |     }
5069 |   if (ok_p)
5070 |     {
5071 |       id->used_insn_alternative = -1;
5072 |       lra_push_insn_and_update_insn_regno_info (to);
5073 |       if (lra_dump_file != NULL)
5074 |         {
5075 |           fprintf (lra_dump_file, "    Use combined insn:\n");
5076 |           dump_insn_slim (lra_dump_file, to);
5077 |         }
5078 |       return true;
5079 |     }
5080 |   if (lra_dump_file != NULL)
5081 |     {
5082 |       fprintf (lra_dump_file, "    Failed combined insn:\n");
5083 |       dump_insn_slim (lra_dump_file, to);
5084 |     }
5085 |   for (int i = 0; i < n; i++)
5086 |     {
5087 |       nop = changed_nops[i];
5088 |       *id->operand_loc[nop] = from_reg;
5089 |     }
5090 |   lra_update_dups (id, changed_nops);
5091 |   lra_update_insn_regno_info (to);
5092 |   if (lra_dump_file != NULL)
5093 |     {
5094 |       fprintf (lra_dump_file, "    Restoring insn after failed combining:\n");
5095 |       dump_insn_slim (lra_dump_file, to);
5096 |     }
5097 |   return false;
5098 | }
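/* Editorial illustration, not part of the original sources: the
   intended effect of combine_reload_insn, with r100 a secondary memory
   reload pseudo loaded from the original input r200 (the register
   numbers are hypothetical):

       r100 <- r200              (FROM, the secondary memory reload load)
       ... <- ... r100 ...       (TO)
     =>
       ... <- ... r200 ...       (TO, using FROM's source directly)

   If the rewritten TO fails to recognize, or would itself need
   reloads, the operands are put back and FROM is left alone.  */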
5099 | |
5100 | /* Entry function of the LRA constraint pass.  Return true if the
5101 |    constraint pass did change the code.  */
5102 | bool
5103 | lra_constraints (bool first_p)
5104 | {
5105 |   bool changed_p;
5106 |   int i, hard_regno, new_insns_num;
5107 |   unsigned int min_len, new_min_len, uid;
5108 |   rtx set, x, reg, dest_reg;
5109 |   rtx_insn *original_insn;
5110 |   basic_block last_bb;
5111 |   bitmap_iterator bi;
5112 | 
5113 |   lra_constraint_iter++;
5114 |   if (lra_dump_file != NULL)
5115 |     fprintf (lra_dump_file, "\n********** Local #%d: **********\n\n",
5116 |              lra_constraint_iter);
5117 |   changed_p = false;
     |   ^ Value stored to 'changed_p' is never read
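/* Editorial note on the analyzer diagnostic above: the store at source
   line 5117 is indeed dead.  changed_p is not read again before it is
   unconditionally reassigned to false just ahead of the main
   processing loop (source line 5218), so this first initialization
   could be dropped without changing behavior.  */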
5118 |   if (pic_offset_table_rtx
5119 |       && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
5120 |     check_and_force_assignment_correctness_p = true;
5121 |   else if (first_p)
5122 |     /* On the first iteration we should check IRA assignment
5123 |        correctness.  In rare cases, the assignments can be wrong as
5124 |        early clobber operands are ignored in IRA or usages of
5125 |        paradoxical sub-registers are not taken into account by
5126 |        IRA.  */
5127 |     check_and_force_assignment_correctness_p = true;
5128 |   new_insn_uid_start = get_max_uid ();
5129 |   new_regno_start = first_p ? lra_constraint_new_regno_start : max_reg_num ();
5130 |   /* Mark used hard regs for target stack size calculations.  */
5131 |   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
5132 |     if (lra_reg_info[i].nrefs != 0
5133 |         && (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
5134 |       {
5135 |         int j, nregs;
5136 | 
5137 |         nregs = hard_regno_nregs (hard_regno, lra_reg_info[i].biggest_mode);
5138 |         for (j = 0; j < nregs; j++)
5139 |           df_set_regs_ever_live (hard_regno + j, true);
5140 |       }
5141 |   /* Do elimination before the equivalence processing as we can spill
5142 |      some pseudos during elimination.  */
5143 |   lra_eliminate (false, first_p);
5144 |   auto_bitmap equiv_insn_bitmap (&reg_obstack);
5145 |   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
5146 |     if (lra_reg_info[i].nrefs != 0)
5147 |       {
5148 |         ira_reg_equiv[i].profitable_p = true;
5149 |         reg = regno_reg_rtx[i];
5150 |         if (lra_get_regno_hard_regno (i) < 0 && (x = get_equiv (reg)) != reg)
5151 |           {
5152 |             bool pseudo_p = contains_reg_p (x, false, false);
5153 | 
5154 |             /* After RTL transformations, we cannot guarantee that a
5155 |                pseudo in the substitution was not reloaded, which might
5156 |                make the equivalence invalid.  For example, in the
5157 |                reverse equiv of p0
5158 | 
5159 |                  p0 <- ...
5160 |                  ...
5161 |                  equiv_mem <- p0
5162 | 
5163 |                the memory address register could have been reloaded
5164 |                before the 2nd insn.  */
5165 |             if ((! first_p && pseudo_p)
5166 |                 /* We don't use DF for compilation speed's sake, so it
5167 |                    is problematic to update live info when we use an
5168 |                    equivalence containing pseudos in more than one
5169 |                    BB.  */
5170 |                 || (pseudo_p && multi_block_pseudo_p (i))
5171 |                 /* If an init insn was deleted for some reason, cancel
5172 |                    the equiv.  We could update the equiv insns after
5173 |                    transformations including an equiv insn deletion,
5174 |                    but it is not worth it as such cases are extremely
5175 |                    rare.  */
5176 |                 || contains_deleted_insn_p (ira_reg_equiv[i].init_insns)
5177 |                 /* If it is not a reverse equivalence, we check that a
5178 |                    pseudo in the rhs of the init insn is not dying in
5179 |                    the insn.  Otherwise, the live info at the beginning
5180 |                    of the corresponding BB might be wrong after we
5181 |                    removed the insn.  When the equiv can be a
5182 |                    constant, the right hand side of the init insn can
5183 |                    be a pseudo.  */
5184 |                 || (! reverse_equiv_p (i)
5185 |                     && (init_insn_rhs_dead_pseudo_p (i)
5186 |                         /* If we reloaded the pseudo in an equivalence
5187 |                            init insn, we cannot remove the equiv init
5188 |                            insns, and the init insns might write into
5189 |                            const memory in this case.  */
5190 |                         || contains_reloaded_insn_p (i)))
5191 |                 /* Prevent access beyond the equivalent memory for
5192 |                    paradoxical subregs.  */
5193 |                 || (MEM_P (x)
5194 |                     && maybe_gt (GET_MODE_SIZE (lra_reg_info[i].biggest_mode),
5195 |                                  GET_MODE_SIZE (GET_MODE (x))))
5196 |                 || (pic_offset_table_rtx
5197 |                     && ((CONST_POOL_OK_P (PSEUDO_REGNO_MODE (i), x)
5198 |                          && (targetm.preferred_reload_class
5199 |                              (x, lra_get_allocno_class (i)) == NO_REGS))
5200 |                         || contains_symbol_ref_p (x))))
5201 |               ira_reg_equiv[i].defined_p
5202 |                 = ira_reg_equiv[i].caller_save_p = false;
5203 |             if (contains_reg_p (x, false, true))
5204 |               ira_reg_equiv[i].profitable_p = false;
5205 |             if (get_equiv (reg) != reg)
5206 |               bitmap_ior_into (equiv_insn_bitmap, &lra_reg_info[i].insn_bitmap);
5207 |           }
5208 |       }
5209 |   for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
5210 |     update_equiv (i);
5211 |   /* We should add all insns containing pseudos which should be
5212 |      substituted by their equivalences.  */
5213 |   EXECUTE_IF_SET_IN_BITMAP (equiv_insn_bitmap, 0, uid, bi)
5214 |     lra_push_insn_by_uid (uid);
5215 |   min_len = lra_insn_stack_length ();
5216 |   new_insns_num = 0;
5217 |   last_bb = NULL;
5218 |   changed_p = false;
5219 |   original_insn = NULL;
5220 |   while ((new_min_len = lra_insn_stack_length ()) != 0)
5221 |     {
5222 |       curr_insn = lra_pop_insn ();
5223 |       --new_min_len;
5224 |       curr_bb = BLOCK_FOR_INSN (curr_insn);
5225 |       if (curr_bb != last_bb)
5226 |         {
5227 |           last_bb = curr_bb;
5228 |           bb_reload_num = lra_curr_reload_num;
5229 |         }
5230 |       if (min_len > new_min_len)
5231 |         {
5232 |           min_len = new_min_len;
5233 |           new_insns_num = 0;
5234 |           original_insn = curr_insn;
5235 |         }
5236 |       else if (combine_reload_insn (curr_insn, original_insn))
5237 |         {
5238 |           continue;
5239 |         }
5240 |       if (new_insns_num > MAX_RELOAD_INSNS_NUMBER)
5241 |         internal_error
5242 |           ("maximum number of generated reload insns per insn achieved (%d)",
5243 |            MAX_RELOAD_INSNS_NUMBER);
5244 |       new_insns_num++;
5245 |       if (DEBUG_INSN_P (curr_insn))
5246 |         {
5247 |           /* We need to check equivalences in the debug insn and change
5248 |              the pseudo to the equivalent value if necessary.  */
5249 |           curr_id = lra_get_insn_recog_data (curr_insn);
5250 |           if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn)))
5251 |             {
5252 |               rtx old = *curr_id->operand_loc[0];
5253 |               *curr_id->operand_loc[0]
5254 |                 = simplify_replace_fn_rtx (old, NULL_RTX,
5255 |                                            loc_equivalence_callback, curr_insn);
5256 |               if (old != *curr_id->operand_loc[0])
5257 |                 {
5258 |                   lra_update_insn_regno_info (curr_insn);
5259 |                   changed_p = true;
5260 |                 }
5261 |             }
5262 |         }
5263 |       else if (INSN_P (curr_insn))
5264 |         {
5265 |           if ((set = single_set (curr_insn)) != NULL_RTX)
5266 |             {
5267 |               dest_reg = SET_DEST (set);
5268 |               /* The equivalence pseudo can be set up as a SUBREG when
5269 |                  it is a call restore insn in a mode different from the
5270 |                  pseudo's mode.  */
5271 |               if (GET_CODE (dest_reg) == SUBREG)
5272 |                 dest_reg = SUBREG_REG (dest_reg);
5273 |               if ((REG_P (dest_reg)
5274 |                    && (x = get_equiv (dest_reg)) != dest_reg
5275 |                    /* Remove insns which set up a pseudo whose value
5276 |                       cannot be changed.  Such insns might not be in
5277 |                       init_insns because we don't update equiv data
5278 |                       during insn transformations.
5279 | 
5280 |                       As an example, suppose that a pseudo got a hard
5281 |                       register and on the 1st pass was not changed to
5282 |                       its equivalent constant.  We generate an
5283 |                       additional insn setting up the pseudo because of
5284 |                       secondary memory movement.  Then the pseudo is
5285 |                       spilled and we use the equiv constant.  In this
5286 |                       case we should remove the additional insn, and
5287 |                       this insn is not in the init_insns list.  */
5288 |                    && (! MEM_P (x) || MEM_READONLY_P (x)
5289 |                        /* Check that this is actually an insn setting
5290 |                           up the equivalence.  */
5291 |                        || in_list_p (curr_insn,
5292 |                                      ira_reg_equiv
5293 |                                      [REGNO (dest_reg)].init_insns)))
5294 |                   || (((x = get_equiv (SET_SRC (set))) != SET_SRC (set))
5295 |                       && in_list_p (curr_insn,
5296 |                                     ira_reg_equiv
5297 |                                     [REGNO (SET_SRC (set))].init_insns)))
5298 |                 {
5299 |                   /* This is an equiv init insn of a pseudo which did not
5300 |                      get a hard register -- remove the insn.  */
5301 |                   if (lra_dump_file != NULL)
5302 |                     {
5303 |                       fprintf (lra_dump_file,
5304 |                                "      Removing equiv init insn %i (freq=%d)\n",
5305 |                                INSN_UID (curr_insn),
5306 |                                REG_FREQ_FROM_BB (BLOCK_FOR_INSN (curr_insn)));
5307 |                       dump_insn_slim (lra_dump_file, curr_insn);
5308 |                     }
5309 |                   if (contains_reg_p (x, true, false))
5310 |                     check_and_force_assignment_correctness_p = true;
5311 |                   lra_set_insn_deleted (curr_insn);
5312 |                   continue;
5313 |                 }
5314 |             }
5315 |           curr_id = lra_get_insn_recog_data (curr_insn);
5316 |           curr_static_id = curr_id->insn_static_data;
5317 |           init_curr_insn_input_reloads ();
5318 |           init_curr_operand_mode ();
5319 |           if (curr_insn_transform (false))
5320 |             changed_p = true;
5321 |           /* Check non-transformed insns too for equiv change, as a USE
5322 |              or CLOBBER doesn't need reloads but can contain pseudos
5323 |              being changed to their equivalences.  */
5324 |           else if (bitmap_bit_p (equiv_insn_bitmap, INSN_UID (curr_insn))
5325 |                    && loc_equivalence_change_p (&PATTERN (curr_insn)))
5326 |             {
5327 |               lra_update_insn_regno_info (curr_insn);
5328 |               changed_p = true;
5329 |             }
5330 |         }
5331 |     }
5332 | |
5333 |   /* If we used a new hard regno, changed_p should be true because the
5334 |      hard reg is assigned to a new pseudo.  */
5335 |   if (flag_checking && !changed_p)
5336 |     {
5337 |       for (i = FIRST_PSEUDO_REGISTER; i < new_regno_start; i++)
5338 |         if (lra_reg_info[i].nrefs != 0
5339 |             && (hard_regno = lra_get_regno_hard_regno (i)) >= 0)
5340 |           {
5341 |             int j, nregs = hard_regno_nregs (hard_regno,
5342 |                                              PSEUDO_REGNO_MODE (i));
5343 | 
5344 |             for (j = 0; j < nregs; j++)
5345 |               lra_assert (df_regs_ever_live_p (hard_regno + j));
5346 |           }
5347 |     }
5348 |   return changed_p;
5349 | }
5349 | } |
5350 | |
5351 | static void initiate_invariants (void); |
5352 | static void finish_invariants (void); |
5353 | |
5354 | /* Initiate the LRA constraint pass. It is done once per |
5355 | function. */ |
5356 | void |
5357 | lra_constraints_init (void) |
5358 | { |
5359 | initiate_invariants (); |
5360 | } |
5361 | |
5362 | /* Finalize the LRA constraint pass. It is done once per |
5363 | function. */ |
5364 | void |
5365 | lra_constraints_finish (void) |
5366 | { |
5367 | finish_invariants (); |
5368 | } |
5369 | |
5370 | |
5371 | |
5372 | /* Structure describing invariants for inheritance.  */
5373 | struct lra_invariant |
5374 | { |
5375 | /* The order number of the invariant. */ |
5376 | int num; |
5377 | /* The invariant RTX. */ |
5378 | rtx invariant_rtx; |
5379 | /* The origin insn of the invariant. */ |
5380 | rtx_insn *insn; |
5381 | }; |
5382 | |
5383 | typedef lra_invariant invariant_t; |
5384 | typedef invariant_t *invariant_ptr_t; |
5385 | typedef const invariant_t *const_invariant_ptr_t; |
5386 | |
5387 | /* Pointer to the inheritance invariants. */ |
5388 | static vec<invariant_ptr_t> invariants; |
5389 | |
5390 | /* Allocation pool for the invariants. */ |
5391 | static object_allocator<lra_invariant> *invariants_pool; |
5392 | |
5393 | /* Hash table for the invariants. */ |
5394 | static htab_t invariant_table; |
5395 | |
5396 | /* Hash function for INVARIANT. */ |
5397 | static hashval_t |
5398 | invariant_hash (const void *invariant) |
5399 | { |
5400 | rtx inv = ((const_invariant_ptr_t) invariant)->invariant_rtx; |
5401 | return lra_rtx_hash (inv); |
5402 | } |
5403 | |
5404 | /* Equal function for invariants INVARIANT1 and INVARIANT2. */ |
5405 | static int |
5406 | invariant_eq_p (const void *invariant1, const void *invariant2) |
5407 | { |
5408 | rtx inv1 = ((const_invariant_ptr_t) invariant1)->invariant_rtx; |
5409 | rtx inv2 = ((const_invariant_ptr_t) invariant2)->invariant_rtx; |
5410 | |
5411 | return rtx_equal_p (inv1, inv2); |
5412 | } |
5413 | |
5414 | /* Insert INVARIANT_RTX into the table if it is not there yet.  Return
5415 |    the invariant which is in the table.  */
5416 | static invariant_ptr_t
5417 | insert_invariant (rtx invariant_rtx)
5418 | {
5419 |   void **entry_ptr;
5420 |   invariant_t invariant;
5421 |   invariant_ptr_t invariant_ptr;
5422 | 
5423 |   invariant.invariant_rtx = invariant_rtx;
5424 |   entry_ptr = htab_find_slot (invariant_table, &invariant, INSERT);
5425 |   if (*entry_ptr == NULL)
5426 |     {
5427 |       invariant_ptr = invariants_pool->allocate ();
5428 |       invariant_ptr->invariant_rtx = invariant_rtx;
5429 |       invariant_ptr->insn = NULL;
5430 |       invariants.safe_push (invariant_ptr);
5431 |       *entry_ptr = (void *) invariant_ptr;
5432 |     }
5433 |   return (invariant_ptr_t) *entry_ptr;
5434 | }
5435 | |
5436 | /* Initiate the invariant table. */ |
5437 | static void |
5438 | initiate_invariants (void) |
5439 | { |
5440 | invariants.create (100); |
5441 | invariants_pool |
5442 | = new object_allocator<lra_invariant> ("Inheritance invariants"); |
5443 |   invariant_table = htab_create (100, invariant_hash, invariant_eq_p, NULL);
5444 | } |
5445 | |
5446 | /* Finish the invariant table. */ |
5447 | static void |
5448 | finish_invariants (void) |
5449 | { |
5450 | htab_delete (invariant_table); |
5451 | delete invariants_pool; |
5452 | invariants.release (); |
5453 | } |
5454 | |
5455 | /* Make the invariant table empty. */ |
5456 | static void |
5457 | clear_invariants (void) |
5458 | { |
5459 | htab_empty (invariant_table); |
5460 | invariants_pool->release (); |
5461 | invariants.truncate (0); |
5462 | } |
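/* Editorial sketch, not part of the original sources: typical use of
   the invariant table during inheritance.  The helper name and its
   arguments are hypothetical.  */
#if 0
static void
record_invariant_origin (rtx invariant_rtx, rtx_insn *insn)
{
  /* insert_invariant either finds the existing entry or creates a new
     one with a null origin insn; remember the first defining insn.  */
  invariant_ptr_t p = insert_invariant (invariant_rtx);
  if (p->insn == NULL)
    p->insn = insn;
}
#endif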
5463 | |
5464 | |
5465 | |
5466 | /* This page contains code to do inheritance/split |
5467 | transformations. */ |
5468 | |
5469 | /* Number of reloads passed so far in current EBB. */ |
5470 | static int reloads_num; |
5471 | |
5472 | /* Number of calls passed so far in current EBB. */ |
5473 | static int calls_num; |
5474 | |
5475 | /* Index ID is the CALLS_NUM associated with the last call we saw
5476 |    with ABI identifier ID.  */
5477 | static int last_call_for_abi[NUM_ABI_IDS]; |
5478 | |
5479 | /* Which registers have been fully or partially clobbered by a call |
5480 | since they were last used. */ |
5481 | static HARD_REG_SET full_and_partial_call_clobbers; |
5482 | |
5483 | /* The check value used to validate elements in USAGE_INSNS for the
5484 |    current reload pseudo.  */
5485 | static int curr_usage_insns_check; |
5486 | |
5487 | /* Info about the last usage of registers in an EBB, used for the
5488 |    inheritance/split transformations.  Inheritance is done from a
5489 |    spilled pseudo; splitting is done from a hard register or a
5490 |    pseudo assigned to a hard register.  */
5491 | struct usage_insns
5492 | {
5493 |   /* If the value is equal to CURR_USAGE_INSNS_CHECK, then the member
5494 |      value INSNS is valid.  INSNS is a chain of optional debug insns
5495 |      finished by a non-debug insn using the corresponding reg.  The
5496 |      value is also used to mark the registers which are set up in the
5497 |      current insn.  The negated insn uid is used for this.  */
5498 |   int check;
5499 |   /* Value of global reloads_num at the last insn in INSNS.  */
5500 |   int reloads_num;
5501 |   /* Value of global calls_num at the last insn in INSNS.  */
5502 |   int calls_num;
5503 |   /* It can be true only for splitting.  And it means that the restore
5504 |      insn should be put after the insn given by the following member.  */
5505 |   bool after_p;
5506 |   /* Next insns in the current EBB which use the original reg, where
5507 |      the original reg value is not changed between the current insn
5508 |      and the next insns.  In other words, e.g. for inheritance, if we
5509 |      need to use the original reg value again in the next insns, we
5510 |      can try to use the value in a hard register from a reload insn
5511 |      of the current insn.  */
5512 |   rtx insns;
5513 | };
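/* Editorial illustration, not part of the original sources: for a reg
   used by debug insns d1 and d2 and then by non-debug insn i3, the
   INSNS field above holds a chain of the form

       INSN_LIST (d1, INSN_LIST (d2, i3))

   i.e. INSN_LIST nodes carrying the optional debug insns, finished by
   the plain non-debug insn itself rather than by another list node
   (which is what skip_usage_debug_insns below relies on).  */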
5514 | |
5515 | /* Map: regno -> corresponding pseudo usage insns. */ |
5516 | static struct usage_insns *usage_insns; |
5517 | |
5518 | static void
5519 | setup_next_usage_insn (int regno, rtx insn, int reloads_num, bool after_p)
5520 | {
5521 |   usage_insns[regno].check = curr_usage_insns_check;
5522 |   usage_insns[regno].insns = insn;
5523 |   usage_insns[regno].reloads_num = reloads_num;
5524 |   usage_insns[regno].calls_num = calls_num;
5525 |   usage_insns[regno].after_p = after_p;
5526 |   if (regno >= FIRST_PSEUDO_REGISTER && reg_renumber[regno] >= 0)
5527 |     remove_from_hard_reg_set (&full_and_partial_call_clobbers,
5528 |                               PSEUDO_REGNO_MODE (regno),
5529 |                               reg_renumber[regno]);
5530 | }
5531 | |
5532 | /* The function is used to form the list of REGNO usages: optional
5533 |    debug insns finished by a non-debug insn using REGNO.  RELOADS_NUM
5534 |    is the current number of reload insns processed so far.  */
5535 | static void
5536 | add_next_usage_insn (int regno, rtx_insn *insn, int reloads_num)
5537 | {
5538 |   rtx next_usage_insns;
5539 | 
5540 |   if (usage_insns[regno].check == curr_usage_insns_check
5541 |       && (next_usage_insns = usage_insns[regno].insns) != NULL_RTX
5542 |       && DEBUG_INSN_P (insn))
5543 |     {
5544 |       /* Check that we did not add the debug insn yet.  */
5545 |       if (next_usage_insns != insn
5546 |           && (GET_CODE (next_usage_insns) != INSN_LIST
5547 |               || XEXP (next_usage_insns, 0) != insn))
5548 |         usage_insns[regno].insns = gen_rtx_INSN_LIST (VOIDmode, insn,
5549 |                                                       next_usage_insns);
5550 |     }
5551 |   else if (NONDEBUG_INSN_P (insn))
5552 |     setup_next_usage_insn (regno, insn, reloads_num, false);
5553 |   else
5554 |     usage_insns[regno].check = 0;
5555 | }
5556 | |
5557 | /* Return the first non-debug insn in list USAGE_INSNS.  */
5558 | static rtx_insn *
5559 | skip_usage_debug_insns (rtx usage_insns)
5560 | {
5561 |   rtx insn;
5562 | 
5563 |   /* Skip debug insns.  */
5564 |   for (insn = usage_insns;
5565 |        insn != NULL_RTX && GET_CODE (insn) == INSN_LIST;
5566 |        insn = XEXP (insn, 1))
5567 |     ;
5568 |   return safe_as_a <rtx_insn *> (insn);
5569 | }
5570 | |
5571 | /* Return true if we need secondary memory moves for insn in
5572 |    USAGE_INSNS after inserting inherited pseudo of class INHER_CL
5573 |    into the insn.  */
5574 | static bool
5575 | check_secondary_memory_needed_p (enum reg_class inher_cl ATTRIBUTE_UNUSED,
5576 |                                  rtx usage_insns ATTRIBUTE_UNUSED)
5577 | {
5578 |   rtx_insn *insn;
5579 |   rtx set, dest;
5580 |   enum reg_class cl;
5581 | 
5582 |   if (inher_cl == ALL_REGS
5583 |       || (insn = skip_usage_debug_insns (usage_insns)) == NULL_RTX)
5584 |     return false;
5585 |   lra_assert (INSN_P (insn));
5586 |   if ((set = single_set (insn)) == NULL_RTX || ! REG_P (SET_DEST (set)))
5587 |     return false;
5588 |   dest = SET_DEST (set);
5589 |   if (! REG_P (dest))
5590 |     return false;
5591 |   lra_assert (inher_cl != NO_REGS);
5592 |   cl = get_reg_class (REGNO (dest));
5593 |   return (cl != NO_REGS && cl != ALL_REGS
5594 |           && targetm.secondary_memory_needed (GET_MODE (dest), inher_cl, cl));
5595 | }
5596 | |
5597 | /* Registers involved in inheritance/split in the current EBB
5598 |    (inheritance/split pseudos and original registers).  */
5599 | static bitmap_head check_only_regs;
5600 | 
5601 | /* Reload pseudos cannot be involved in invariant inheritance in the
5602 |    current EBB.  */
5603 | static bitmap_head invalid_invariant_regs;
5604 | |
5605 | /* Do inheritance transformations for insn INSN, which defines (if
5606 |    DEF_P) or uses ORIGINAL_REGNO.  NEXT_USAGE_INSNS specifies which
5607 |    instruction in the EBB next uses ORIGINAL_REGNO; it has the same
5608 |    form as the "insns" field of usage_insns.  Return true if we
5609 |    succeed in such a transformation.
5610 | 
5611 |    The transformations look like:
5612 | 
5613 |      p <- ...             i <- ...
5614 |      ...                  p <- i    (new insn)
5615 |      ...            =>
5616 |      <- ... p ...         <- ... i ...
5617 |    or
5618 |      ...                  i <- p    (new insn)
5619 |      <- ... p ...         <- ... i ...
5620 |      ...            =>
5621 |      <- ... p ...         <- ... i ...
5622 |    where p is a spilled original pseudo and i is a new inheritance pseudo.
5623 | 
5624 | 
5625 |    The inheritance pseudo gets the smaller of the two classes CL and
5626 |    the class of ORIGINAL_REGNO.  */
5627 | static bool
5628 | inherit_reload_reg (bool def_p, int original_regno,
5629 |                     enum reg_class cl, rtx_insn *insn, rtx next_usage_insns)
5630 | {
5631 |   if (optimize_function_for_size_p (cfun))
5632 |     return false;
5633 | 
5634 |   enum reg_class rclass = lra_get_allocno_class (original_regno);
5635 |   rtx original_reg = regno_reg_rtx[original_regno];
5636 |   rtx new_reg, usage_insn;
5637 |   rtx_insn *new_insns;
5638 | 
5639 |   lra_assert (! usage_insns[original_regno].after_p);
5640 |   if (lra_dump_file != NULL)
5641 |     fprintf (lra_dump_file,
5642 |              "    <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n");
5643 |   if (! ira_reg_classes_intersect_p[cl][rclass])
5644 |     {
5645 |       if (lra_dump_file != NULL)
5646 |         {
5647 |           fprintf (lra_dump_file,
5648 |                    "    Rejecting inheritance for %d "
5649 |                    "because of disjoint classes %s and %s\n",
5650 |                    original_regno, reg_class_names[cl],
5651 |                    reg_class_names[rclass]);
5652 |           fprintf (lra_dump_file,
5653 |                    "    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5654 |         }
5655 |       return false;
5656 |     }
5657 |   if ((ira_class_subset_p[cl][rclass] && cl != rclass)
5658 |       /* We don't use the subset of the two classes because it can be
5659 |          NO_REGS.  The transformation is still profitable in most cases
5660 |          even if the classes do not intersect, as a register move is
5661 |          probably cheaper than a memory load.  */
5662 |       || ira_class_hard_regs_num[cl] < ira_class_hard_regs_num[rclass])
5663 |     {
5664 |       if (lra_dump_file != NULL)
5665 |         fprintf (lra_dump_file, "    Use smallest class of %s and %s\n",
5666 |                  reg_class_names[cl], reg_class_names[rclass]);
5667 | 
5668 |       rclass = cl;
5669 |     }
5670 |   if (check_secondary_memory_needed_p (rclass, next_usage_insns))
5671 |     {
5672 |       /* Reject inheritance resulting in secondary memory moves.
5673 |          Otherwise, there is a danger of LRA cycling.  Such a
5674 |          transformation would also be unprofitable.  */
5675 |       if (lra_dump_file != NULL)
5676 |         {
5677 |           rtx_insn *insn = skip_usage_debug_insns (next_usage_insns);
5678 |           rtx set = single_set (insn);
5679 | 
5680 |           lra_assert (set != NULL_RTX);
5681 | 
5682 |           rtx dest = SET_DEST (set);
5683 | 
5684 |           lra_assert (REG_P (dest));
5685 |           fprintf (lra_dump_file,
5686 |                    "    Rejecting inheritance for insn %d(%s)<-%d(%s) "
5687 |                    "as secondary mem is needed\n",
5688 |                    REGNO (dest), reg_class_names[get_reg_class (REGNO (dest))],
5689 |                    original_regno, reg_class_names[rclass]);
5690 |           fprintf (lra_dump_file,
5691 |                    "    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5692 |         }
5693 |       return false;
5694 |     }
5695 |   new_reg = lra_create_new_reg (GET_MODE (original_reg), original_reg,
5696 |                                 rclass, NULL, "inheritance");
5697 |   start_sequence ();
5698 |   if (def_p)
5699 |     lra_emit_move (original_reg, new_reg);
5700 |   else
5701 |     lra_emit_move (new_reg, original_reg);
5702 |   new_insns = get_insns ();
5703 |   end_sequence ();
5704 |   if (NEXT_INSN (new_insns) != NULL_RTX)
5705 |     {
5706 |       if (lra_dump_file != NULL)
5707 |         {
5708 |           fprintf (lra_dump_file,
5709 |                    "    Rejecting inheritance %d->%d "
5710 |                    "as it results in 2 or more insns:\n",
5711 |                    original_regno, REGNO (new_reg));
5712 |           dump_rtl_slim (lra_dump_file, new_insns, NULL, -1, 0);
5713 |           fprintf (lra_dump_file,
5714 |                    "    >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
5715 |         }
5716 |       return false;
5717 |     }
5718 |   lra_substitute_pseudo_within_insn (insn, original_regno, new_reg, false);
5719 |   lra_update_insn_regno_info (insn);
5720 |   if (! def_p)
5721 |     /* We now have a new usage insn for original regno.  */
5722 |     setup_next_usage_insn (original_regno, new_insns, reloads_num, false);
5723 |   if (lra_dump_file != NULL)
5724 |     fprintf (lra_dump_file, "    Original reg change %d->%d (bb%d):\n",
5725 |              original_regno, REGNO (new_reg), BLOCK_FOR_INSN (insn)->index);
5726 |   lra_reg_info[REGNO (new_reg)].restore_rtx = regno_reg_rtx[original_regno];
5727 |   bitmap_set_bit (&check_only_regs, REGNO (new_reg));
5728 |   bitmap_set_bit (&check_only_regs, original_regno);
5729 |   bitmap_set_bit (&lra_inheritance_pseudos, REGNO (new_reg));
5730 |   if (def_p)
5731 |     lra_process_new_insns (insn, NULL, new_insns,
5732 |                            "Add original<-inheritance");
5733 |   else
5734 |     lra_process_new_insns (insn, new_insns, NULL,
5735 |                            "Add inheritance<-original");
5736 |   while (next_usage_insns != NULL_RTX)
5737 |     {
5738 |       if (GET_CODE (next_usage_insns) != INSN_LIST)
5739 |         {
5740 |           usage_insn = next_usage_insns;
5741 |           lra_assert (NONDEBUG_INSN_P (usage_insn));
5742 |           next_usage_insns = NULL;
5743 |         }
5744 |       else
5745 |         {
5746 |           usage_insn = XEXP (next_usage_insns, 0);
5747 |           lra_assert (DEBUG_INSN_P (usage_insn));
5748 |           next_usage_insns = XEXP (next_usage_insns, 1);
5749 |         }
5750 |       lra_substitute_pseudo (&usage_insn, original_regno, new_reg, false,
5751 |                              DEBUG_INSN_P (usage_insn));
5752 |       lra_update_insn_regno_info (as_a <rtx_insn *> (usage_insn));
5753 |       if (lra_dump_file != NULL)
5754 |         {
5755 |           basic_block bb = BLOCK_FOR_INSN (usage_insn);
5756 |           fprintf (lra_dump_file,
5757 |                    "    Inheritance reuse change %d->%d (bb%d):\n",
5758 |                    original_regno, REGNO (new_reg),
5759 |                    bb ? bb->index : -1);
5760 |           dump_insn_slim (lra_dump_file, as_a <rtx_insn *> (usage_insn));