File: build/gcc/lra.cc
Warning: line 855, column 15: Value stored to 'fmt' during its initialization is never read
1 | /* LRA (local register allocator) driver and LRA utilities. |
2 | Copyright (C) 2010-2023 Free Software Foundation, Inc. |
3 | Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | |
22 | /* The Local Register Allocator (LRA) is a replacement for the former |
23 | reload pass. It is focused on simplifying the code that solves the |
24 | reload pass tasks, on making code maintenance easier, and on |
25 | implementing new promising optimizations. |
26 | |
27 | The major LRA design decisions are: |
28 | o division into small, manageable, separate sub-tasks |
29 | o reflection of all transformations and decisions in RTL as much |
30 | as possible |
31 | o insn constraints as the primary source of info (minimizing the |
32 | number of target-dependent macros/hooks) |
33 | |
34 | In brief, LRA works by iteratively processing insns with the final |
35 | goal of satisfying all insn and address constraints: |
36 | o New reload insns (in brief, reloads) and reload pseudos might be |
37 | generated; |
38 | o Some pseudos might be spilled to assign hard registers to |
39 | new reload pseudos; |
40 | o Spilled pseudo values might be recalculated (rematerialization); |
41 | o Spilled pseudos might be replaced with stack memory or their equivalences; |
42 | o Allocating stack memory changes address displacements, so a |
43 | new iteration is needed. |
44 | |
45 | Here is a block diagram of the LRA passes: |
46 | |
47 | ------------------------ |
48 | --------------- | Undo inheritance for | --------------- |
49 | | Memory-memory | | spilled pseudos, | | New (and old) | |
50 | | move coalesce |<---| splits for pseudos got |<-- | pseudos | |
51 | --------------- | the same hard regs, | | assignment | |
52 | Start | | and optional reloads | --------------- |
53 | | | ------------------------ ^ |
54 | V | ---------------- | |
55 | ----------- V | Update virtual | | |
56 | | Remove |----> ------------>| register | | |
57 | | scratches | ^ | displacements | | |
58 | ----------- | ---------------- | |
59 | | | | |
60 | | V New | |
61 | | ------------ pseudos ------------------- |
62 | | |Constraints:| or insns | Inheritance/split | |
63 | | | RTL |--------->| transformations | |
64 | | | transfor- | | in EBB scope | |
65 | | substi- | mations | ------------------- |
66 | | tutions ------------ |
67 | | | No change |
68 | ---------------- V |
69 | | Spilled pseudo | ------------------- |
70 | | to memory |<----| Rematerialization | |
71 | | substitution | ------------------- |
72 | ---------------- |
73 | | No substitutions |
74 | V |
75 | ------------------------- |
76 | | Hard regs substitution, | |
77 | | devirtualization, and |------> Finish |
78 | | restoring scratches got | |
79 | | memory | |
80 | ------------------------- |
81 | |
82 | To speed up the process: |
83 | o We process only insns affected by changes on previous |
84 | iterations; |
85 | o We don't use the DFA infrastructure because it would make the |
86 | compiler much slower than the special IR described below; |
87 | o We use a special insn representation for quick access to insn |
88 | info which is always *synchronized* with the current RTL; |
89 | o The insn IR is kept small in memory. It is divided into three parts: |
90 | o one specific for each insn in RTL (only operand locations); |
91 | o one common for all insns in RTL with the same insn code |
92 | (different operand attributes from machine descriptions); |
93 | o one oriented for maintenance of live info (list of pseudos). |
94 | o Pseudo data: |
95 | o all insns where the pseudo is referenced; |
96 | o live info (conflicting hard regs, live ranges, # of |
97 | references etc); |
98 | o data used for assigning (preferred hard regs, costs etc). |
99 | |
100 | This file contains LRA driver, LRA utility functions and data, and |
101 | code for dealing with scratches. */ |
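As a rough illustration of the three-part insn IR described above, the shapes below are a simplified sketch (the real declarations live in lra-int.h; fields are abbreviated):

  /* Sketch only -- see lra-int.h for the actual types.  */
  struct lra_insn_reg           /* live-info part: regs referenced by the insn */
  { int regno; struct lra_insn_reg *next; /* ... */ };
  struct lra_static_insn_data   /* shared by all insns with the same insn code */
  { int n_operands, n_dups, n_alternatives; /* ... */ };
  class lra_insn_recog_data     /* specific to each insn in RTL */
  { public:
    rtx **operand_loc;                            /* operand locations */
    struct lra_static_insn_data *insn_static_data; /* shared per icode */
    struct lra_insn_reg *regs; /* ... */ };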
102 | |
103 | #include "config.h" |
104 | #include "system.h" |
105 | #include "coretypes.h" |
106 | #include "backend.h" |
107 | #include "target.h" |
108 | #include "rtl.h" |
109 | #include "tree.h" |
110 | #include "predict.h" |
111 | #include "df.h" |
112 | #include "memmodel.h" |
113 | #include "tm_p.h" |
114 | #include "optabs.h" |
115 | #include "regs.h" |
116 | #include "ira.h" |
117 | #include "recog.h" |
118 | #include "expr.h" |
119 | #include "cfgrtl.h" |
120 | #include "cfgbuild.h" |
121 | #include "lra.h" |
122 | #include "lra-int.h" |
123 | #include "print-rtl.h" |
124 | #include "function-abi.h" |
125 | |
126 | /* Dump bitmap SET with TITLE and BB INDEX. */ |
127 | void |
128 | lra_dump_bitmap_with_title (const char *title, bitmap set, int index) |
129 | { |
130 | unsigned int i; |
131 | int count; |
132 | bitmap_iterator bi; |
133 | static const int max_nums_on_line = 10; |
134 | |
135 | if (bitmap_empty_p (set)) |
136 | return; |
137 | fprintf (lra_dump_file, " %s %d:", title, index); |
138 | fprintf (lra_dump_file, "\n"); |
139 | count = max_nums_on_line + 1; |
140 | EXECUTE_IF_SET_IN_BITMAP (set, 0, i, bi) |
141 | { |
142 | if (count > max_nums_on_line) |
143 | { |
144 | fprintf (lra_dump_file, "\n "); |
145 | count = 0; |
146 | } |
147 | fprintf (lra_dump_file, " %4u", i); |
148 | count++; |
149 | } |
150 | fprintf (lra_dump_file, "\n"); |
151 | } |
152 | |
153 | /* Hard registers currently not available for allocation. This can |
154 | change after some hard registers become non-eliminable. */ |
155 | HARD_REG_SET lra_no_alloc_regs; |
156 | |
157 | static int get_new_reg_value (void); |
158 | static void expand_reg_info (void); |
159 | static void invalidate_insn_recog_data (int); |
160 | static int get_insn_freq (rtx_insn *); |
161 | static void invalidate_insn_data_regno_info (lra_insn_recog_data_t, |
162 | rtx_insn *, int); |
163 | /* Expand all regno related info needed for LRA. */ |
164 | static void |
165 | expand_reg_data (int old) |
166 | { |
167 | resize_reg_info (); |
168 | expand_reg_info (); |
169 | ira_expand_reg_equiv (); |
170 | for (int i = (int) max_reg_num () - 1; i >= old; i--) |
171 | lra_change_class (i, ALL_REGS, " Set", true); |
172 | } |
173 | |
174 | /* Create and return a new reg of ORIGINAL's mode. If ORIGINAL is NULL |
175 | or of VOIDmode, use MD_MODE for the new reg. Initialize its |
176 | register class to RCLASS. Print a message about assigning class |
177 | RCLASS to the new register named TITLE unless TITLE is NULL. Use |
178 | the attributes of ORIGINAL if it is a register. The created register |
179 | will hold a unique value. */ |
180 | rtx |
181 | lra_create_new_reg_with_unique_value (machine_mode md_mode, rtx original, |
182 | enum reg_class rclass, |
183 | HARD_REG_SET *exclude_start_hard_regs, |
184 | const char *title) |
185 | { |
186 | machine_mode mode; |
187 | rtx new_reg; |
188 | |
189 | if (original == NULL_RTX || (mode = GET_MODE (original)) == VOIDmode) |
190 | mode = md_mode; |
191 | lra_assert (mode != VOIDmode); |
192 | new_reg = gen_reg_rtx (mode); |
193 | if (original == NULL_RTX || ! REG_P (original)) |
194 | { |
195 | if (lra_dump_file != NULL) |
196 | fprintf (lra_dump_file, " Creating newreg=%i", REGNO (new_reg)); |
197 | } |
198 | else |
199 | { |
200 | if (ORIGINAL_REGNO (original) >= FIRST_PSEUDO_REGISTER) |
201 | ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original); |
202 | REG_USERVAR_P (new_reg) = REG_USERVAR_P (original); |
203 | REG_POINTER (new_reg) = REG_POINTER (original); |
204 | REG_ATTRS (new_reg) = REG_ATTRS (original); |
205 | if (lra_dump_file != NULL) |
206 | fprintf (lra_dump_file, " Creating newreg=%i from oldreg=%i", |
207 | REGNO (new_reg), REGNO (original)); |
208 | } |
209 | if (lra_dump_file != NULL) |
210 | { |
211 | if (title != NULL) |
212 | fprintf (lra_dump_file, ", assigning class %s to%s%s r%d", |
213 | reg_class_names[rclass], *title == '\0' ? "" : " ", |
214 | title, REGNO (new_reg)); |
215 | fprintf (lra_dump_file, "\n"); |
216 | } |
217 | expand_reg_data (max_reg_num ()); |
218 | setup_reg_classes (REGNO (new_reg), rclass, NO_REGS, rclass); |
219 | if (exclude_start_hard_regs != NULL) |
220 | lra_reg_info[REGNO (new_reg)].exclude_start_hard_regs |
221 | = *exclude_start_hard_regs; |
222 | return new_reg; |
223 | } |
224 | |
225 | /* Analogous to the previous function, but the new register also |
226 | inherits the value of ORIGINAL. */ |
227 | rtx |
228 | lra_create_new_reg (machine_mode md_mode, rtx original, enum reg_class rclass, |
229 | HARD_REG_SET *exclude_start_hard_regs, const char *title) |
230 | { |
231 | rtx new_reg; |
232 | |
233 | new_reg |
234 | = lra_create_new_reg_with_unique_value (md_mode, original, rclass, |
235 | exclude_start_hard_regs, title); |
236 | if (original != NULL_RTX && REG_P (original)) |
237 | lra_assign_reg_val (REGNO (original), REGNO (new_reg)); |
238 | return new_reg; |
239 | } |
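As a usage sketch (the mode, original register, and title here are hypothetical), a reload pseudo inheriting the value of an address register could be created with:

  /* New GENERAL_REGS pseudo of Pmode inheriting ADDR_REG's value;
     "addr" only labels the dump output.  Hypothetical example.  */
  rtx reload_reg = lra_create_new_reg (Pmode, addr_reg, GENERAL_REGS,
                                       NULL, "addr");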
240 | |
241 | /* Set up a unique held value for REGNO. */ |
242 | void |
243 | lra_set_regno_unique_value (int regno) |
244 | { |
245 | lra_reg_info[regno].val = get_new_reg_value (); |
246 | } |
247 | |
248 | /* Invalidate INSN related info used by LRA. The info should never be |
249 | used after that. */ |
250 | void |
251 | lra_invalidate_insn_data (rtx_insn *insn) |
252 | { |
253 | lra_invalidate_insn_regno_info (insn); |
254 | invalidate_insn_recog_data (INSN_UID (insn)); |
255 | } |
256 | |
257 | /* Mark INSN deleted and invalidate the insn related info used by |
258 | LRA. */ |
259 | void |
260 | lra_set_insn_deleted (rtx_insn *insn) |
261 | { |
262 | lra_invalidate_insn_data (insn); |
263 | SET_INSN_DELETED (insn); |
264 | } |
265 | |
266 | /* Delete an unneeded INSN and any previous insns whose sole purpose |
267 | is loading data that is dead in INSN. */ |
268 | void |
269 | lra_delete_dead_insn (rtx_insn *insn) |
270 | { |
271 | rtx_insn *prev = prev_real_insn (insn); |
272 | rtx prev_dest; |
273 | |
274 | /* If the previous insn sets a register that dies in our insn, |
275 | delete it too. */ |
276 | if (prev && GET_CODE (PATTERN (prev)) == SET |
277 | && (prev_dest = SET_DEST (PATTERN (prev)), REG_P (prev_dest)) |
278 | && reg_mentioned_p (prev_dest, PATTERN (insn)) |
279 | && find_regno_note (insn, REG_DEAD, REGNO (prev_dest)) |
280 | && ! side_effects_p (SET_SRC (PATTERN (prev)))) |
281 | lra_delete_dead_insn (prev); |
282 | |
283 | lra_set_insn_deleted (insn); |
284 | } |
285 | |
286 | /* Emit insn x = y + z. Return NULL if we failed to do it. |
287 | Otherwise, return the insn. We don't use gen_add3_insn as it might |
288 | clobber CC. */ |
289 | static rtx_insn * |
290 | emit_add3_insn (rtx x, rtx y, rtx z) |
291 | { |
292 | rtx_insn *last; |
293 | |
294 | last = get_last_insn (); |
295 | |
296 | if (have_addptr3_insn (x, y, z)) |
297 | { |
298 | rtx_insn *insn = gen_addptr3_insn (x, y, z); |
299 | |
300 | /* If the target provides an "addptr" pattern it presumably does |
301 | so for a reason. So falling back to the normal add would be |
302 | a bug. */ |
303 | lra_assert (insn != NULL_RTX); |
304 | emit_insn (insn); |
305 | return insn; |
306 | } |
307 | |
308 | rtx_insn *insn = emit_insn (gen_rtx_SET (x, gen_rtx_PLUS (GET_MODE (y), |
309 | y, z))); |
310 | if (recog_memoized (insn) < 0) |
311 | { |
312 | delete_insns_since (last); |
313 | insn = NULL; |
314 | } |
315 | return insn; |
316 | } |
317 | |
318 | /* Emit insn x = x + y. Return the insn. We use gen_add2_insn as the |
319 | last resort. */ |
320 | static rtx_insn * |
321 | emit_add2_insn (rtx x, rtx y) |
322 | { |
323 | rtx_insn *insn = emit_add3_insn (x, x, y); |
324 | if (insn == NULL_RTX) |
325 | { |
326 | insn = gen_add2_insn (x, y); |
327 | if (insn != NULL_RTX) |
328 | emit_insn (insn); |
329 | } |
330 | return insn; |
331 | } |
332 | |
333 | /* The target checks operands through operand predicates when |
334 | recognizing an insn, so we must take special care when generating |
335 | add insns, which are a frequent result of elimination. |
336 | |
337 | Emit insns for x = y + z. X can be used to store intermediate |
338 | values and therefore must not appear in Y or Z when we use X to |
339 | store an intermediate value. Y + Z should form [base] [+ index[ |
340 | * scale]] [+ disp] where base and index are registers, disp and |
341 | scale are constants. Y should contain base if it is present, Z |
342 | should contain disp if any. index[*scale] can be part of Y or Z. */ |
343 | void |
344 | lra_emit_add (rtx x, rtx y, rtx z) |
345 | { |
346 | int old; |
347 | rtx_insn *last; |
348 | rtx a1, a2, base, index, disp, scale, index_scale; |
349 | bool ok_p; |
350 | |
351 | rtx_insn *add3_insn = emit_add3_insn (x, y, z); |
352 | old = max_reg_num (); |
353 | if (add3_insn != NULL) |
354 | ; |
355 | else |
356 | { |
357 | disp = a2 = NULL_RTX; |
358 | if (GET_CODE (y) == PLUS) |
359 | { |
360 | a1 = XEXP (y, 0); |
361 | a2 = XEXP (y, 1); |
362 | disp = z; |
363 | } |
364 | else |
365 | { |
366 | a1 = y; |
367 | if (CONSTANT_P (z)) |
368 | disp = z; |
369 | else |
370 | a2 = z; |
371 | } |
372 | index_scale = scale = NULL_RTX; |
373 | if (GET_CODE (a1) == MULT) |
374 | { |
375 | index_scale = a1; |
376 | index = XEXP (a1, 0); |
377 | scale = XEXP (a1, 1); |
378 | base = a2; |
379 | } |
380 | else if (a2 != NULL_RTX && GET_CODE (a2) == MULT) |
381 | { |
382 | index_scale = a2; |
383 | index = XEXP (a2, 0); |
384 | scale = XEXP (a2, 1); |
385 | base = a1; |
386 | } |
387 | else |
388 | { |
389 | base = a1; |
390 | index = a2; |
391 | } |
392 | if ((base != NULL_RTX && ! (REG_P (base) || GET_CODE (base) == SUBREG)) |
393 | || (index != NULL_RTX |
394 | && ! (REG_P (index) || GET_CODE (index) == SUBREG)) |
395 | || (disp != NULL_RTX && ! CONSTANT_P (disp)) |
396 | || (scale != NULL_RTX && ! CONSTANT_P (scale))) |
397 | { |
398 | /* Probably we have no 3-op add. The last chance is to use a 2-op |
399 | add insn. To succeed, don't move Z to X as an address |
400 | segment always comes in Y. Otherwise, we might fail when |
401 | adding the address segment to a register. */ |
402 | lra_assert (x != y && x != z); |
403 | emit_move_insn (x, y); |
404 | rtx_insn *insn = emit_add2_insn (x, z); |
405 | lra_assert (insn != NULL_RTX); |
406 | } |
407 | else |
408 | { |
409 | if (index_scale == NULL_RTX) |
410 | index_scale = index; |
411 | if (disp == NULL_RTX) |
412 | { |
413 | /* Generate x = index_scale; x = x + base. */ |
414 | lra_assert (index_scale != NULL_RTX && base != NULL_RTX); |
415 | emit_move_insn (x, index_scale); |
416 | rtx_insn *insn = emit_add2_insn (x, base); |
417 | lra_assert (insn != NULL_RTX); |
418 | } |
419 | else if (scale == NULL_RTX) |
420 | { |
421 | /* Try x = base + disp. */ |
422 | lra_assert (base != NULL_RTX); |
423 | last = get_last_insn (); |
424 | rtx_insn *move_insn = |
425 | emit_move_insn (x, gen_rtx_PLUS (GET_MODE (base), base, disp)); |
426 | if (recog_memoized (move_insn) < 0) |
427 | { |
428 | delete_insns_since (last); |
429 | /* Generate x = disp; x = x + base. */ |
430 | emit_move_insn (x, disp); |
431 | rtx_insn *add2_insn = emit_add2_insn (x, base); |
432 | lra_assert (add2_insn != NULL_RTX); |
433 | } |
434 | /* Generate x = x + index. */ |
435 | if (index != NULL_RTX) |
436 | { |
437 | rtx_insn *insn = emit_add2_insn (x, index); |
438 | lra_assert (insn != NULL_RTX); |
439 | } |
440 | } |
441 | else |
442 | { |
443 | /* Try x = index_scale; x = x + disp; x = x + base. */ |
444 | last = get_last_insn (); |
445 | rtx_insn *move_insn = emit_move_insn (x, index_scale); |
446 | ok_p = false; |
447 | if (recog_memoized (move_insn) >= 0) |
448 | { |
449 | rtx_insn *insn = emit_add2_insn (x, disp); |
450 | if (insn != NULL_RTX) |
451 | { |
452 | if (base == NULL_RTX) |
453 | ok_p = true; |
454 | else |
455 | { |
456 | insn = emit_add2_insn (x, base); |
457 | if (insn != NULL_RTX) |
458 | ok_p = true; |
459 | } |
460 | } |
461 | } |
462 | if (! ok_p) |
463 | { |
464 | rtx_insn *insn; |
465 | |
466 | delete_insns_since (last); |
467 | /* Generate x = disp; x = x + base; x = x + index_scale. */ |
468 | emit_move_insn (x, disp); |
469 | if (base != NULL_RTX) |
470 | { |
471 | insn = emit_add2_insn (x, base); |
472 | lra_assert (insn != NULL_RTX); |
473 | } |
474 | insn = emit_add2_insn (x, index_scale); |
475 | lra_assert (insn != NULL_RTX); |
476 | } |
477 | } |
478 | } |
479 | } |
480 | /* Functions emit_... can create pseudos -- so expand the pseudo |
481 | data. */ |
482 | if (old != max_reg_num ()) |
483 | expand_reg_data (old); |
484 | } |
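As a concrete (hypothetical) decomposition on a target without a 3-operand add: for y = (plus (mult r1 4) r2) and z = (const_int 8), the code above picks index_scale = r1*4, base = r2, disp = 8 and, if the combined move is not recognized directly, emits the sequence:

  /* x = index_scale;  x = x + disp;  x = x + base  */
  x = r1 * 4;
  x = x + 8;
  x = x + r2;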
485 | |
486 | /* The number of emitted reload insns so far. */ |
487 | int lra_curr_reload_num; |
488 | |
489 | static void remove_insn_scratches (rtx_insn *insn); |
490 | |
491 | /* Emit x := y, processing the special cases y = u + v and y = u + v |
492 | * scale + w through emit_add (in the general case Y can be an |
493 | address formed as base + index reg * scale + displacement). X may |
494 | be used as an intermediate result, therefore it must not appear in Y. */ |
495 | void |
496 | lra_emit_move (rtx x, rtx y) |
497 | { |
498 | int old; |
499 | rtx_insn *insn; |
500 | |
501 | if (GET_CODE (y) != PLUS) |
502 | { |
503 | if (rtx_equal_p (x, y)) |
504 | return; |
505 | old = max_reg_num (); |
506 | |
507 | insn = (GET_CODE (x) != STRICT_LOW_PART |
508 | ? emit_move_insn (x, y) : emit_insn (gen_rtx_SET (x, y))); |
509 | /* The move pattern may require scratch registers, so convert them |
510 | into real registers now. */ |
511 | if (insn != NULL_RTX) |
512 | remove_insn_scratches (insn); |
513 | if (REG_P (x)) |
514 | lra_reg_info[ORIGINAL_REGNO (x)].last_reload = ++lra_curr_reload_num; |
515 | /* Function emit_move can create pseudos -- so expand the pseudo |
516 | data. */ |
517 | if (old != max_reg_num ()) |
518 | expand_reg_data (old); |
519 | return; |
520 | } |
521 | lra_emit_add (x, XEXP (y, 0), XEXP (y, 1)); |
522 | } |
523 | |
524 | /* Update insn operands that are duplicates of the operands whose |
525 | numbers are in the array NOPS (terminated by -1). The insn is |
526 | represented by its LRA internal representation ID. */ |
527 | void |
528 | lra_update_dups (lra_insn_recog_data_t id, signed char *nops) |
529 | { |
530 | int i, j, nop; |
531 | struct lra_static_insn_data *static_id = id->insn_static_data; |
532 | |
533 | for (i = 0; i < static_id->n_dups; i++) |
534 | for (j = 0; (nop = nops[j]) >= 0; j++) |
535 | if (static_id->dup_num[i] == nop) |
536 | *id->dup_loc[i] = *id->operand_loc[nop]; |
537 | } |
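For example (illustrative call), after changing operand 0 of an insn whose pattern contains (match_dup 0), the duplicate location can be kept in sync with:

  signed char changed_nops[] = { 0, -1 };  /* changed operands, -1 ends */
  lra_update_dups (lra_get_insn_recog_data (insn), changed_nops);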
538 | |
539 | |
540 | |
541 | /* This page contains code dealing with info about registers in the |
542 | insns. */ |
543 | |
544 | /* Pools for insn reg info. */ |
545 | object_allocator<lra_insn_reg> lra_insn_reg_pool ("insn regs"); |
546 | |
547 | /* Create LRA insn-related info about a reference to REGNO in INSN |
548 | with TYPE (in/out/inout), biggest reference mode MODE, a flag |
549 | saying whether the reference is through a subreg (SUBREG_P), and a |
550 | reference to the next insn reg info (NEXT). If REGNO can be early |
551 | clobbered, the alternatives in which it can be early clobbered are |
552 | given by EARLY_CLOBBER_ALTS. */ |
553 | static struct lra_insn_reg * |
554 | new_insn_reg (rtx_insn *insn, int regno, enum op_type type, |
555 | machine_mode mode, bool subreg_p, |
556 | alternative_mask early_clobber_alts, |
557 | struct lra_insn_reg *next) |
558 | { |
559 | lra_insn_reg *ir = lra_insn_reg_pool.allocate (); |
560 | ir->type = type; |
561 | ir->biggest_mode = mode; |
562 | if (NONDEBUG_INSN_P (insn) |
563 | && partial_subreg_p (lra_reg_info[regno].biggest_mode, mode)) |
564 | lra_reg_info[regno].biggest_mode = mode; |
565 | ir->subreg_p = subreg_p; |
566 | ir->early_clobber_alts = early_clobber_alts; |
567 | ir->regno = regno; |
568 | ir->next = next; |
569 | return ir; |
570 | } |
571 | |
572 | /* Free insn reg info list IR. */ |
573 | static void |
574 | free_insn_regs (struct lra_insn_reg *ir) |
575 | { |
576 | struct lra_insn_reg *next_ir; |
577 | |
578 | for (; ir != NULL; ir = next_ir) |
579 | { |
580 | next_ir = ir->next; |
581 | lra_insn_reg_pool.remove (ir); |
582 | } |
583 | } |
584 | |
585 | /* Finish pool for insn reg info. */ |
586 | static void |
587 | finish_insn_regs (void) |
588 | { |
589 | lra_insn_reg_pool.release (); |
590 | } |
591 | |
592 | |
593 | |
594 | /* This page contains code dealing with LRA insn info (in other |
595 | words, the LRA internal insn representation). */ |
596 | |
597 | /* Map INSN_CODE -> the static insn data. This info is valid for the |
598 | whole translation unit. */ |
599 | struct lra_static_insn_data *insn_code_data[NUM_INSN_CODES]; |
600 | |
601 | /* Debug insns are represented as a special insn with one input |
602 | operand, which is the RTL expression in its VAR_LOCATION. */ |
603 | |
604 | /* The following data are used as static insn operand data for all |
605 | debug insns. If structure lra_operand_data is changed, the |
606 | initializer should be changed too. */ |
607 | static struct lra_operand_data debug_operand_data = |
608 | { |
609 | NULL, /* alternative */ |
610 | 0, /* early_clobber_alts */ |
611 | E_VOIDmode, /* We are not interested in the operand mode. */ |
612 | OP_IN, |
613 | 0, 0, 0 |
614 | }; |
615 | |
616 | /* The following data are used as static insn data for all debug |
617 | bind insns. If structure lra_static_insn_data is changed, the |
618 | initializer should be changed too. */ |
619 | static struct lra_static_insn_data debug_bind_static_data = |
620 | { |
621 | &debug_operand_data, |
622 | 0, /* Duplication operands #. */ |
623 | -1, /* Commutative operand #. */ |
624 | 1, /* Operands #. There is only one operand which is debug RTL |
625 | expression. */ |
626 | 0, /* Duplications #. */ |
627 | 0, /* Alternatives #. We are not interested in alternatives |
628 | because we do not process debug insns for reloads. */ |
629 | NULL, /* Hard registers referenced in machine description. */ |
630 | NULL /* Descriptions of operands in alternatives. */ |
631 | }; |
632 | |
633 | /* The following data are used as static insn data for all debug |
634 | marker insns. If structure lra_static_insn_data is changed, the |
635 | initializer should be changed too. */ |
636 | static struct lra_static_insn_data debug_marker_static_data = |
637 | { |
638 | &debug_operand_data, |
639 | 0, /* Duplication operands #. */ |
640 | -1, /* Commutative operand #. */ |
641 | 0, /* Operands #. There isn't any operand. */ |
642 | 0, /* Duplications #. */ |
643 | 0, /* Alternatives #. We are not interested in alternatives |
644 | because we do not process debug insns for reloads. */ |
645 | NULL, /* Hard registers referenced in machine description. */ |
646 | NULL /* Descriptions of operands in alternatives. */ |
647 | }; |
648 | |
649 | /* Called once per compiler run to initialize some LRA data related |
650 | to insns. */ |
651 | static void |
652 | init_insn_code_data_once (void) |
653 | { |
654 | memset (insn_code_data, 0, sizeof (insn_code_data)); |
655 | } |
656 | |
657 | /* Called once per compiler run to finalize some LRA data related to |
658 | insns. */ |
659 | static void |
660 | finish_insn_code_data_once (void) |
661 | { |
662 | for (unsigned int i = 0; i < NUM_INSN_CODES; i++) |
663 | { |
664 | if (insn_code_data[i] != NULL) |
665 | { |
666 | free (insn_code_data[i]); |
667 | insn_code_data[i] = NULL; |
668 | } |
669 | } |
670 | } |
671 | |
672 | /* Return static insn data, allocating and setting it up if |
673 | necessary. Although dup_num is static data (it depends only on |
674 | icode), to set it up we need to extract the insn first. So |
675 | recog_data should be valid for a normal insn (ICODE >= 0) before the call. */ |
676 | static struct lra_static_insn_data * |
677 | get_static_insn_data (int icode, int nop, int ndup, int nalt) |
678 | { |
679 | struct lra_static_insn_data *data; |
680 | size_t n_bytes; |
681 | |
682 | lra_assert (icode < (int) NUM_INSN_CODES); |
683 | if (icode >= 0 && (data = insn_code_data[icode]) != NULL) |
684 | return data; |
685 | lra_assert (nop >= 0 && ndup >= 0 && nalt >= 0); |
686 | n_bytes = sizeof (struct lra_static_insn_data) |
687 | + sizeof (struct lra_operand_data) * nop |
688 | + sizeof (int) * ndup; |
689 | data = XNEWVAR (struct lra_static_insn_data, n_bytes); |
690 | data->operand_alternative = NULL; |
691 | data->n_operands = nop; |
692 | data->n_dups = ndup; |
693 | data->n_alternatives = nalt; |
694 | data->operand = ((struct lra_operand_data *) |
695 | ((char *) data + sizeof (struct lra_static_insn_data))); |
696 | data->dup_num = ((int *) ((char *) data->operand |
697 | + sizeof (struct lra_operand_data) * nop)); |
698 | if (icode >= 0) |
699 | { |
700 | int i; |
701 | |
702 | insn_code_data[icode] = data; |
703 | for (i = 0; i < nop; i++) |
704 | { |
705 | data->operand[i].constraint |
706 | = insn_data[icode].operand[i].constraint; |
707 | data->operand[i].mode = insn_data[icode].operand[i].mode; |
708 | data->operand[i].strict_low = insn_data[icode].operand[i].strict_low; |
709 | data->operand[i].is_operator |
710 | = insn_data[icode].operand[i].is_operator; |
711 | data->operand[i].type |
712 | = (data->operand[i].constraint[0] == '=' ? OP_OUT |
713 | : data->operand[i].constraint[0] == '+' ? OP_INOUT |
714 | : OP_IN); |
715 | data->operand[i].is_address = false; |
716 | } |
717 | for (i = 0; i < ndup; i++) |
718 | data->dup_num[i] = recog_data.dup_num[i]; |
719 | } |
720 | return data; |
721 | } |
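The single XNEWVAR allocation above carves one block into three consecutive pieces, so the one free () in finish_insn_code_data_once releases everything; schematically:

  /* [ lra_static_insn_data | nop * lra_operand_data | ndup * int ]
       ^data                  ^data->operand           ^data->dup_num  */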
722 | |
723 | /* The current length of the following array. */ |
724 | int lra_insn_recog_data_len; |
725 | |
726 | /* Map INSN_UID -> the insn recog data (NULL if unknown). */ |
727 | lra_insn_recog_data_t *lra_insn_recog_data; |
728 | |
729 | /* Alloc pool we allocate entries for lra_insn_recog_data from. */ |
730 | static object_allocator<class lra_insn_recog_data> |
731 | lra_insn_recog_data_pool ("insn recog data pool"); |
732 | |
733 | /* Initialize LRA data about insns. */ |
734 | static void |
735 | init_insn_recog_data (void) |
736 | { |
737 | lra_insn_recog_data_len = 0; |
738 | lra_insn_recog_data = NULL; |
739 | } |
740 | |
741 | /* Expand, if necessary, LRA data about insns. */ |
742 | static void |
743 | check_and_expand_insn_recog_data (int index) |
744 | { |
745 | int i, old; |
746 | |
747 | if (lra_insn_recog_data_len > index) |
748 | return; |
749 | old = lra_insn_recog_data_len; |
750 | lra_insn_recog_data_len = index * 3 / 2 + 1; |
751 | lra_insn_recog_data = XRESIZEVEC (lra_insn_recog_data_t, |
752 | lra_insn_recog_data, |
753 | lra_insn_recog_data_len); |
754 | for (i = old; i < lra_insn_recog_data_len; i++) |
755 | lra_insn_recog_data[i] = NULL; |
756 | } |
757 | |
758 | /* Finish LRA DATA about insn. */ |
759 | static void |
760 | free_insn_recog_data (lra_insn_recog_data_t data) |
761 | { |
762 | if (data->operand_loc != NULL) |
763 | free (data->operand_loc); |
764 | if (data->dup_loc != NULL) |
765 | free (data->dup_loc); |
766 | if (data->arg_hard_regs != NULL) |
767 | free (data->arg_hard_regs); |
768 | if (data->icode < 0 && NONDEBUG_INSN_P (data->insn)) |
769 | { |
770 | if (data->insn_static_data->operand_alternative != NULL) |
771 | free (const_cast <operand_alternative *> |
772 | (data->insn_static_data->operand_alternative)); |
773 | free_insn_regs (data->insn_static_data->hard_regs); |
774 | free (data->insn_static_data); |
775 | } |
776 | free_insn_regs (data->regs); |
777 | data->regs = NULL; |
778 | lra_insn_recog_data_pool.remove (data); |
779 | } |
780 | |
781 | /* Pools for copies. */ |
782 | static object_allocator<lra_copy> lra_copy_pool ("lra copies"); |
783 | |
784 | /* Finish LRA data about all insns. */ |
785 | static void |
786 | finish_insn_recog_data (void) |
787 | { |
788 | int i; |
789 | lra_insn_recog_data_t data; |
790 | |
791 | for (i = 0; i < lra_insn_recog_data_len; i++) |
792 | if ((data = lra_insn_recog_data[i]) != NULL) |
793 | free_insn_recog_data (data); |
794 | finish_insn_regs (); |
795 | lra_copy_pool.release (); |
796 | lra_insn_reg_pool.release (); |
797 | lra_insn_recog_data_pool.release (); |
798 | free (lra_insn_recog_data); |
799 | } |
800 | |
801 | /* Setup info about operands in alternatives of LRA DATA of insn. */ |
802 | static void |
803 | setup_operand_alternative (lra_insn_recog_data_t data, |
804 | const operand_alternative *op_alt) |
805 | { |
806 | int i, j, nop, nalt; |
807 | int icode = data->icode; |
808 | struct lra_static_insn_data *static_data = data->insn_static_data; |
809 | |
810 | static_data->commutative = -1; |
811 | nop = static_data->n_operands; |
812 | nalt = static_data->n_alternatives; |
813 | static_data->operand_alternative = op_alt; |
814 | for (i = 0; i < nop; i++) |
815 | { |
816 | static_data->operand[i].early_clobber_alts = 0; |
817 | static_data->operand[i].is_address = false; |
818 | if (static_data->operand[i].constraint[0] == '%') |
819 | { |
820 | /* We currently only support one commutative pair of operands. */ |
821 | if (static_data->commutative < 0) |
822 | static_data->commutative = i; |
823 | else |
824 | lra_assert (icode < 0); /* Asm. */ |
825 | /* The last operand should not be marked commutative. */ |
826 | lra_assert (i != nop - 1); |
827 | } |
828 | } |
829 | for (j = 0; j < nalt; j++) |
830 | for (i = 0; i < nop; i++, op_alt++) |
831 | { |
832 | if (op_alt->earlyclobber) |
833 | static_data->operand[i].early_clobber_alts |= (alternative_mask) 1 << j; |
834 | static_data->operand[i].is_address |= op_alt->is_address; |
835 | } |
836 | } |
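As an illustration of the commutativity handling above (hypothetical constraints): if operand 1 of a 3-operand insn has constraint "%r" and operand 2 has "r", operands 1 and 2 may be swapped and the loop records:

  static_data->commutative = 1;  /* the lower index of the '%' pair;
                                    a second '%' is only tolerated for
                                    asms (icode < 0) */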
837 | |
838 | /* Recursively process X and collect info about registers in X that |
839 | are not insn operands, with TYPE (in/out/inout) and a flag saying |
840 | whether they are early clobbered in the insn (EARLY_CLOBBER), and |
841 | add the info to LIST. X is a part of the insn given by DATA. |
842 | Return the resulting list. */ |
843 | static struct lra_insn_reg * |
844 | collect_non_operand_hard_regs (rtx_insn *insn, rtx *x, |
845 | lra_insn_recog_data_t data, |
846 | struct lra_insn_reg *list, |
847 | enum op_type type, bool early_clobber) |
848 | { |
849 | int i, j, regno, last; |
850 | bool subreg_p; |
851 | machine_mode mode; |
852 | struct lra_insn_reg *curr; |
853 | rtx op = *x; |
854 | enum rtx_code code = GET_CODE (op); |
855 | const char *fmt = GET_RTX_FORMAT (code); |
Value stored to 'fmt' during its initialization is never read
856 | |
857 | for (i = 0; i < data->insn_static_data->n_operands; i++) |
858 | if (! data->insn_static_data->operand[i].is_operator |
859 | && x == data->operand_loc[i]) |
860 | /* It is an operand loc. Stop here. */ |
861 | return list; |
862 | for (i = 0; i < data->insn_static_data->n_dups; i++) |
863 | if (x == data->dup_loc[i]) |
864 | /* It is a dup loc. Stop here. */ |
865 | return list; |
866 | mode = GET_MODE (op); |
867 | subreg_p = false; |
868 | if (code == SUBREG) |
869 | { |
870 | mode = wider_subreg_mode (op); |
871 | if (read_modify_subreg_p (op)) |
872 | subreg_p = true; |
873 | op = SUBREG_REG (op); |
874 | code = GET_CODE (op); |
875 | } |
876 | if (REG_P (op)) |
877 | { |
878 | if ((regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER) |
879 | return list; |
880 | /* Process all regs, even unallocatable ones, as we need info |
881 | about all regs for the rematerialization pass. */ |
882 | for (last = end_hard_regno (mode, regno); regno < last; regno++) |
883 | { |
884 | for (curr = list; curr != NULL; curr = curr->next) |
885 | if (curr->regno == regno && curr->subreg_p == subreg_p |
886 | && curr->biggest_mode == mode) |
887 | { |
888 | if (curr->type != type) |
889 | curr->type = OP_INOUT; |
890 | if (early_clobber) |
891 | curr->early_clobber_alts = ALL_ALTERNATIVES; |
892 | break; |
893 | } |
894 | if (curr == NULL) |
895 | { |
896 | /* This is a new hard regno or the info cannot be |
897 | integrated into the found structure. */ |
898 | #ifdef STACK_REGS |
899 | early_clobber |
900 | = (early_clobber |
901 | /* This clobber is to inform popping floating |
902 | point stack only. */ |
903 | && ! (FIRST_STACK_REG <= regno |
904 | && regno <= LAST_STACK_REG)); |
905 | #endif |
906 | list = new_insn_reg (data->insn, regno, type, mode, subreg_p, |
907 | early_clobber ? ALL_ALTERNATIVES : 0, list); |
908 | } |
909 | } |
910 | return list; |
911 | } |
912 | switch (code) |
913 | { |
914 | case SET: |
915 | list = collect_non_operand_hard_regs (insn, &SET_DEST (op), data, |
916 | list, OP_OUT, false); |
917 | list = collect_non_operand_hard_regs (insn, &SET_SRC (op), data, |
918 | list, OP_IN, false); |
919 | break; |
920 | case CLOBBER: |
921 | /* We treat clobber of non-operand hard registers as early clobber. */ |
922 | list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data, |
923 | list, OP_OUT, true); |
924 | break; |
925 | case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: |
926 | list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data, |
927 | list, OP_INOUT, false); |
928 | break; |
929 | case PRE_MODIFY: case POST_MODIFY: |
930 | list = collect_non_operand_hard_regs (insn, &XEXP (op, 0), data, |
931 | list, OP_INOUT, false); |
932 | list = collect_non_operand_hard_regs (insn, &XEXP (op, 1), data, |
933 | list, OP_IN, false); |
934 | break; |
935 | default: |
936 | fmt = GET_RTX_FORMAT (code); |
937 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
938 | { |
939 | if (fmt[i] == 'e') |
940 | list = collect_non_operand_hard_regs (insn, &XEXP (op, i), data, |
941 | list, OP_IN, false); |
942 | else if (fmt[i] == 'E') |
943 | for (j = XVECLEN (op, i) - 1; j >= 0; j--) |
944 | list = collect_non_operand_hard_regs (insn, &XVECEXP (op, i, j), |
945 | data, list, OP_IN, false); |
946 | } |
947 | } |
948 | return list; |
949 | } |
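For a hypothetical pattern (parallel [(set ...) (clobber (reg:CC 17))]) where the CC register is not an operand, the CLOBBER case above would record it roughly as (register number and mode are illustrative):

  /* regno 17, OP_OUT, early-clobbered in all alternatives.  */
  list = new_insn_reg (insn, 17, OP_OUT, CCmode, false,
                       ALL_ALTERNATIVES, list);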
950 | |
951 | /* Return info about INSN, setting up the info first if it is not set |
952 | up yet. */ |
953 | lra_insn_recog_data_t |
954 | lra_set_insn_recog_data (rtx_insn *insn) |
955 | { |
956 | lra_insn_recog_data_t data; |
957 | int i, n, icode; |
958 | rtx **locs; |
959 | unsigned int uid = INSN_UID (insn); |
960 | struct lra_static_insn_data *insn_static_data; |
961 | |
962 | check_and_expand_insn_recog_data (uid); |
963 | if (DEBUG_INSN_P (insn)) |
964 | icode = -1; |
965 | else |
966 | { |
967 | icode = INSN_CODE (insn); |
968 | if (icode < 0) |
969 | /* It might be a new simple insn which is not recognized yet. */ |
970 | INSN_CODE (insn) = icode = recog_memoized (insn); |
971 | } |
972 | data = lra_insn_recog_data_pool.allocate (); |
973 | lra_insn_recog_data[uid] = data; |
974 | data->insn = insn; |
975 | data->used_insn_alternative = LRA_UNKNOWN_ALT; |
976 | data->icode = icode; |
977 | data->regs = NULL; |
978 | if (DEBUG_INSN_P (insn)) |
979 | { |
980 | data->dup_loc = NULL; |
981 | data->arg_hard_regs = NULL; |
982 | data->preferred_alternatives = ALL_ALTERNATIVES; |
983 | if (DEBUG_BIND_INSN_P (insn)) |
984 | { |
985 | data->insn_static_data = &debug_bind_static_data; |
986 | data->operand_loc = XNEWVEC (rtx *, 1); |
987 | data->operand_loc[0] = &INSN_VAR_LOCATION_LOC (insn); |
988 | } |
989 | else if (DEBUG_MARKER_INSN_P (insn)) |
990 | { |
991 | data->insn_static_data = &debug_marker_static_data; |
992 | data->operand_loc = NULL; |
993 | } |
994 | return data; |
995 | } |
996 | if (icode < 0) |
997 | { |
998 | int nop, nalt; |
999 | machine_mode operand_mode[MAX_RECOG_OPERANDS]; |
1000 | const char *constraints[MAX_RECOG_OPERANDS]; |
1001 | |
1002 | nop = asm_noperands (PATTERN (insn)); |
1003 | data->operand_loc = data->dup_loc = NULL; |
1004 | nalt = 1; |
1005 | if (nop < 0) |
1006 | { |
1007 | /* It is a special insn like USE or CLOBBER. We should be able |
1008 | to recognize any regular insn; otherwise LRA can do nothing |
1009 | with it. */ |
1010 | gcc_assert (GET_CODE (PATTERN (insn)) == USE |
1011 | || GET_CODE (PATTERN (insn)) == CLOBBER |
1012 | || GET_CODE (PATTERN (insn)) == ASM_INPUT); |
1013 | data->insn_static_data = insn_static_data |
1014 | = get_static_insn_data (-1, 0, 0, nalt); |
1015 | } |
1016 | else |
1017 | { |
1018 | /* expand_asm_operands makes sure there aren't too many |
1019 | operands. */ |
1020 | lra_assert (nop <= MAX_RECOG_OPERANDS); |
1021 | if (nop != 0) |
1022 | data->operand_loc = XNEWVEC (rtx *, nop); |
1023 | /* Now get the operand values and constraints out of the |
1024 | insn. */ |
1025 | decode_asm_operands (PATTERN (insn), NULL, |
1026 | data->operand_loc, |
1027 | constraints, operand_mode, NULL); |
1028 | if (nop > 0) |
1029 | for (const char *p = constraints[0]; *p; p++) |
1030 | nalt += *p == ','; |
1031 | data->insn_static_data = insn_static_data |
1032 | = get_static_insn_data (-1, nop, 0, nalt); |
1033 | for (i = 0; i < nop; i++) |
1034 | { |
1035 | insn_static_data->operand[i].mode = operand_mode[i]; |
1036 | insn_static_data->operand[i].constraint = constraints[i]; |
1037 | insn_static_data->operand[i].strict_low = false; |
1038 | insn_static_data->operand[i].is_operator = false; |
1039 | insn_static_data->operand[i].is_address = false; |
1040 | } |
1041 | } |
1042 | for (i = 0; i < insn_static_data->n_operands; i++) |
1043 | insn_static_data->operand[i].type |
1044 | = (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT |
1045 | : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT |
1046 | : OP_IN); |
1047 | data->preferred_alternatives = ALL_ALTERNATIVES; |
1048 | if (nop > 0) |
1049 | { |
1050 | operand_alternative *op_alt = XCNEWVEC (operand_alternative, |
1051 | nalt * nop); |
1052 | preprocess_constraints (nop, nalt, constraints, op_alt, |
1053 | data->operand_loc); |
1054 | setup_operand_alternative (data, op_alt); |
1055 | } |
1056 | } |
1057 | else |
1058 | { |
1059 | insn_extract (insn); |
1060 | data->insn_static_data = insn_static_data |
1061 | = get_static_insn_data (icode, insn_data[icode].n_operands, |
1062 | insn_data[icode].n_dups, |
1063 | insn_data[icode].n_alternatives); |
1064 | n = insn_static_data->n_operands; |
1065 | if (n == 0) |
1066 | locs = NULL; |
1067 | else |
1068 | { |
1069 | locs = XNEWVEC (rtx *, n); |
1070 | memcpy (locs, recog_data.operand_loc, n * sizeof (rtx *)); |
1071 | } |
1072 | data->operand_loc = locs; |
1073 | n = insn_static_data->n_dups; |
1074 | if (n == 0) |
1075 | locs = NULL; |
1076 | else |
1077 | { |
1078 | locs = XNEWVEC (rtx *, n); |
1079 | memcpy (locs, recog_data.dup_loc, n * sizeof (rtx *)); |
1080 | } |
1081 | data->dup_loc = locs; |
1082 | data->preferred_alternatives = get_preferred_alternatives (insn); |
1083 | const operand_alternative *op_alt = preprocess_insn_constraints (icode); |
1084 | if (!insn_static_data->operand_alternative) |
1085 | setup_operand_alternative (data, op_alt); |
1086 | else if (op_alt != insn_static_data->operand_alternative) |
1087 | insn_static_data->operand_alternative = op_alt; |
1088 | } |
1089 | if (GET_CODE (PATTERN (insn)) == CLOBBER || GET_CODE (PATTERN (insn)) == USE) |
1090 | insn_static_data->hard_regs = NULL; |
1091 | else |
1092 | insn_static_data->hard_regs |
1093 | = collect_non_operand_hard_regs (insn, &PATTERN (insn), data, |
1094 | NULL, OP_IN, false); |
1095 | data->arg_hard_regs = NULL; |
1096 | if (CALL_P (insn)) |
1097 | { |
1098 | bool use_p; |
1099 | rtx link; |
1100 | int n_hard_regs, regno, arg_hard_regs[FIRST_PSEUDO_REGISTER]; |
1101 | |
1102 | n_hard_regs = 0; |
1103 | /* Find implicit hard register usage. We believe it will not be |
1104 | changed by whatever transformations are applied; call insns |
1105 | are an example. */ |
1106 | for (link = CALL_INSN_FUNCTION_USAGE (insn); |
1107 | link != NULL_RTX; |
1108 | link = XEXP (link, 1)) |
1109 | if (((use_p = GET_CODE (XEXP (link, 0)) == USE) |
1110 | || GET_CODE (XEXP (link, 0)) == CLOBBER) |
1111 | && REG_P (XEXP (XEXP (link, 0), 0))) |
1112 | { |
1113 | regno = REGNO (XEXP (XEXP (link, 0), 0)); |
1114 | lra_assert (regno < FIRST_PSEUDO_REGISTER); |
1115 | /* It is an argument register. */ |
1116 | for (i = REG_NREGS (XEXP (XEXP (link, 0), 0)) - 1; i >= 0; i--) |
1117 | arg_hard_regs[n_hard_regs++] |
1118 | = regno + i + (use_p ? 0 : FIRST_PSEUDO_REGISTER); |
1119 | } |
1120 | |
1121 | if (n_hard_regs != 0) |
1122 | { |
1123 | arg_hard_regs[n_hard_regs++] = -1; |
1124 | data->arg_hard_regs = XNEWVEC (int, n_hard_regs); |
1125 | memcpy (data->arg_hard_regs, arg_hard_regs, |
1126 | sizeof (int) * n_hard_regs); |
1127 | } |
1128 | } |
1129 | /* Some output operands can be recognized only from the context, not |
1130 | from the constraints, which are empty in this case. A call insn |
1131 | may contain a hard register in a set destination with an empty |
1132 | constraint, and extract_insn treats it as an input. */ |
1133 | for (i = 0; i < insn_static_data->n_operands; i++) |
1134 | { |
1135 | int j; |
1136 | rtx pat, set; |
1137 | struct lra_operand_data *operand = &insn_static_data->operand[i]; |
1138 | |
1139 | /* ??? Should we treat 'X' the same way? It looks like 'X' |
1140 | means anything and an empty constraint means we do not |
1141 | care. */ |
1142 | if (operand->type != OP_IN || *operand->constraint != '\0' |
1143 | || operand->is_operator) |
1144 | continue; |
1145 | pat = PATTERN (insn); |
1146 | if (GET_CODE (pat) == SET) |
1147 | { |
1148 | if (data->operand_loc[i] != &SET_DEST (pat)) |
1149 | continue; |
1150 | } |
1151 | else if (GET_CODE (pat) == PARALLEL) |
1152 | { |
1153 | for (j = XVECLEN (pat, 0) - 1; j >= 0; j--) |
1154 | { |
1155 | set = XVECEXP (PATTERN (insn), 0, j); |
1156 | if (GET_CODE (set) == SET |
1157 | && &SET_DEST (set) == data->operand_loc[i]) |
1158 | break; |
1159 | } |
1160 | if (j < 0) |
1161 | continue; |
1162 | } |
1163 | else |
1164 | continue; |
1165 | operand->type = OP_OUT; |
1166 | } |
1167 | return data; |
1168 | } |
1169 | |
1170 | /* Return info about the insn given by UID. The info should already |
1171 | be set up. */ |
1172 | static lra_insn_recog_data_t |
1173 | get_insn_recog_data_by_uid (int uid) |
1174 | { |
1175 | lra_insn_recog_data_t data; |
1176 | |
1177 | data = lra_insn_recog_data[uid]; |
1178 | lra_assert (data != NULL); |
1179 | return data; |
1180 | } |
1181 | |
1182 | /* Invalidate all info about insn given by its UID. */ |
1183 | static void |
1184 | invalidate_insn_recog_data (int uid) |
1185 | { |
1186 | lra_insn_recog_data_t data; |
1187 | |
1188 | data = lra_insn_recog_data[uid]; |
1189 | lra_assert (data != NULL); |
1190 | free_insn_recog_data (data); |
1191 | lra_insn_recog_data[uid] = NULL; |
1192 | } |
1193 | |
1194 | /* Update all the insn info about INSN. It is usually called when |
1195 | something in the insn was changed. Return the updated info. */ |
1196 | lra_insn_recog_data_t |
1197 | lra_update_insn_recog_data (rtx_insn *insn) |
1198 | { |
1199 | lra_insn_recog_data_t data; |
1200 | int n; |
1201 | unsigned int uid = INSN_UID (insn); |
1202 | struct lra_static_insn_data *insn_static_data; |
1203 | poly_int64 sp_offset = 0; |
1204 | |
1205 | check_and_expand_insn_recog_data (uid); |
1206 | if ((data = lra_insn_recog_data[uid]) != NULL |
1207 | && data->icode != INSN_CODE (insn)) |
1208 | { |
1209 | sp_offset = data->sp_offset; |
1210 | invalidate_insn_data_regno_info (data, insn, get_insn_freq (insn)); |
1211 | invalidate_insn_recog_data (uid); |
1212 | data = NULL; |
1213 | } |
1214 | if (data == NULL) |
1215 | { |
1216 | data = lra_get_insn_recog_data (insn); |
1217 | /* Initiate or restore SP offset. */ |
1218 | data->sp_offset = sp_offset; |
1219 | return data; |
1220 | } |
1221 | insn_static_data = data->insn_static_data; |
1222 | data->used_insn_alternative = LRA_UNKNOWN_ALT; |
1223 | if (DEBUG_INSN_P (insn)) |
1224 | return data; |
1225 | if (data->icode < 0) |
1226 | { |
1227 | int nop; |
1228 | machine_mode operand_mode[MAX_RECOG_OPERANDS30]; |
1229 | const char *constraints[MAX_RECOG_OPERANDS30]; |
1230 | |
1231 | nop = asm_noperands (PATTERN (insn)); |
1232 | if (nop >= 0) |
1233 | { |
1234 | lra_assert (nop == data->insn_static_data->n_operands)((void)(!(nop == data->insn_static_data->n_operands) ? fancy_abort ("/buildworker/marxinbox-gcc-clang-static-analyzer/build/gcc/lra.cc" , 1234, __FUNCTION__), 0 : 0)); |
1235 | /* Now get the operand values and constraints out of the |
1236 | insn. */ |
1237 | decode_asm_operands (PATTERN (insn), NULL, |
1238 | data->operand_loc, |
1239 | constraints, operand_mode, NULL); |
1240 | |
1241 | if (flag_checking) |
1242 | for (int i = 0; i < nop; i++) |
1243 | lra_assert |
1244 | (insn_static_data->operand[i].mode == operand_mode[i] |
1245 | && insn_static_data->operand[i].constraint == constraints[i] |
1246 | && ! insn_static_data->operand[i].is_operator); |
1247 | } |
1248 | |
1249 | if (flag_checking) |
1250 | for (int i = 0; i < insn_static_data->n_operands; i++) |
1251 | lra_assert |
1252 | (insn_static_data->operand[i].type |
1253 | == (insn_static_data->operand[i].constraint[0] == '=' ? OP_OUT |
1254 | : insn_static_data->operand[i].constraint[0] == '+' ? OP_INOUT |
1255 | : OP_IN)); |
1256 | } |
1257 | else |
1258 | { |
1259 | insn_extract (insn); |
1260 | n = insn_static_data->n_operands; |
1261 | if (n != 0) |
1262 | memcpy (data->operand_loc, recog_data.operand_loc, n * sizeof (rtx *)); |
1263 | n = insn_static_data->n_dups; |
1264 | if (n != 0) |
1265 | memcpy (data->dup_loc, recog_data.dup_loc, n * sizeof (rtx *)); |
1266 | lra_assert (check_bool_attrs (insn)); |
1267 | } |
1268 | return data; |
1269 | } |
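/* A minimal usage sketch (illustration only, not part of lra.cc): a
   caller typically mutates the insn in place and then refreshes the
   cached data; lra_update_insn_recog_data preserves the sp offset even
   when the insn code changed and the data had to be rebuilt.  The
   helper below assumes the insn pattern is a single SET.  */
#if 0
static void
example_replace_src (rtx_insn *insn, rtx new_src)
{
  SET_SRC (PATTERN (insn)) = new_src;	/* in-place RTL change */
  lra_update_insn_recog_data (insn);	/* re-validate the cached info */
  lra_update_insn_regno_info (insn);	/* refresh per-insn reg references */
}
#endif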
1270 | |
1271 | /* Record that INSN now uses alternative ALT. */ |
1272 | void |
1273 | lra_set_used_insn_alternative (rtx_insn *insn, int alt) |
1274 | { |
1275 | lra_insn_recog_data_t data; |
1276 | |
1277 | data = lra_get_insn_recog_data (insn); |
1278 | data->used_insn_alternative = alt; |
1279 | } |
1280 | |
1281 | /* Record that the insn with UID now uses alternative ALT. The insn |
1282 | info should already be set up. */ |
1283 | void |
1284 | lra_set_used_insn_alternative_by_uid (int uid, int alt) |
1285 | { |
1286 | lra_insn_recog_data_t data; |
1287 | |
1288 | check_and_expand_insn_recog_data (uid); |
1289 | data = lra_insn_recog_data[uid]; |
1290 | lra_assert (data != NULL); |
1291 | data->used_insn_alternative = alt; |
1292 | } |
1293 | |
1294 | |
1295 | |
1296 | /* This page contains code dealing with common register info and |
1297 | pseudo copies. */ |
1298 | |
1299 | /* The size of the following array. */ |
1300 | static int reg_info_size; |
1301 | /* Common info about each register. */ |
1302 | class lra_reg *lra_reg_info; |
1303 | |
1304 | HARD_REG_SET hard_regs_spilled_into; |
1305 | |
1306 | /* Last register value. */ |
1307 | static int last_reg_value; |
1308 | |
1309 | /* Return new register value. */ |
1310 | static int |
1311 | get_new_reg_value (void) |
1312 | { |
1313 | return ++last_reg_value; |
1314 | } |
1315 | |
1316 | /* Vec referring to pseudo copies. */ |
1317 | static vec<lra_copy_t> copy_vec; |
1318 | |
1319 | /* Initialize I-th element of lra_reg_info. */ |
1320 | static inline void |
1321 | initialize_lra_reg_info_element (int i) |
1322 | { |
1323 | bitmap_initialize (&lra_reg_info[i].insn_bitmap, &reg_obstack); |
1324 | #ifdef STACK_REGS |
1325 | lra_reg_info[i].no_stack_p = false; |
1326 | #endif |
1327 | CLEAR_HARD_REG_SET (lra_reg_info[i].conflict_hard_regs); |
1328 | CLEAR_HARD_REG_SET (lra_reg_info[i].exclude_start_hard_regs); |
1329 | lra_reg_info[i].preferred_hard_regno1 = -1; |
1330 | lra_reg_info[i].preferred_hard_regno2 = -1; |
1331 | lra_reg_info[i].preferred_hard_regno_profit1 = 0; |
1332 | lra_reg_info[i].preferred_hard_regno_profit2 = 0; |
1333 | lra_reg_info[i].biggest_mode = VOIDmode; |
1334 | lra_reg_info[i].live_ranges = NULL; |
1335 | lra_reg_info[i].nrefs = lra_reg_info[i].freq = 0; |
1336 | lra_reg_info[i].last_reload = 0; |
1337 | lra_reg_info[i].restore_rtx = NULL_RTX; |
1338 | lra_reg_info[i].val = get_new_reg_value (); |
1339 | lra_reg_info[i].offset = 0; |
1340 | lra_reg_info[i].copies = NULL; |
1341 | } |
1342 | |
1343 | /* Initialize common reg info and copies. */ |
1344 | static void |
1345 | init_reg_info (void) |
1346 | { |
1347 | int i; |
1348 | |
1349 | last_reg_value = 0; |
1350 | reg_info_size = max_reg_num () * 3 / 2 + 1; |
1351 | lra_reg_info = XNEWVEC (class lra_reg, reg_info_size); |
1352 | for (i = 0; i < reg_info_size; i++) |
1353 | initialize_lra_reg_info_element (i); |
1354 | copy_vec.truncate (0); |
1355 | CLEAR_HARD_REG_SET (hard_regs_spilled_into); |
1356 | } |
1357 | |
1358 | |
1359 | /* Finish common reg info and copies. */ |
1360 | static void |
1361 | finish_reg_info (void) |
1362 | { |
1363 | int i; |
1364 | |
1365 | for (i = 0; i < reg_info_size; i++) |
1366 | bitmap_clear (&lra_reg_info[i].insn_bitmap); |
1367 | free (lra_reg_info); |
1368 | reg_info_size = 0; |
1369 | } |
1370 | |
1371 | /* Expand common reg info if it is necessary. */ |
1372 | static void |
1373 | expand_reg_info (void) |
1374 | { |
1375 | int i, old = reg_info_size; |
1376 | |
1377 | if (reg_info_size > max_reg_num ()) |
1378 | return; |
1379 | reg_info_size = max_reg_num () * 3 / 2 + 1; |
1380 | lra_reg_info = XRESIZEVEC (class lra_reg, lra_reg_info, reg_info_size); |
1381 | for (i = old; i < reg_info_size; i++) |
1382 | initialize_lra_reg_info_element (i); |
1383 | } |
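/* A worked example (illustration only): with reg_info_size == 150 and
   max_reg_num () == 200, the early-return test above fails and the
   table is resized to 200 * 3 / 2 + 1 == 301 elements, so the next
   ~100 pseudo creations need no further reallocation.  */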
1384 | |
1385 | /* Free all copies. */ |
1386 | void |
1387 | lra_free_copies (void) |
1388 | { |
1389 | lra_copy_t cp; |
1390 | |
1391 | while (copy_vec.length () != 0) |
1392 | { |
1393 | cp = copy_vec.pop (); |
1394 | lra_reg_info[cp->regno1].copies = lra_reg_info[cp->regno2].copies = NULL; |
1395 | lra_copy_pool.remove (cp); |
1396 | } |
1397 | } |
1398 | |
1399 | /* Create copy of two pseudos REGNO1 and REGNO2. The copy execution |
1400 | frequency is FREQ. */ |
1401 | void |
1402 | lra_create_copy (int regno1, int regno2, int freq) |
1403 | { |
1404 | bool regno1_dest_p; |
1405 | lra_copy_t cp; |
1406 | |
1407 | lra_assert (regno1 != regno2); |
1408 | regno1_dest_p = true; |
1409 | if (regno1 > regno2) |
1410 | { |
1411 | std::swap (regno1, regno2); |
1412 | regno1_dest_p = false; |
1413 | } |
1414 | cp = lra_copy_pool.allocate (); |
1415 | copy_vec.safe_push (cp); |
1416 | cp->regno1_dest_p = regno1_dest_p; |
1417 | cp->freq = freq; |
1418 | cp->regno1 = regno1; |
1419 | cp->regno2 = regno2; |
1420 | cp->regno1_next = lra_reg_info[regno1].copies; |
1421 | lra_reg_info[regno1].copies = cp; |
1422 | cp->regno2_next = lra_reg_info[regno2].copies; |
1423 | lra_reg_info[regno2].copies = cp; |
1424 | if (lra_dump_file != NULL) |
1425 | fprintf (lra_dump_file, " Creating copy r%d%sr%d@%d\n", |
1426 | regno1, regno1_dest_p ? "<-" : "->", regno2, freq); |
1427 | } |
1428 | |
1429 | /* Return N-th (0, 1, ...) copy. If there is no copy, return |
1430 | NULL. */ |
1431 | lra_copy_t |
1432 | lra_get_copy (int n) |
1433 | { |
1434 | if (n >= (int) copy_vec.length ()) |
1435 | return NULL; |
1436 | return copy_vec[n]; |
1437 | } |
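/* A minimal usage sketch (illustration only): walking every recorded
   copy, in the style of lra-coalesce.cc.  lra_create_copy canonicalizes
   regno1 < regno2 and keeps the original direction in regno1_dest_p.  */
#if 0
static void
example_walk_copies (void)
{
  lra_copy_t cp;

  for (int n = 0; (cp = lra_get_copy (n)) != NULL; n++)
    fprintf (stderr, "copy r%d%sr%d@%d\n",
	     cp->regno1, cp->regno1_dest_p ? "<-" : "->",
	     cp->regno2, cp->freq);
}
#endif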
1438 | |
1439 | |
1440 | |
1441 | /* This page contains code dealing with info about registers in |
1442 | insns. */ |
1443 | |
1444 | /* Process X of INSN recursively and add info (operand type is given |
1445 | by TYPE) about registers in X to the insn DATA. If X can be early |
1446 | clobbered, alternatives in which it can be early clobbered are given |
1447 | by EARLY_CLOBBER_ALTS. */ |
1448 | static void |
1449 | add_regs_to_insn_regno_info (lra_insn_recog_data_t data, rtx x, |
1450 | rtx_insn *insn, enum op_type type, |
1451 | alternative_mask early_clobber_alts) |
1452 | { |
1453 | int i, j, regno; |
1454 | bool subreg_p; |
1455 | machine_mode mode; |
1456 | const char *fmt; |
1457 | enum rtx_code code; |
1458 | struct lra_insn_reg *curr; |
1459 | |
1460 | code = GET_CODE (x); |
1461 | mode = GET_MODE (x); |
1462 | subreg_p = false; |
1463 | if (GET_CODE (x) == SUBREG) |
1464 | { |
1465 | mode = wider_subreg_mode (x); |
1466 | if (read_modify_subreg_p (x)) |
1467 | subreg_p = true; |
1468 | x = SUBREG_REG (x); |
1469 | code = GET_CODE (x); |
1470 | } |
1471 | if (REG_P (x)) |
1472 | { |
1473 | regno = REGNO (x); |
1474 | /* Process all regs even unallocatable ones as we need info about |
1475 | all regs for rematerialization pass. */ |
1476 | expand_reg_info (); |
1477 | if (bitmap_set_bit (&lra_reg_info[regno].insn_bitmap, INSN_UID (insn))) |
1478 | { |
1479 | data->regs = new_insn_reg (data->insn, regno, type, mode, subreg_p, |
1480 | early_clobber_alts, data->regs); |
1481 | return; |
1482 | } |
1483 | else |
1484 | { |
1485 | for (curr = data->regs; curr != NULL; curr = curr->next) |
1486 | if (curr->regno == regno) |
1487 | { |
1488 | if (curr->subreg_p != subreg_p || curr->biggest_mode != mode) |
1489 | /* The info cannot be integrated into the found |
1490 | structure. */ |
1491 | data->regs = new_insn_reg (data->insn, regno, type, mode, |
1492 | subreg_p, early_clobber_alts, |
1493 | data->regs); |
1494 | else |
1495 | { |
1496 | if (curr->type != type) |
1497 | curr->type = OP_INOUT; |
1498 | curr->early_clobber_alts |= early_clobber_alts; |
1499 | } |
1500 | return; |
1501 | } |
1502 | gcc_unreachable (); |
1503 | } |
1504 | } |
1505 | |
1506 | switch (code) |
1507 | { |
1508 | case SET: |
1509 | add_regs_to_insn_regno_info (data, SET_DEST (x), insn, OP_OUT, 0); |
1510 | add_regs_to_insn_regno_info (data, SET_SRC (x), insn, OP_IN, 0); |
1511 | break; |
1512 | case CLOBBER: |
1513 | /* We treat clobber of non-operand hard registers as early |
1514 | clobber. */ |
1515 | add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_OUT, |
1516 | ALL_ALTERNATIVES); |
1517 | break; |
1518 | case PRE_INC: case PRE_DEC: case POST_INC: case POST_DEC: |
1519 | add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0); |
1520 | break; |
1521 | case PRE_MODIFY: case POST_MODIFY: |
1522 | add_regs_to_insn_regno_info (data, XEXP (x, 0), insn, OP_INOUT, 0); |
1523 | add_regs_to_insn_regno_info (data, XEXP (x, 1), insn, OP_IN, 0); |
1524 | break; |
1525 | default: |
1526 | if ((code != PARALLEL && code != EXPR_LIST) || type != OP_OUT) |
1527 | /* Some targets place small structures in registers for return |
1528 | values of functions, and those registers are wrapped in |
1529 | PARALLEL that we may see as the destination of a SET. Here |
1530 | is an example: |
1531 | |
1532 | (call_insn 13 12 14 2 (set (parallel:BLK [ |
1533 | (expr_list:REG_DEP_TRUE (reg:DI 0 ax) |
1534 | (const_int 0 [0])) |
1535 | (expr_list:REG_DEP_TRUE (reg:DI 1 dx) |
1536 | (const_int 8 [0x8])) |
1537 | ]) |
1538 | (call (mem:QI (symbol_ref:DI (... */ |
1539 | type = OP_IN; |
1540 | fmt = GET_RTX_FORMAT (code); |
1541 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1542 | { |
1543 | if (fmt[i] == 'e') |
1544 | add_regs_to_insn_regno_info (data, XEXP (x, i), insn, type, 0); |
1545 | else if (fmt[i] == 'E') |
1546 | { |
1547 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
1548 | add_regs_to_insn_regno_info (data, XVECEXP (x, i, j), insn, |
1549 | type, 0); |
1550 | } |
1551 | } |
1552 | } |
1553 | } |
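/* A worked example (illustration only): for an insn with pattern
     (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
   the SET case above records r100 as OP_OUT and recurses into the
   source as OP_IN; the PLUS falls through to the default case, so r101
   and r102 are recorded as OP_IN.  If the same pseudo later appeared
   with the other type, the existing record would be flipped to
   OP_INOUT.  */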
1554 | |
1555 | /* Return execution frequency of INSN. */ |
1556 | static int |
1557 | get_insn_freq (rtx_insn *insn) |
1558 | { |
1559 | basic_block bb = BLOCK_FOR_INSN (insn); |
1560 | |
1561 | gcc_checking_assert (bb != NULL); |
1562 | return REG_FREQ_FROM_BB (bb); |
1563 | } |
1564 | |
1565 | /* Invalidate all reg info of INSN with DATA and execution frequency |
1566 | FREQ. Update common info about the invalidated registers. */ |
1567 | static void |
1568 | invalidate_insn_data_regno_info (lra_insn_recog_data_t data, rtx_insn *insn, |
1569 | int freq) |
1570 | { |
1571 | int uid; |
1572 | bool debug_p; |
1573 | unsigned int i; |
1574 | struct lra_insn_reg *ir, *next_ir; |
1575 | |
1576 | uid = INSN_UID (insn); |
1577 | debug_p = DEBUG_INSN_P (insn); |
1578 | for (ir = data->regs; ir != NULL; ir = next_ir) |
1579 | { |
1580 | i = ir->regno; |
1581 | next_ir = ir->next; |
1582 | lra_insn_reg_pool.remove (ir); |
1583 | bitmap_clear_bit (&lra_reg_info[i].insn_bitmap, uid); |
1584 | if (i >= FIRST_PSEUDO_REGISTER && ! debug_p) |
1585 | { |
1586 | lra_reg_info[i].nrefs--; |
1587 | lra_reg_info[i].freq -= freq; |
1588 | lra_assert (lra_reg_info[i].nrefs >= 0 && lra_reg_info[i].freq >= 0); |
1589 | } |
1590 | } |
1591 | data->regs = NULL; |
1592 | } |
1593 | |
1594 | /* Invalidate all reg info of INSN. Update common info about the |
1595 | invalidated registers. */ |
1596 | void |
1597 | lra_invalidate_insn_regno_info (rtx_insn *insn) |
1598 | { |
1599 | invalidate_insn_data_regno_info (lra_get_insn_recog_data (insn), insn, |
1600 | get_insn_freq (insn)); |
1601 | } |
1602 | |
1603 | /* Update common reg info from reg info of insn given by its DATA and |
1604 | execution frequency FREQ. */ |
1605 | static void |
1606 | setup_insn_reg_info (lra_insn_recog_data_t data, int freq) |
1607 | { |
1608 | unsigned int i; |
1609 | struct lra_insn_reg *ir; |
1610 | |
1611 | for (ir = data->regs; ir != NULL; ir = ir->next) |
1612 | if ((i = ir->regno) >= FIRST_PSEUDO_REGISTER) |
1613 | { |
1614 | lra_reg_info[i].nrefs++; |
1615 | lra_reg_info[i].freq += freq; |
1616 | } |
1617 | } |
1618 | |
1619 | /* Set up insn reg info of INSN. Update common reg info from reg info |
1620 | of INSN. */ |
1621 | void |
1622 | lra_update_insn_regno_info (rtx_insn *insn) |
1623 | { |
1624 | int i, freq; |
1625 | lra_insn_recog_data_t data; |
1626 | struct lra_static_insn_data *static_data; |
1627 | enum rtx_code code; |
1628 | rtx link; |
1629 | |
1630 | if (! INSN_P (insn)) |
1631 | return; |
1632 | data = lra_get_insn_recog_data (insn); |
1633 | static_data = data->insn_static_data; |
1634 | freq = NONDEBUG_INSN_P (insn) ? get_insn_freq (insn) : 0; |
1635 | invalidate_insn_data_regno_info (data, insn, freq); |
1636 | for (i = static_data->n_operands - 1; i >= 0; i--) |
1637 | add_regs_to_insn_regno_info (data, *data->operand_loc[i], insn, |
1638 | static_data->operand[i].type, |
1639 | static_data->operand[i].early_clobber_alts); |
1640 | if ((code = GET_CODE (PATTERN (insn))) == CLOBBER || code == USE) |
1641 | add_regs_to_insn_regno_info (data, XEXP (PATTERN (insn), 0), insn, |
1642 | code == USE ? OP_IN : OP_OUT, 0); |
1643 | if (CALL_P (insn)) |
1644 | /* On some targets call insns can refer to pseudos in memory in |
1645 | CALL_INSN_FUNCTION_USAGE list. Process them in order to |
1646 | consider their occurrences in calls for different |
1647 | transformations (e.g. inheritance) with given pseudos. */ |
1648 | for (link = CALL_INSN_FUNCTION_USAGE (insn); |
1649 | link != NULL_RTX; |
1650 | link = XEXP (link, 1)) |
1651 | { |
1652 | code = GET_CODE (XEXP (link, 0)); |
1653 | if ((code == USE || code == CLOBBER) |
1654 | && MEM_P (XEXP (XEXP (link, 0), 0))) |
1655 | add_regs_to_insn_regno_info (data, XEXP (XEXP (link, 0), 0), insn, |
1656 | code == USE ? OP_IN : OP_OUT, 0); |
1657 | } |
1658 | if (NONDEBUG_INSN_P (insn)) |
1659 | setup_insn_reg_info (data, freq); |
1660 | } |
1661 | |
1662 | /* Return reg info of insn given by its UID. */ |
1663 | struct lra_insn_reg * |
1664 | lra_get_insn_regs (int uid) |
1665 | { |
1666 | lra_insn_recog_data_t data; |
1667 | |
1668 | data = get_insn_recog_data_by_uid (uid); |
1669 | return data->regs; |
1670 | } |
1671 | |
1672 | |
1673 | |
1674 | /* Recursive hash function for RTL X. */ |
1675 | hashval_t |
1676 | lra_rtx_hash (rtx x) |
1677 | { |
1678 | int i, j; |
1679 | enum rtx_code code; |
1680 | const char *fmt; |
1681 | hashval_t val = 0; |
1682 | |
1683 | if (x == 0) |
1684 | return val; |
1685 | |
1686 | code = GET_CODE (x); |
1687 | val += (int) code + 4095; |
1688 | |
1689 | /* Some RTL can be compared nonrecursively. */ |
1690 | switch (code) |
1691 | { |
1692 | case REG: |
1693 | return val + REGNO (x); |
1694 | |
1695 | case LABEL_REF: |
1696 | return iterative_hash_object (XEXP (x, 0), val); |
1697 | |
1698 | case SYMBOL_REF: |
1699 | return iterative_hash_object (XSTR (x, 0), val); |
1700 | |
1701 | case SCRATCH: |
1702 | case CONST_DOUBLE: |
1703 | case CONST_VECTOR: |
1704 | return val; |
1705 | |
1706 | case CONST_INT: |
1707 | return val + UINTVAL (x); |
1708 | |
1709 | default: |
1710 | break; |
1711 | } |
1712 | |
1713 | /* Hash the elements. */ |
1714 | fmt = GET_RTX_FORMAT (code); |
1715 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
1716 | { |
1717 | switch (fmt[i]) |
1718 | { |
1719 | case 'w': |
1720 | val += XWINT (x, i); |
1721 | break; |
1722 | |
1723 | case 'n': |
1724 | case 'i': |
1725 | val += XINT (x, i); |
1726 | break; |
1727 | |
1728 | case 'V': |
1729 | case 'E': |
1730 | val += XVECLEN (x, i); |
1731 | |
1732 | for (j = 0; j < XVECLEN (x, i); j++) |
1733 | val += lra_rtx_hash (XVECEXP (x, i, j)); |
1734 | break; |
1735 | |
1736 | case 'e': |
1737 | val += lra_rtx_hash (XEXP (x, i)); |
1738 | break; |
1739 | |
1740 | case 'S': |
1741 | case 's': |
1742 | val += htab_hash_string (XSTR (x, i)); |
1743 | break; |
1744 | |
1745 | case 'u': |
1746 | case '0': |
1747 | case 't': |
1748 | break; |
1749 | |
1750 | /* It is believed that rtx's at this level will never |
1751 | contain anything but integers and other rtx's, except for |
1752 | within LABEL_REFs and SYMBOL_REFs. */ |
1753 | default: |
1754 | abort (); |
1755 | } |
1756 | } |
1757 | return val; |
1758 | } |
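/* A minimal usage sketch (illustration only): the hash is structural,
   so two distinct rtx objects of the same shape hash equally, which is
   what pattern-keyed hash tables (e.g. in the rematerialization code)
   rely on.  Equal hashes do not prove equality, so a real table still
   compares on collision.  */
#if 0
static bool
example_same_shape_p (rtx_insn *i1, rtx_insn *i2)
{
  return (lra_rtx_hash (PATTERN (i1)) == lra_rtx_hash (PATTERN (i2))
	  && rtx_equal_p (PATTERN (i1), PATTERN (i2)));
}
#endif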
1759 | |
1760 | |
1761 | |
1762 | /* This page contains code dealing with stack of the insns which |
1763 | should be processed by the next constraint pass. */ |
1764 | |
1765 | /* Bitmap used to put an insn on the stack only once. */ |
1766 | static sbitmap lra_constraint_insn_stack_bitmap; |
1767 | |
1768 | /* The stack itself. */ |
1769 | vec<rtx_insn *> lra_constraint_insn_stack; |
1770 | |
1771 | /* Put INSN on the stack. If ALWAYS_UPDATE is true, always update the reg |
1772 | info for INSN, otherwise only update it if INSN is not already on the |
1773 | stack. */ |
1774 | static inline void |
1775 | lra_push_insn_1 (rtx_insn *insn, bool always_update) |
1776 | { |
1777 | unsigned int uid = INSN_UID (insn); |
1778 | if (always_update) |
1779 | lra_update_insn_regno_info (insn); |
1780 | if (uid >= SBITMAP_SIZE (lra_constraint_insn_stack_bitmap)) |
1781 | lra_constraint_insn_stack_bitmap = |
1782 | sbitmap_resize (lra_constraint_insn_stack_bitmap, 3 * uid / 2, 0); |
1783 | if (bitmap_bit_p (lra_constraint_insn_stack_bitmap, uid)) |
1784 | return; |
1785 | bitmap_set_bit (lra_constraint_insn_stack_bitmap, uid); |
1786 | if (! always_update) |
1787 | lra_update_insn_regno_info (insn); |
1788 | lra_constraint_insn_stack.safe_push (insn); |
1789 | } |
1790 | |
1791 | /* Put INSN on the stack. */ |
1792 | void |
1793 | lra_push_insn (rtx_insn *insn) |
1794 | { |
1795 | lra_push_insn_1 (insn, false); |
1796 | } |
1797 | |
1798 | /* Put INSN on the stack and update its reg info. */ |
1799 | void |
1800 | lra_push_insn_and_update_insn_regno_info (rtx_insn *insn) |
1801 | { |
1802 | lra_push_insn_1 (insn, true); |
1803 | } |
1804 | |
1805 | /* Put insn with UID on the stack. */ |
1806 | void |
1807 | lra_push_insn_by_uid (unsigned int uid) |
1808 | { |
1809 | lra_push_insn (lra_insn_recog_data[uid]->insn); |
1810 | } |
1811 | |
1812 | /* Take the last-inserted insn off the stack and return it. */ |
1813 | rtx_insn * |
1814 | lra_pop_insn (void) |
1815 | { |
1816 | rtx_insn *insn = lra_constraint_insn_stack.pop (); |
1817 | bitmap_clear_bit (lra_constraint_insn_stack_bitmap, INSN_UID (insn)); |
1818 | return insn; |
1819 | } |
1820 | |
1821 | /* Return the current size of the insn stack. */ |
1822 | unsigned int |
1823 | lra_insn_stack_length (void) |
1824 | { |
1825 | return lra_constraint_insn_stack.length (); |
1826 | } |
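/* A minimal usage sketch (illustration only): the constraint pass
   drains the stack as a worklist.  lra_pop_insn clears the bitmap bit,
   so an already-processed insn can legitimately be pushed again when a
   later transformation touches it.  */
#if 0
static void
example_drain_worklist (void)
{
  while (lra_insn_stack_length () != 0)
    {
      rtx_insn *insn = lra_pop_insn ();
      /* ... process INSN; processing may push further insns ...  */
    }
}
#endif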
1827 | |
1828 | /* Push insns FROM to TO (excluding TO) going in reverse order. */ |
1829 | static void |
1830 | push_insns (rtx_insn *from, rtx_insn *to) |
1831 | { |
1832 | rtx_insn *insn; |
1833 | |
1834 | if (from == NULL_RTX) |
1835 | return; |
1836 | for (insn = from; insn != to; insn = PREV_INSN (insn)) |
1837 | if (INSN_P (insn)) |
1838 | lra_push_insn (insn); |
1839 | } |
1840 | |
1841 | /* Set up sp offset for insn in range [FROM, LAST]. The offset is |
1842 | taken from the next BB insn after LAST or zero if there is no |
1843 | such insn. */ |
1844 | static void |
1845 | setup_sp_offset (rtx_insn *from, rtx_insn *last) |
1846 | { |
1847 | rtx_insn *before = next_nonnote_nondebug_insn_bb (last); |
1848 | poly_int64 offset = (before == NULL_RTX || ! INSN_P (before) |
1849 | ? 0 : lra_get_insn_recog_data (before)->sp_offset); |
1850 | |
1851 | for (rtx_insn *insn = from; insn != NEXT_INSN (last); insn = NEXT_INSN (insn)) |
1852 | lra_get_insn_recog_data (insn)->sp_offset = offset; |
1853 | } |
1854 | |
1855 | /* Emit insns BEFORE before INSN and insns AFTER after INSN. Put the |
1856 | insns onto the stack. Print about emitting the insns with |
1857 | TITLE. */ |
1858 | void |
1859 | lra_process_new_insns (rtx_insn *insn, rtx_insn *before, rtx_insn *after, |
1860 | const char *title) |
1861 | { |
1862 | if (before == NULL_RTX && after == NULL_RTX) |
1863 | return; |
1864 | if (lra_dump_file != NULL) |
1865 | { |
1866 | dump_insn_slim (lra_dump_file, insn); |
1867 | if (before != NULL_RTX) |
1868 | { |
1869 | fprintf (lra_dump_file," %s before:\n", title); |
1870 | dump_rtl_slim (lra_dump_file, before, NULL, -1, 0); |
1871 | } |
1872 | } |
1873 | if (before != NULL_RTX) |
1874 | { |
1875 | if (cfun->can_throw_non_call_exceptions) |
1876 | copy_reg_eh_region_note_forward (insn, before, NULL); |
1877 | emit_insn_before (before, insn); |
1878 | push_insns (PREV_INSN (insn), PREV_INSN (before)); |
1879 | setup_sp_offset (before, PREV_INSN (insn)); |
1880 | } |
1881 | if (after != NULL_RTX) |
1882 | { |
1883 | if (cfun->can_throw_non_call_exceptions) |
1884 | copy_reg_eh_region_note_forward (insn, after, NULL); |
1885 | if (! JUMP_P (insn)) |
1886 | { |
1887 | rtx_insn *last; |
1888 | |
1889 | if (lra_dump_file != NULL) |
1890 | { |
1891 | fprintf (lra_dump_file, " %s after:\n", title); |
1892 | dump_rtl_slim (lra_dump_file, after, NULL, -1, 0); |
1893 | } |
1894 | for (last = after; |
1895 | NEXT_INSN (last) != NULL_RTX; |
1896 | last = NEXT_INSN (last)) |
1897 | ; |
1898 | emit_insn_after (after, insn); |
1899 | push_insns (last, insn); |
1900 | setup_sp_offset (after, last); |
1901 | } |
1902 | else |
1903 | { |
1904 | /* Put output reload insns on successor BBs: */ |
1905 | edge_iterator ei; |
1906 | edge e; |
1907 | |
1908 | FOR_EACH_EDGE (e, ei, BLOCK_FOR_INSN (insn)->succs) |
1909 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1910 | { |
1911 | /* We already made the edge non-critical in ira.cc::ira. */ |
1912 | lra_assert (!EDGE_CRITICAL_P (e)); |
1913 | rtx_insn *curr, *tmp = BB_HEAD (e->dest); |
1914 | if (LABEL_P (tmp)) |
1915 | tmp = NEXT_INSN (tmp); |
1916 | if (NOTE_INSN_BASIC_BLOCK_P (tmp)) |
1917 | tmp = NEXT_INSN (tmp); |
1918 | /* Do not put reload insns if it is the last BB |
1919 | without actual insns. */ |
1920 | if (tmp == NULL) |
1921 | continue; |
1922 | start_sequence (); |
1923 | for (curr = after; curr != NULL_RTX; curr = NEXT_INSN (curr)) |
1924 | emit_insn (copy_insn (PATTERN (curr))); |
1925 | rtx_insn *copy = get_insns (), *last = get_last_insn (); |
1926 | end_sequence (); |
1927 | if (lra_dump_file != NULL) |
1928 | { |
1929 | fprintf (lra_dump_file, " %s after in bb%d:\n", title, |
1930 | e->dest->index); |
1931 | dump_rtl_slim (lra_dump_file, copy, NULL, -1, 0); |
1932 | } |
1933 | /* Use the right emit func for setting up BB_END/BB_HEAD: */ |
1934 | if (BB_END (e->dest) == PREV_INSN (tmp)) |
1935 | emit_insn_after_noloc (copy, PREV_INSN (tmp), e->dest); |
1936 | else |
1937 | emit_insn_before_noloc (copy, tmp, e->dest); |
1938 | push_insns (last, PREV_INSN (copy)); |
1939 | setup_sp_offset (copy, last); |
1940 | /* We can ignore BB live info here as it and reg notes |
1941 | will be updated before the next assignment |
1942 | sub-pass. */ |
1943 | } |
1944 | } |
1945 | } |
1946 | if (lra_dump_file != NULL) |
1947 | fprintf (lra_dump_file, "\n"); |
1948 | if (cfun->can_throw_non_call_exceptions) |
1949 | { |
1950 | rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); |
1951 | if (note && !insn_could_throw_p (insn)) |
1952 | remove_note (insn, note); |
1953 | } |
1954 | } |
1955 | |
1956 | |
1957 | /* Replace all references to register OLD_REGNO in *LOC with pseudo |
1958 | register NEW_REG. Try to simplify subreg of constant if SUBREG_P. |
1959 | DEBUG_P is true if LOC is within a DEBUG_INSN. Return true if any |
1960 | change was made. */ |
1961 | bool |
1962 | lra_substitute_pseudo (rtx *loc, int old_regno, rtx new_reg, bool subreg_p, |
1963 | bool debug_p) |
1964 | { |
1965 | rtx x = *loc; |
1966 | bool result = false; |
1967 | enum rtx_code code; |
1968 | const char *fmt; |
1969 | int i, j; |
1970 | |
1971 | if (x == NULL_RTX) |
1972 | return false; |
1973 | |
1974 | code = GET_CODE (x); |
1975 | if (code == SUBREG && subreg_p) |
1976 | { |
1977 | rtx subst, inner = SUBREG_REG (x); |
1978 | /* Transform subreg of constant while we still have inner mode |
1979 | of the subreg. The subreg internal should not be an insn |
1980 | operand. */ |
1981 | if (REG_P (inner) && (int) REGNO (inner) == old_regno |
1982 | && CONSTANT_P (new_reg) |
1983 | && (subst = simplify_subreg (GET_MODE (x), new_reg, GET_MODE (inner), |
1984 | SUBREG_BYTE (x))) != NULL_RTX) |
1985 | { |
1986 | *loc = subst; |
1987 | return true; |
1988 | } |
1989 | |
1990 | } |
1991 | else if (code == REG && (int) REGNO (x) == old_regno) |
1992 | { |
1993 | machine_mode mode = GET_MODE (x); |
1994 | machine_mode inner_mode = GET_MODE (new_reg); |
1995 | |
1996 | if (mode != inner_mode |
1997 | && ! (CONST_SCALAR_INT_P (new_reg) && SCALAR_INT_MODE_P (mode))) |
1998 | { |
1999 | poly_uint64 offset = 0; |
2000 | if (partial_subreg_p (mode, inner_mode) |
2001 | && SCALAR_INT_MODE_P (inner_mode)) |
2002 | offset = subreg_lowpart_offset (mode, inner_mode); |
2003 | if (debug_p) |
2004 | new_reg = gen_rtx_raw_SUBREG (mode, new_reg, offset); |
2005 | else |
2006 | new_reg = gen_rtx_SUBREG (mode, new_reg, offset); |
2007 | } |
2008 | *loc = new_reg; |
2009 | return true; |
2010 | } |
2011 | |
2012 | /* Scan all the operand sub-expressions. */ |
2013 | fmt = GET_RTX_FORMAT (code); |
2014 | for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--) |
2015 | { |
2016 | if (fmt[i] == 'e') |
2017 | { |
2018 | if (debug_p |
2019 | && i == 0 |
2020 | && (code == SUBREG |
2021 | || code == ZERO_EXTEND |
2022 | || code == SIGN_EXTEND |
2023 | || code == FLOAT |
2024 | || code == UNSIGNED_FLOAT)) |
2025 | { |
2026 | rtx y = XEXP (x, 0); |
2027 | if (lra_substitute_pseudo (&y, old_regno, |
2028 | new_reg, subreg_p, debug_p)) |
2029 | { |
2030 | result = true; |
2031 | if (CONST_SCALAR_INT_P (y)) |
2032 | { |
2033 | if (code == SUBREG) |
2034 | y = simplify_subreg (GET_MODE (x), y, |
2035 | GET_MODE (SUBREG_REG (x)), |
2036 | SUBREG_BYTE (x)); |
2037 | else |
2038 | y = simplify_unary_operation (code, GET_MODE (x), y, |
2039 | GET_MODE (XEXP (x, 0))); |
2040 | if (y) |
2041 | *loc = y; |
2042 | else |
2043 | *loc = gen_rtx_CLOBBER (GET_MODE (x), const0_rtx); |
2044 | } |
2045 | else |
2046 | XEXP (x, 0) = y; |
2047 | } |
2048 | } |
2049 | else if (lra_substitute_pseudo (&XEXP (x, i), old_regno, |
2050 | new_reg, subreg_p, debug_p)) |
2051 | result = true; |
2052 | } |
2053 | else if (fmt[i] == 'E') |
2054 | { |
2055 | for (j = XVECLEN (x, i) - 1; j >= 0; j--) |
2056 | if (lra_substitute_pseudo (&XVECEXP (x, i, j), old_regno, |
2057 | new_reg, subreg_p, debug_p)) |
2058 | result = true; |
2059 | } |
2060 | } |
2061 | return result; |
2062 | } |
2063 | |
2064 | /* Call lra_substitute_pseudo within an insn. Try to simplify subreg |
2065 | of constant if SUBREG_P. This won't update the insn ptr, just the |
2066 | contents of the insn. */ |
2067 | bool |
2068 | lra_substitute_pseudo_within_insn (rtx_insn *insn, int old_regno, |
2069 | rtx new_reg, bool subreg_p) |
2070 | { |
2071 | rtx loc = insn; |
2072 | return lra_substitute_pseudo (&loc, old_regno, new_reg, subreg_p, |
2073 | DEBUG_INSN_P (insn)); |
2074 | } |
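/* A minimal usage sketch (illustration only): rename a pseudo (the
   regno 100 below is hypothetical) inside one insn and queue the insn
   for reprocessing by the next constraint sub-pass.  */
#if 0
static void
example_rename_in_insn (rtx_insn *insn, rtx new_reg)
{
  if (lra_substitute_pseudo_within_insn (insn, 100, new_reg, false))
    lra_push_insn_and_update_insn_regno_info (insn);
}
#endif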
2075 | |
2076 | |
2077 | |
2078 | /* Return new register of the same mode as ORIGINAL of class ALL_REGS. |
2079 | Used in ira_remove_scratches. */ |
2080 | static rtx |
2081 | get_scratch_reg (rtx original) |
2082 | { |
2083 | return lra_create_new_reg (GET_MODE (original), original, ALL_REGS, |
2084 | NULL, NULL); |
2085 | } |
2086 | |
2087 | /* Remove all insn scratches in INSN. */ |
2088 | static void |
2089 | remove_insn_scratches (rtx_insn *insn) |
2090 | { |
2091 | if (ira_remove_insn_scratches (insn, true, lra_dump_file, get_scratch_reg)) |
2092 | df_insn_rescan (insn); |
2093 | } |
2094 | |
2095 | /* Remove all insn scratches in the current function. */ |
2096 | static void |
2097 | remove_scratches (void) |
2098 | { |
2099 | basic_block bb; |
2100 | rtx_insn *insn; |
2101 | |
2102 | FOR_EACH_BB_FN (bb, cfun) |
2103 | FOR_BB_INSNS (bb, insn) |
2104 | if (INSN_P (insn)) |
2105 | remove_insn_scratches (insn); |
2106 | } |
2107 | |
2108 | /* Function checks RTL for correctness. If FINAL_P is true, it is |
2109 | done at the end of LRA and the check is more rigorous. */ |
2110 | static void |
2111 | check_rtl (bool final_p) |
2112 | { |
2113 | basic_block bb; |
2114 | rtx_insn *insn; |
2115 | |
2116 | lra_assert (! final_p || reload_completed); |
2117 | FOR_EACH_BB_FN (bb, cfun) |
2118 | FOR_BB_INSNS (bb, insn) |
2119 | if (NONDEBUG_INSN_P (insn) |
2120 | && GET_CODE (PATTERN (insn)) != USE |
2121 | && GET_CODE (PATTERN (insn)) != CLOBBER |
2122 | && GET_CODE (PATTERN (insn)) != ASM_INPUT) |
2123 | { |
2124 | if (final_p) |
2125 | { |
2126 | extract_constrain_insn (insn); |
2127 | continue; |
2128 | } |
2129 | /* LRA code is based on assumption that all addresses can be |
2130 | correctly decomposed. LRA can generate reloads for |
2131 | decomposable addresses. The decomposition code checks the |
2132 | correctness of the addresses. So we don't need to check |
2133 | the addresses here. Don't call insn_invalid_p here, it can |
2134 | change the code at this stage. */ |
2135 | if (recog_memoized (insn) < 0 && asm_noperands (PATTERN (insn)) < 0) |
2136 | fatal_insn_not_found (insn); |
2137 | } |
2138 | } |
2139 | |
2140 | /* Determine if the current function has an exception receiver block |
2141 | that reaches the exit block via non-exceptional edges. */ |
2142 | static bool |
2143 | has_nonexceptional_receiver (void) |
2144 | { |
2145 | edge e; |
2146 | edge_iterator ei; |
2147 | basic_block *tos, *worklist, bb; |
2148 | |
2149 | /* If we're not optimizing, then just err on the safe side. */ |
2150 | if (!optimize) |
2151 | return true; |
2152 | |
2153 | /* First determine which blocks can reach exit via normal paths. */ |
2154 | tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun) + 1); |
2155 | |
2156 | FOR_EACH_BB_FN (bb, cfun) |
2157 | bb->flags &= ~BB_REACHABLE; |
2158 | |
2159 | /* Place the exit block on our worklist. */ |
2160 | EXIT_BLOCK_PTR_FOR_FN (cfun)->flags |= BB_REACHABLE; |
2161 | *tos++ = EXIT_BLOCK_PTR_FOR_FN (cfun); |
2162 | |
2163 | /* Iterate: find everything reachable from what we've already seen. */ |
2164 | while (tos != worklist) |
2165 | { |
2166 | bb = *--tos; |
2167 | |
2168 | FOR_EACH_EDGE (e, ei, bb->preds) |
2169 | if (e->flags & EDGE_ABNORMAL) |
2170 | { |
2171 | free (worklist); |
2172 | return true; |
2173 | } |
2174 | else |
2175 | { |
2176 | basic_block src = e->src; |
2177 | |
2178 | if (!(src->flags & BB_REACHABLE)) |
2179 | { |
2180 | src->flags |= BB_REACHABLE; |
2181 | *tos++ = src; |
2182 | } |
2183 | } |
2184 | } |
2185 | free (worklist); |
2186 | /* No exceptional block reached exit unexceptionally. */ |
2187 | return false; |
2188 | } |
2189 | |
2190 | /* Remove all REG_DEAD and REG_UNUSED notes and regenerate REG_INC. |
2191 | We replace pseudos with hard registers without notifying DF, and |
2192 | that can make the notes obsolete. The DF infrastructure does not |
2193 | deal with REG_INC notes -- so we should regenerate them here. */ |
2194 | static void |
2195 | update_inc_notes (void) |
2196 | { |
2197 | rtx *pnote; |
2198 | basic_block bb; |
2199 | rtx_insn *insn; |
2200 | |
2201 | FOR_EACH_BB_FN (bb, cfun) |
2202 | FOR_BB_INSNS (bb, insn) |
2203 | if (NONDEBUG_INSN_P (insn)) |
2204 | { |
2205 | pnote = &REG_NOTES (insn); |
2206 | while (*pnote != 0) |
2207 | { |
2208 | if (REG_NOTE_KIND (*pnote) == REG_DEAD |
2209 | || REG_NOTE_KIND (*pnote) == REG_UNUSED |
2210 | || REG_NOTE_KIND (*pnote) == REG_INC) |
2211 | *pnote = XEXP (*pnote, 1); |
2212 | else |
2213 | pnote = &XEXP (*pnote, 1); |
2214 | } |
2215 | |
2216 | if (AUTO_INC_DEC) |
2217 | add_auto_inc_notes (insn, PATTERN (insn)); |
2218 | } |
2219 | } |
2220 | |
2221 | /* Set to 1 while in lra. */ |
2222 | int lra_in_progress; |
2223 | |
2224 | /* Start of pseudo regnos before the LRA. */ |
2225 | int lra_new_regno_start; |
2226 | |
2227 | /* Start of reload pseudo regnos before the new spill pass. */ |
2228 | int lra_constraint_new_regno_start; |
2229 | |
2230 | /* Avoid spilling pseudos with regno greater than the following |
2231 | value, if possible. */ |
2232 | int lra_bad_spill_regno_start; |
2233 | |
2234 | /* A pseudo of Pmode. */ |
2235 | rtx lra_pmode_pseudo; |
2236 | |
2237 | /* Inheritance pseudo regnos before the new spill pass. */ |
2238 | bitmap_head lra_inheritance_pseudos; |
2239 | |
2240 | /* Split regnos before the new spill pass. */ |
2241 | bitmap_head lra_split_regs; |
2242 | |
2243 | /* Reload pseudo regnos before the new assignment pass which still can |
2244 | be spilled after the assignment pass as memory is also accepted in |
2245 | insns for the reload pseudos. */ |
2246 | bitmap_head lra_optional_reload_pseudos; |
2247 | |
2248 | /* Pseudo regnos used for subreg reloads before the new assignment |
2249 | pass. Such pseudos still can be spilled after the assignment |
2250 | pass. */ |
2251 | bitmap_head lra_subreg_reload_pseudos; |
2252 | |
2253 | /* File used for output of LRA debug information. */ |
2254 | FILE *lra_dump_file; |
2255 | |
2256 | /* True if we split a hard reg after the last constraint sub-pass. */ |
2257 | bool lra_hard_reg_split_p; |
2258 | |
2259 | /* True if we found an asm error. */ |
2260 | bool lra_asm_error_p; |
2261 | |
2262 | /* True if we should try spill into registers of different classes |
2263 | instead of memory. */ |
2264 | bool lra_reg_spill_p; |
2265 | |
2266 | /* Set up value LRA_REG_SPILL_P. */ |
2267 | static void |
2268 | setup_reg_spill_flag (void) |
2269 | { |
2270 | int cl, mode; |
2271 | |
2272 | if (targetm.spill_class != NULL) |
2273 | for (cl = 0; cl < (int) LIM_REG_CLASSES; cl++) |
2274 | for (mode = 0; mode < MAX_MACHINE_MODE; mode++) |
2275 | if (targetm.spill_class ((enum reg_class) cl, |
2276 | (machine_mode) mode) != NO_REGS) |
2277 | { |
2278 | lra_reg_spill_p = true; |
2279 | return; |
2280 | } |
2281 | lra_reg_spill_p = false; |
2282 | } |
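/* A hypothetical target hook sketch (illustration only, not any
   target's actual implementation): a target able to spill general
   registers into vector registers would return the vector class here,
   which makes the scan above set lra_reg_spill_p.  */
#if 0
static reg_class_t
example_spill_class (reg_class_t rclass, machine_mode mode)
{
  if (rclass == GENERAL_REGS && (mode == SImode || mode == DImode))
    return SSE_REGS;	/* hypothetical vector spill class */
  return NO_REGS;
}
#endif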
2283 | |
2284 | /* True if the current function is too big to use regular algorithms |
2285 | in LRA. In other words, we should use simpler and faster algorithms |
2286 | in LRA. It also means we should not worry about generating code |
2287 | for caller saves. The value is set up in IRA. */ |
2288 | bool lra_simple_p; |
2289 | |
2290 | /* Major LRA entry function. F is a file that should be used to dump |
2291 | LRA debug info. */ |
2292 | void |
2293 | lra (FILE *f) |
2294 | { |
2295 | int i; |
2296 | bool live_p, inserted_p; |
2297 | |
2298 | lra_dump_file = f; |
2299 | lra_asm_error_p = false; |
2300 | lra_pmode_pseudo = gen_reg_rtx (Pmode); |
2301 | |
2302 | timevar_push (TV_LRA); |
2303 | |
2304 | /* Make sure that the last insn is a note. Some subsequent passes |
2305 | need it. */ |
2306 | emit_note (NOTE_INSN_DELETED); |
2307 | |
2308 | lra_no_alloc_regs = ira_no_alloc_regs; |
2309 | |
2310 | init_reg_info (); |
2311 | expand_reg_info (); |
2312 | |
2313 | init_insn_recog_data (); |
2314 | |
2315 | /* Some quick check on RTL generated by previous passes. */ |
2316 | if (flag_checking) |
2317 | check_rtl (false); |
2318 | |
2319 | lra_in_progress = 1; |
2320 | |
2321 | lra_live_range_iter = lra_coalesce_iter = lra_constraint_iter = 0; |
2322 | lra_assignment_iter = lra_assignment_iter_after_spill = 0; |
2323 | lra_inheritance_iter = lra_undo_inheritance_iter = 0; |
2324 | lra_rematerialization_iter = 0; |
2325 | |
2326 | setup_reg_spill_flag (); |
2327 | |
2328 | /* Function remove_scratches can create new pseudos for clobbers -- |
2329 | so set up lra_constraint_new_regno_start before its call to |
2330 | permit changing reg classes for pseudos created by this |
2331 | simplification. */ |
2332 | lra_constraint_new_regno_start = lra_new_regno_start = max_reg_num (); |
2333 | lra_bad_spill_regno_start = INT_MAX; |
2334 | remove_scratches (); |
2335 | |
2336 | /* A function that has a non-local label that can reach the exit |
2337 | block via non-exceptional paths must save all call-saved |
2338 | registers. */ |
2339 | if (cfun->has_nonlocal_label && has_nonexceptional_receiver ()) |
2340 | crtl->saves_all_registers = 1; |
2341 | |
2342 | if (crtl->saves_all_registers) |
2343 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
2344 | if (!crtl->abi->clobbers_full_reg_p (i) |
2345 | && !fixed_regs[i] |
2346 | && !LOCAL_REGNO (i)) |
2347 | df_set_regs_ever_live (i, true); |
2348 | |
2349 | /* We don't use DF from now on, and avoid it, because it is too |
2350 | expensive when a lot of RTL changes are made. */ |
2351 | df_set_flags (DF_NO_INSN_RESCAN); |
2352 | lra_constraint_insn_stack.create (get_max_uid ()); |
2353 | lra_constraint_insn_stack_bitmap = sbitmap_alloc (get_max_uid ()); |
2354 | bitmap_clear (lra_constraint_insn_stack_bitmap); |
2355 | lra_live_ranges_init (); |
2356 | lra_constraints_init (); |
2357 | lra_curr_reload_num = 0; |
2358 | push_insns (get_last_insn (), NULL); |
2359 | /* It is needed for the 1st coalescing. */ |
2360 | bitmap_initialize (&lra_inheritance_pseudos, &reg_obstack); |
2361 | bitmap_initialize (&lra_split_regs, &reg_obstack); |
2362 | bitmap_initialize (&lra_optional_reload_pseudos, &reg_obstack); |
2363 | bitmap_initialize (&lra_subreg_reload_pseudos, &reg_obstack); |
2364 | live_p = false; |
2365 | if (maybe_ne (get_frame_size (), 0) && crtl->stack_alignment_needed) |
2366 | /* If we have a stack frame, we must align it now. The stack size |
2367 | may be a part of the offset computation for register |
2368 | elimination. */ |
2369 | assign_stack_local (BLKmode, 0, crtl->stack_alignment_needed); |
2370 | lra_init_equiv (); |
2371 | for (;;) |
2372 | { |
2373 | for (;;) |
2374 | { |
2375 | bool reloads_p = lra_constraints (lra_constraint_iter == 0); |
2376 | /* Constraint transformations may result in eliminable |
2377 | hard regs becoming uneliminable and pseudos which use them |
2378 | should be spilled. It is better to do it before pseudo |
2379 | assignments. |
2380 | |
2381 | For example, rs6000 can make |
2382 | RS6000_PIC_OFFSET_TABLE_REGNUM uneliminable if we started |
2383 | to use a constant pool. */ |
2384 | lra_eliminate (false, false); |
2385 | /* We should try to assign hard registers to scratches even |
2386 | if there were no RTL transformations in lra_constraints. |
2387 | Also we should check IRA assignments on the first |
2388 | iteration as they can be wrong because of early clobber |
2389 | operands which are ignored in IRA. */ |
2390 | if (! reloads_p && lra_constraint_iter > 1) |
2391 | { |
2392 | /* Stack is not empty here only when there are changes |
2393 | during the elimination sub-pass. */ |
2394 | if (bitmap_empty_p (lra_constraint_insn_stack_bitmap)) |
2395 | break; |
2396 | else |
2397 | /* If there are no reloads but there are changes due to |
2398 | elimination, restart the constraint sub-pass |
2399 | first. */ |
2400 | continue; |
2401 | } |
2402 | /* Do inheritance only for regular algorithms. */ |
2403 | if (! lra_simple_p) |
2404 | lra_inheritance (); |
2405 | if (live_p) |
2406 | lra_clear_live_ranges (); |
2407 | bool fails_p; |
2408 | lra_hard_reg_split_p = false; |
2409 | do |
2410 | { |
2411 | /* We need live ranges for lra_assign -- so build them. |
2412 | But don't remove dead insns or change global live |
2413 | info as we can undo inheritance transformations after |
2414 | inheritance pseudo assigning. */ |
2415 | lra_create_live_ranges (true, !lra_simple_p); |
2416 | live_p = true; |
2417 | /* If we don't spill non-reload and non-inheritance |
2418 | pseudos, there is no sense in running memory-memory move |
2419 | coalescing. If inheritance pseudos were spilled, the |
2420 | memory-memory moves involving them will be removed by |
2421 | pass undoing inheritance. */ |
2422 | if (lra_simple_p) |
2423 | lra_assign (fails_p); |
2424 | else |
2425 | { |
2426 | bool spill_p = !lra_assign (fails_p); |
2427 | |
2428 | if (lra_undo_inheritance ()) |
2429 | live_p = false; |
2430 | if (spill_p && ! fails_p) |
2431 | { |
2432 | if (! live_p) |
2433 | { |
2434 | lra_create_live_ranges (true, true); |
2435 | live_p = true; |
2436 | } |
2437 | if (lra_coalesce ()) |
2438 | live_p = false; |
2439 | } |
2440 | if (! live_p) |
2441 | lra_clear_live_ranges (); |
2442 | } |
2443 | if (fails_p) |
2444 | { |
2445 | /* It is a very rare case. It is the last hope to |
2446 | split a hard regno live range for a reload |
2447 | pseudo. */ |
2448 | if (live_p) |
2449 | lra_clear_live_ranges (); |
2450 | live_p = false; |
2451 | if (! lra_split_hard_reg_for ()) |
2452 | break; |
2453 | lra_hard_reg_split_p = true; |
2454 | } |
2455 | } |
2456 | while (fails_p && !lra_asm_error_p); |
2457 | if (! live_p) |
2458 | { /* We need the correct reg notes for work of constraint sub-pass. */ |
2459 | lra_create_live_ranges (true, true); |
2460 | live_p = true; |
2461 | } |
2462 | } |
2463 | /* Don't clear optional reloads bitmap until all constraints are |
2464 | satisfied as we need to distinguish them from regular reloads. */ |
2465 | bitmap_clear (&lra_optional_reload_pseudos); |
2466 | bitmap_clear (&lra_subreg_reload_pseudos); |
2467 | bitmap_clear (&lra_inheritance_pseudos); |
2468 | bitmap_clear (&lra_split_regs); |
2469 | if (! live_p) |
2470 | { |
2471 | /* We need full live info for spilling pseudos into |
2472 | registers instead of memory. */ |
2473 | lra_create_live_ranges (lra_reg_spill_p, true); |
2474 | live_p = true; |
2475 | } |
2476 | /* We should check necessity for spilling here as the above live |
2477 | range pass can remove spilled pseudos. */ |
2478 | if (! lra_need_for_spills_p ()) |
2479 | break; |
2480 | /* Now we know what pseudos should be spilled. Try to |
2481 | rematerialize them first. */ |
2482 | if (lra_remat ()) |
2483 | { |
2484 | /* We need full live info -- see the comment above. */ |
2485 | lra_create_live_ranges (lra_reg_spill_p, true); |
2486 | live_p = true; |
2487 | if (! lra_need_for_spills_p ()) |
2488 | { |
2489 | if (lra_need_for_scratch_reg_p ()) |
2490 | continue; |
2491 | break; |
2492 | } |
2493 | } |
2494 | lra_spill (); |
2495 | /* Assignment of stack slots changes elimination offsets for |
2496 | some eliminations. So update the offsets here. */ |
2497 | lra_eliminate (false, false); |
2498 | lra_constraint_new_regno_start = max_reg_num (); |
2499 | if (lra_bad_spill_regno_start == INT_MAX |
2500 | && lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES |
2501 | && lra_rematerialization_iter > LRA_MAX_REMATERIALIZATION_PASSES) |
2502 | /* After switching off inheritance and rematerialization |
2503 | passes, avoid spilling reload pseudos that will be created, to |
2504 | prevent LRA cycling in some complicated cases. */ |
2505 | lra_bad_spill_regno_start = lra_constraint_new_regno_start; |
2506 | lra_assignment_iter_after_spill = 0; |
2507 | } |
2508 | ira_restore_scratches (lra_dump_file); |
2509 | lra_eliminate (true, false); |
2510 | lra_final_code_change (); |
2511 | lra_in_progress = 0; |
2512 | if (live_p) |
2513 | lra_clear_live_ranges (); |
2514 | lra_live_ranges_finish (); |
2515 | lra_constraints_finish (); |
2516 | finish_reg_info (); |
2517 | sbitmap_free (lra_constraint_insn_stack_bitmap); |
2518 | lra_constraint_insn_stack.release (); |
2519 | finish_insn_recog_data (); |
2520 | regstat_free_n_sets_and_refs (); |
2521 | regstat_free_ri (); |
2522 | reload_completed = 1; |
2523 | update_inc_notes (); |
2524 | |
2525 | inserted_p = fixup_abnormal_edges (); |
2526 | |
2527 | /* We've possibly turned a single trapping insn into multiple ones. */ |
2528 | if (cfun->can_throw_non_call_exceptions) |
2529 | { |
2530 | auto_sbitmap blocks (last_basic_block_for_fn (cfun)); |
2531 | bitmap_ones (blocks); |
2532 | find_many_sub_basic_blocks (blocks); |
2533 | } |
2534 | |
2535 | if (inserted_p) |
2536 | commit_edge_insertions (); |
2537 | |
2538 | /* Replacing pseudos with their memory equivalents might have |
2539 | created shared rtx. Subsequent passes would get confused |
2540 | by this, so unshare everything here. */ |
2541 | unshare_all_rtl_again (get_insns ()); |
2542 | |
2543 | if (flag_checking) |
2544 | check_rtl (true); |
2545 | |
2546 | timevar_pop (TV_LRA); |
2547 | } |
2548 | |
2549 | /* Called once per compiler to initialize LRA data once. */ |
2550 | void |
2551 | lra_init_once (void) |
2552 | { |
2553 | init_insn_code_data_once (); |
2554 | } |
2555 | |
2556 | /* Called once per compiler to finish LRA data which are initialized |
2557 | once. */ |
2558 | void |
2559 | lra_finish_once (void) |
2560 | { |
2561 | finish_insn_code_data_once (); |
2562 | } |