File: build/gcc/ira-lives.cc
Warning: line 956, column 4: Value stored to 'cl' is never read
/* IRA processing allocno lives to build allocno live ranges.
   Copyright (C) 2006-2023 Free Software Foundation, Inc.
   Contributed by Vladimir Makarov <vmakarov@redhat.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "predict.h"
#include "df.h"
#include "memmodel.h"
#include "tm_p.h"
#include "insn-config.h"
#include "regs.h"
#include "ira.h"
#include "ira-int.h"
#include "sparseset.h"
#include "function-abi.h"

/* The code in this file is similar to one in global but the code
   works on the allocno basis and creates live ranges instead of
   pseudo-register conflicts.  */

/* Program points are enumerated by numbers from range
   0..IRA_MAX_POINT-1.  There are approximately two times more program
   points than insns.  Program points are places in the program where
   liveness info can be changed.  In the most general case (there are
   more complicated cases too), some program points correspond to
   places where an input operand dies and others correspond to places
   where output operands are born.  */
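
/* Illustrative example (insn invented for this comment, not taken from
   the surrounding code): for a single-set insn such as
       (set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
   one program point can cover the place where the inputs r101/r102 may
   die and the following point the place where the output r100 is born,
   which is roughly why there are about two points per insn.  */
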
int ira_max_point;

/* Arrays of size IRA_MAX_POINT mapping a program point to the allocno
   live ranges with given start/finish point.  */
live_range_t *ira_start_point_ranges, *ira_finish_point_ranges;

/* Number of the current program point.  */
static int curr_point;

/* Point where register pressure excess started or -1 if there is no
   register pressure excess.  Excess pressure for a register class at
   some point means that there are more allocnos of given register
   class living at the point than number of hard-registers of the
   class available for the allocation.  It is defined only for
   pressure classes.  */
static int high_pressure_start_point[N_REG_CLASSES];

/* Objects live at current point in the scan.  */
static sparseset objects_live;

/* A temporary bitmap used in functions that wish to avoid visiting an allocno
   multiple times.  */
static sparseset allocnos_processed;

/* Set of hard regs (except eliminable ones) currently live.  */
static HARD_REG_SET hard_regs_live;

/* The loop tree node corresponding to the current basic block.  */
static ira_loop_tree_node_t curr_bb_node;

/* The number of the last processed call.  */
static int last_call_num;
/* The number of last call at which given allocno was saved.  */
static int *allocno_saved_at_call;

/* The value returned by ira_setup_alts for the current instruction;
   i.e. the set of alternatives that we should consider to be likely
   candidates during reloading.  */
static alternative_mask preferred_alternatives;

/* If non-NULL, the source operand of a register to register copy for which
   we should not add a conflict with the copy's destination operand.  */
static rtx ignore_reg_for_conflicts;

/* Record hard register REGNO as now being live.  */
static void
make_hard_regno_live (int regno)
{
  SET_HARD_REG_BIT (hard_regs_live, regno);
}

/* Process the definition of hard register REGNO.  This updates
   hard_regs_live and hard reg conflict information for living allocnos.  */
static void
make_hard_regno_dead (int regno)
{
  unsigned int i;
  EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
    {
      ira_object_t obj = ira_object_id_map[i];

      if (ignore_reg_for_conflicts != NULL_RTX
          && REGNO (ignore_reg_for_conflicts)
             == (unsigned int) ALLOCNO_REGNO (OBJECT_ALLOCNO (obj)))
        continue;

      SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
      SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
    }
  CLEAR_HARD_REG_BIT (hard_regs_live, regno);
}

/* Record object OBJ as now being live.  Set a bit for it in objects_live,
   and start a new live range for it if necessary.  */
static void
make_object_live (ira_object_t obj)
{
  sparseset_set_bit (objects_live, OBJECT_CONFLICT_ID (obj));

  live_range_t lr = OBJECT_LIVE_RANGES (obj);
  if (lr == NULL
      || (lr->finish != curr_point && lr->finish + 1 != curr_point))
    ira_add_live_range_to_object (obj, curr_point, -1);
}

/* Update ALLOCNO_EXCESS_PRESSURE_POINTS_NUM for the allocno
   associated with object OBJ.  */
static void
update_allocno_pressure_excess_length (ira_object_t obj)
{
  ira_allocno_t a = OBJECT_ALLOCNO (obj);
  int start, i;
  enum reg_class aclass, pclass, cl;
  live_range_t p;

  aclass = ALLOCNO_CLASS (a);
  pclass = ira_pressure_class_translate[aclass];
  for (i = 0;
       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
       i++)
    {
      if (! ira_reg_pressure_class_p[cl])
        continue;
      if (high_pressure_start_point[cl] < 0)
        continue;
      p = OBJECT_LIVE_RANGES (obj);
      ira_assert (p != NULL);
      start = (high_pressure_start_point[cl] > p->start
               ? high_pressure_start_point[cl] : p->start);
      ALLOCNO_EXCESS_PRESSURE_POINTS_NUM (a) += curr_point - start + 1;
    }
}
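
/* Descriptive note: the increment above counts, for each pressure class,
   how many program points of OBJ's current live range fall inside the
   region of excess pressure.  E.g. (numbers assumed) if pressure on CL
   first exceeded ira_class_hard_regs_num[CL] at point 10 and OBJ dies at
   curr_point == 14, then 14 - 10 + 1 = 5 excess points are added.  */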

/* Process the definition of object OBJ, which is associated with allocno A.
   This finishes the current live range for it.  */
static void
make_object_dead (ira_object_t obj)
{
  live_range_t lr;
  int regno;
  int ignore_regno = -1;
  int ignore_total_regno = -1;
  int end_regno = -1;

  sparseset_clear_bit (objects_live, OBJECT_CONFLICT_ID (obj));

  /* Check whether any part of IGNORE_REG_FOR_CONFLICTS already conflicts
     with OBJ.  */
  if (ignore_reg_for_conflicts != NULL_RTX
      && REGNO (ignore_reg_for_conflicts) < FIRST_PSEUDO_REGISTER)
    {
      end_regno = END_REGNO (ignore_reg_for_conflicts);
      ignore_regno = ignore_total_regno = REGNO (ignore_reg_for_conflicts);

      for (regno = ignore_regno; regno < end_regno; regno++)
        {
          if (TEST_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno))
            ignore_regno = end_regno;
          if (TEST_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno))
            ignore_total_regno = end_regno;
        }
    }

  OBJECT_CONFLICT_HARD_REGS (obj) |= hard_regs_live;
  OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= hard_regs_live;

  /* If IGNORE_REG_FOR_CONFLICTS did not already conflict with OBJ, make
     sure it still doesn't.  */
  for (regno = ignore_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
  for (regno = ignore_total_regno; regno < end_regno; regno++)
    CLEAR_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);

  lr = OBJECT_LIVE_RANGES (obj);
  ira_assert (lr != NULL);
  lr->finish = curr_point;
  update_allocno_pressure_excess_length (obj);
}

/* The current register pressures for each pressure class for the current
   basic block.  */
static int curr_reg_pressure[N_REG_CLASSES];

/* Record that register pressure for PCLASS increased by N registers.
   Update the current register pressure, maximal register pressure for
   the current BB and the start point of the register pressure
   excess.  */
static void
inc_register_pressure (enum reg_class pclass, int n)
{
  int i;
  enum reg_class cl;

  for (i = 0;
       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
       i++)
    {
      if (! ira_reg_pressure_class_p[cl])
        continue;
      curr_reg_pressure[cl] += n;
      if (high_pressure_start_point[cl] < 0
          && (curr_reg_pressure[cl] > ira_class_hard_regs_num[cl]))
        high_pressure_start_point[cl] = curr_point;
      if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
        curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
    }
}
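
/* Illustrative example (numbers assumed): if ira_class_hard_regs_num[CL]
   is 8 and the pressure for CL rises from 8 to 9 in the loop above, the
   current program point is recorded in high_pressure_start_point[CL];
   it stays set until dec_register_pressure sees the pressure for CL
   drop back to 8 or below.  */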

/* Record that register pressure for PCLASS has decreased by NREGS
   registers; update current register pressure, start point of the
   register pressure excess, and register pressure excess length for
   living allocnos.  */

static void
dec_register_pressure (enum reg_class pclass, int nregs)
{
  int i;
  unsigned int j;
  enum reg_class cl;
  bool set_p = false;

  for (i = 0;
       (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
       i++)
    {
      if (! ira_reg_pressure_class_p[cl])
        continue;
      curr_reg_pressure[cl] -= nregs;
      ira_assert (curr_reg_pressure[cl] >= 0);
      if (high_pressure_start_point[cl] >= 0
          && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
        set_p = true;
    }
  if (set_p)
    {
      EXECUTE_IF_SET_IN_SPARSESET (objects_live, j)
        update_allocno_pressure_excess_length (ira_object_id_map[j]);
      for (i = 0;
           (cl = ira_reg_class_super_classes[pclass][i]) != LIM_REG_CLASSES;
           i++)
        {
          if (! ira_reg_pressure_class_p[cl])
            continue;
          if (high_pressure_start_point[cl] >= 0
              && curr_reg_pressure[cl] <= ira_class_hard_regs_num[cl])
            high_pressure_start_point[cl] = -1;
        }
    }
}

/* Determine from the objects_live bitmap whether REGNO is currently live,
   and occupies only one object.  Return false if we have no information.  */
static bool
pseudo_regno_single_word_and_live_p (int regno)
{
  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
  ira_object_t obj;

  if (a == NULL)
    return false;
  if (ALLOCNO_NUM_OBJECTS (a) > 1)
    return false;

  obj = ALLOCNO_OBJECT (a, 0);

  return sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj));
}

/* Mark the pseudo register REGNO as live.  Update all information about
   live ranges and register pressure.  */
static void
mark_pseudo_regno_live (int regno)
{
  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
  enum reg_class pclass;
  int i, n, nregs;

  if (a == NULL)
    return;

  /* Invalidate because it is referenced.  */
  allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;

  n = ALLOCNO_NUM_OBJECTS (a);
  pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
  nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
  if (n > 1)
    {
      /* We track every subobject separately.  */
      gcc_assert (nregs == n);
      nregs = 1;
    }

  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);

      if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
        continue;

      inc_register_pressure (pclass, nregs);
      make_object_live (obj);
    }
}

/* Like mark_pseudo_regno_live, but try to only mark one subword of
   the pseudo as live.  SUBWORD indicates which; a value of 0
   indicates the low part.  */
static void
mark_pseudo_regno_subword_live (int regno, int subword)
{
  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
  int n;
  enum reg_class pclass;
  ira_object_t obj;

  if (a == NULL)
    return;

  /* Invalidate because it is referenced.  */
  allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;

  n = ALLOCNO_NUM_OBJECTS (a);
  if (n == 1)
    {
      mark_pseudo_regno_live (regno);
      return;
    }

  pclass = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
  gcc_assert
    (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);
  obj = ALLOCNO_OBJECT (a, subword);

  if (sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
    return;

  inc_register_pressure (pclass, 1);
  make_object_live (obj);
}
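
/* Illustrative note (assuming a 32-bit target where DImode needs two
   word registers): such a DImode pseudo is represented by two objects,
   so a read-modify-write of (subreg:SI (reg:DI 100) 0) marks only
   object 0 of allocno 100 live here, leaving the high-word object free
   to overlap other allocnos.  */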

/* Mark the register REG as live.  Store a 1 in hard_regs_live for
   this register, record how many consecutive hardware registers it
   actually needs.  */
static void
mark_hard_reg_live (rtx reg)
{
  int regno = REGNO (reg);

  if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      int last = END_REGNO (reg);
      enum reg_class aclass, pclass;

      while (regno < last)
        {
          if (! TEST_HARD_REG_BIT (hard_regs_live, regno)
              && ! TEST_HARD_REG_BIT (eliminable_regset, regno))
            {
              aclass = ira_hard_regno_allocno_class[regno];
              pclass = ira_pressure_class_translate[aclass];
              inc_register_pressure (pclass, 1);
              make_hard_regno_live (regno);
            }
          regno++;
        }
    }
}

/* Mark a pseudo, or one of its subwords, as live.  REGNO is the pseudo's
   register number; ORIG_REG is the access in the insn, which may be a
   subreg.  */
static void
mark_pseudo_reg_live (rtx orig_reg, unsigned regno)
{
  if (read_modify_subreg_p (orig_reg))
    {
      mark_pseudo_regno_subword_live (regno,
                                      subreg_lowpart_p (orig_reg) ? 0 : 1);
    }
  else
    mark_pseudo_regno_live (regno);
}

/* Mark the register referenced by use or def REF as live.  */
static void
mark_ref_live (df_ref ref)
{
  rtx reg = DF_REF_REG (ref);
  rtx orig_reg = reg;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
    mark_pseudo_reg_live (orig_reg, REGNO (reg));
  else
    mark_hard_reg_live (reg);
}

/* Mark the pseudo register REGNO as dead.  Update all information about
   live ranges and register pressure.  */
static void
mark_pseudo_regno_dead (int regno)
{
  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
  int n, i, nregs;
  enum reg_class cl;

  if (a == NULL)
    return;

  /* Invalidate because it is referenced.  */
  allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;

  n = ALLOCNO_NUM_OBJECTS (a);
  cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
  nregs = ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)];
  if (n > 1)
    {
      /* We track every subobject separately.  */
      gcc_assert (nregs == n);
      nregs = 1;
    }
  for (i = 0; i < n; i++)
    {
      ira_object_t obj = ALLOCNO_OBJECT (a, i);
      if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
        continue;

      dec_register_pressure (cl, nregs);
      make_object_dead (obj);
    }
}

/* Like mark_pseudo_regno_dead, but called when we know that only part
   of the register dies.  SUBWORD indicates which; a value of 0
   indicates the low part.  */
static void
mark_pseudo_regno_subword_dead (int regno, int subword)
{
  ira_allocno_t a = ira_curr_regno_allocno_map[regno];
  int n;
  enum reg_class cl;
  ira_object_t obj;

  if (a == NULL)
    return;

  /* Invalidate because it is referenced.  */
  allocno_saved_at_call[ALLOCNO_NUM (a)] = 0;

  n = ALLOCNO_NUM_OBJECTS (a);
  if (n == 1)
    /* The allocno as a whole doesn't die in this case.  */
    return;

  cl = ira_pressure_class_translate[ALLOCNO_CLASS (a)];
  gcc_assert
    (n == ira_reg_class_max_nregs[ALLOCNO_CLASS (a)][ALLOCNO_MODE (a)]);

  obj = ALLOCNO_OBJECT (a, subword);
  if (!sparseset_bit_p (objects_live, OBJECT_CONFLICT_ID (obj)))
    return;

  dec_register_pressure (cl, 1);
  make_object_dead (obj);
}

/* Process the definition of hard register REG.  This updates
   hard_regs_live and hard reg conflict information for living allocnos.  */
static void
mark_hard_reg_dead (rtx reg)
{
  int regno = REGNO (reg);

  if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
    {
      int last = END_REGNO (reg);
      enum reg_class aclass, pclass;

      while (regno < last)
        {
          if (TEST_HARD_REG_BIT (hard_regs_live, regno))
            {
              aclass = ira_hard_regno_allocno_class[regno];
              pclass = ira_pressure_class_translate[aclass];
              dec_register_pressure (pclass, 1);
              make_hard_regno_dead (regno);
            }
          regno++;
        }
    }
}

/* Mark a pseudo, or one of its subwords, as dead.  REGNO is the pseudo's
   register number; ORIG_REG is the access in the insn, which may be a
   subreg.  */
static void
mark_pseudo_reg_dead (rtx orig_reg, unsigned regno)
{
  if (read_modify_subreg_p (orig_reg))
    {
      mark_pseudo_regno_subword_dead (regno,
                                      subreg_lowpart_p (orig_reg) ? 0 : 1);
    }
  else
    mark_pseudo_regno_dead (regno);
}

/* Mark the register referenced by definition DEF as dead, if the
   definition is a total one.  */
static void
mark_ref_dead (df_ref def)
{
  rtx reg = DF_REF_REG (def);
  rtx orig_reg = reg;

  if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL))
    return;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (DF_REF_FLAGS_IS_SET (def, DF_REF_PARTIAL)
      && (GET_CODE (orig_reg) != SUBREG
          || REGNO (reg) < FIRST_PSEUDO_REGISTER
          || !read_modify_subreg_p (orig_reg)))
    return;

  if (REGNO (reg) >= FIRST_PSEUDO_REGISTER)
    mark_pseudo_reg_dead (orig_reg, REGNO (reg));
  else
    mark_hard_reg_dead (reg);
}

/* If REG is a pseudo or a subreg of it, and the class of its allocno
   intersects CL, make a conflict with pseudo DREG.  ORIG_DREG is the
   rtx actually accessed, it may be identical to DREG or a subreg of it.
   Advance the current program point before making the conflict if
   ADVANCE_P.  Return TRUE if we will need to advance the current
   program point.  */
static bool
make_pseudo_conflict (rtx reg, enum reg_class cl, rtx dreg, rtx orig_dreg,
                      bool advance_p)
{
  rtx orig_reg = reg;
  ira_allocno_t a;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg) || REGNO (reg) < FIRST_PSEUDO_REGISTER)
    return advance_p;

  a = ira_curr_regno_allocno_map[REGNO (reg)];
  if (! reg_classes_intersect_p (cl, ALLOCNO_CLASS (a)))
    return advance_p;

  if (advance_p)
    curr_point++;

  mark_pseudo_reg_live (orig_reg, REGNO (reg));
  mark_pseudo_reg_live (orig_dreg, REGNO (dreg));
  mark_pseudo_reg_dead (orig_reg, REGNO (reg));
  mark_pseudo_reg_dead (orig_dreg, REGNO (dreg));

  return false;
}
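
/* Descriptive note: the live/dead bracketing above makes REG and DREG
   simultaneously live at the (possibly advanced) current program point;
   overlapping live ranges are all that is needed for the two pseudos to
   be treated as conflicting later on.  */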

/* Check and make if necessary conflicts for pseudo DREG of class
   DEF_CL of the current insn with input operand USE of class USE_CL.
   ORIG_DREG is the rtx actually accessed, it may be identical to
   DREG or a subreg of it.  Advance the current program point before
   making the conflict if ADVANCE_P.  Return TRUE if we will need to
   advance the current program point.  */
static bool
check_and_make_def_use_conflict (rtx dreg, rtx orig_dreg,
                                 enum reg_class def_cl, int use,
                                 enum reg_class use_cl, bool advance_p)
{
  if (! reg_classes_intersect_p (def_cl, use_cl))
    return advance_p;

  advance_p = make_pseudo_conflict (recog_data.operand[use],
                                    use_cl, dreg, orig_dreg, advance_p);

  /* Reload may end up swapping commutative operands, so you
     have to take both orderings into account.  The
     constraints for the two operands can be completely
     different.  (Indeed, if the constraints for the two
     operands are the same for all alternatives, there's no
     point marking them as commutative.)  */
  if (use < recog_data.n_operands - 1
      && recog_data.constraints[use][0] == '%')
    advance_p
      = make_pseudo_conflict (recog_data.operand[use + 1],
                              use_cl, dreg, orig_dreg, advance_p);
  if (use >= 1
      && recog_data.constraints[use - 1][0] == '%')
    advance_p
      = make_pseudo_conflict (recog_data.operand[use - 1],
                              use_cl, dreg, orig_dreg, advance_p);
  return advance_p;
}

/* Check and make if necessary conflicts for definition DEF of class
   DEF_CL of the current insn with input operands.  Process only
   constraints of alternative ALT.

   One of three things is true when this function is called:

   (1) DEF is an earlyclobber for alternative ALT.  Input operands then
       conflict with DEF in ALT unless they explicitly match DEF via 0-9
       constraints.

   (2) DEF matches (via 0-9 constraints) an operand that is an
       earlyclobber for alternative ALT.  Other input operands then
       conflict with DEF in ALT.

   (3) [FOR_TIE_P] Some input operand X matches DEF for alternative ALT.
       Input operands with a different value from X then conflict with
       DEF in ALT.

   However, there's still a judgement call to make when deciding
   whether a conflict in ALT is important enough to be reflected
   in the pan-alternative allocno conflict set.  */
static void
check_and_make_def_conflict (int alt, int def, enum reg_class def_cl,
                             bool for_tie_p)
{
  int use, use_match;
  ira_allocno_t a;
  enum reg_class use_cl, acl;
  bool advance_p;
  rtx dreg = recog_data.operand[def];
  rtx orig_dreg = dreg;

  if (def_cl == NO_REGS)
    return;

  if (GET_CODE (dreg) == SUBREG)
    dreg = SUBREG_REG (dreg);

  if (! REG_P (dreg) || REGNO (dreg) < FIRST_PSEUDO_REGISTER)
    return;

  a = ira_curr_regno_allocno_map[REGNO (dreg)];
  acl = ALLOCNO_CLASS (a);
  if (! reg_classes_intersect_p (acl, def_cl))
    return;

  advance_p = true;

  int n_operands = recog_data.n_operands;
  const operand_alternative *op_alt = &recog_op_alt[alt * n_operands];
  for (use = 0; use < n_operands; use++)
    {
      int alt1;

      if (use == def || recog_data.operand_type[use] == OP_OUT)
        continue;

      /* An earlyclobber on DEF doesn't apply to an input operand X if X
         explicitly matches DEF, but it applies to other input operands
         even if they happen to be the same value as X.

         In contrast, if an input operand X is tied to a non-earlyclobber
         DEF, there's no conflict with other input operands that have the
         same value as X.  */
      if (op_alt[use].matches == def
          || (for_tie_p
              && rtx_equal_p (recog_data.operand[use],
                              recog_data.operand[op_alt[def].matched])))
        continue;

      if (op_alt[use].anything_ok)
        use_cl = ALL_REGS;
      else
        use_cl = op_alt[use].cl;
      if (use_cl == NO_REGS)
        continue;

      /* If DEF is simply a tied operand, ignore cases in which this
         alternative requires USE to have a likely-spilled class.
         Adding a conflict would just constrain USE further if DEF
         happens to be allocated first.  */
      if (for_tie_p && targetm.class_likely_spilled_p (use_cl))
        continue;

      /* If there's any alternative that allows USE to match DEF, do not
         record a conflict.  If that causes us to create an invalid
         instruction due to the earlyclobber, reload must fix it up.

         Likewise, if we're treating a tied DEF like a partial earlyclobber,
         do not record a conflict if there's another alternative in which
         DEF is neither tied nor earlyclobber.  */
      for (alt1 = 0; alt1 < recog_data.n_alternatives; alt1++)
        {
          if (!TEST_BIT (preferred_alternatives, alt1))
            continue;
          const operand_alternative *op_alt1
            = &recog_op_alt[alt1 * n_operands];
          if (op_alt1[use].matches == def
              || (use < n_operands - 1
                  && recog_data.constraints[use][0] == '%'
                  && op_alt1[use + 1].matches == def)
              || (use >= 1
                  && recog_data.constraints[use - 1][0] == '%'
                  && op_alt1[use - 1].matches == def))
            break;
          if (for_tie_p
              && !op_alt1[def].earlyclobber
              && op_alt1[def].matched < 0
              && alternative_class (op_alt1, def) != NO_REGS
              && alternative_class (op_alt1, use) != NO_REGS)
            break;
        }

      if (alt1 < recog_data.n_alternatives)
        continue;

      advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
                                                   use, use_cl, advance_p);

      if ((use_match = op_alt[use].matches) >= 0)
        {
          gcc_checking_assert (use_match != def);

          if (op_alt[use_match].anything_ok)
            use_cl = ALL_REGS;
          else
            use_cl = op_alt[use_match].cl;
          advance_p = check_and_make_def_use_conflict (dreg, orig_dreg, def_cl,
                                                       use, use_cl, advance_p);
        }
    }
}

/* Make conflicts of early clobber pseudo registers of the current
   insn with its inputs.  Avoid introducing unnecessary conflicts by
   checking classes of the constraints and pseudos because otherwise
   significant code degradation is possible for some targets.

   For these purposes, tying an input to an output makes that output act
   like an earlyclobber for inputs with a different value, since the output
   register then has a predetermined purpose on input to the instruction.  */
static void
make_early_clobber_and_input_conflicts (void)
{
  int alt;
  int def, def_match;
  enum reg_class def_cl;

  int n_alternatives = recog_data.n_alternatives;
  int n_operands = recog_data.n_operands;
  const operand_alternative *op_alt = recog_op_alt;
  for (alt = 0; alt < n_alternatives; alt++, op_alt += n_operands)
    if (TEST_BIT (preferred_alternatives, alt))
      for (def = 0; def < n_operands; def++)
        {
          if (op_alt[def].anything_ok)
            def_cl = ALL_REGS;
          else
            def_cl = op_alt[def].cl;
          if (def_cl != NO_REGS)
            {
              if (op_alt[def].earlyclobber)
                check_and_make_def_conflict (alt, def, def_cl, false);
              else if (op_alt[def].matched >= 0
                       && !targetm.class_likely_spilled_p (def_cl))
                check_and_make_def_conflict (alt, def, def_cl, true);
            }

          if ((def_match = op_alt[def].matches) >= 0
              && (op_alt[def_match].earlyclobber
                  || op_alt[def].earlyclobber))
            {
              if (op_alt[def_match].anything_ok)
                def_cl = ALL_REGS;
              else
                def_cl = op_alt[def_match].cl;
              check_and_make_def_conflict (alt, def, def_cl, false);
            }
        }
}
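
/* Illustrative example (generic constraints assumed): given an
   alternative with an earlyclobber output "=&r" (operand 0) and a plain
   input "r" (operand 1), the code above records a conflict between the
   two pseudos, unless operand 1 is explicitly tied to the output with a
   matching constraint "0" in some preferred alternative.  */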

/* Mark early clobber hard registers of the current INSN as live (if
   LIVE_P) or dead.  Return true if there are such registers.  */
static bool
mark_hard_reg_early_clobbers (rtx_insn *insn, bool live_p)
{
  df_ref def;
  bool set_p = false;

  FOR_EACH_INSN_DEF (def, insn)
    if (DF_REF_FLAGS_IS_SET (def, DF_REF_MUST_CLOBBER))
      {
        rtx dreg = DF_REF_REG (def);

        if (GET_CODE (dreg) == SUBREG)
          dreg = SUBREG_REG (dreg);
        if (! REG_P (dreg) || REGNO (dreg) >= FIRST_PSEUDO_REGISTER)
          continue;

        /* Hard register clobbers are believed to be early clobber
           because there is no way to say that non-operand hard
           register clobbers are not early ones.  */
        if (live_p)
          mark_ref_live (def);
        else
          mark_ref_dead (def);
        set_p = true;
      }

  return set_p;
}

/* Check whether CONSTRAINTS permits the use of only one hard register.
   If so, return the class of that hard register.  Otherwise return
   NO_REGS.  */
static enum reg_class
single_reg_class (const char *constraints, rtx op, rtx equiv_const)
{
  int c;
  enum reg_class cl, next_cl;
  enum constraint_num cn;

  cl = NO_REGS;
  alternative_mask preferred = preferred_alternatives;
  while ((c = *constraints))
    {
      if (c == '#')
        preferred &= ~ALTERNATIVE_BIT (0);
      else if (c == ',')
        preferred >>= 1;
      else if (preferred & 1)
        switch (c)
          {
          case 'g':
            return NO_REGS;

          default:
            /* ??? Is this the best way to handle memory constraints?  */
            cn = lookup_constraint (constraints);
            if (insn_extra_memory_constraint (cn)
                || insn_extra_special_memory_constraint (cn)
                || insn_extra_relaxed_memory_constraint (cn)
                || insn_extra_address_constraint (cn))
              return NO_REGS;
            if (constraint_satisfied_p (op, cn)
                || (equiv_const != NULL_RTX
                    && CONSTANT_P (equiv_const)
                    && constraint_satisfied_p (equiv_const, cn)))
              return NO_REGS;
            next_cl = reg_class_for_constraint (cn);
            if (next_cl == NO_REGS)
              break;
            if (cl == NO_REGS
                ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
                : (ira_class_singleton[cl][GET_MODE (op)]
                   != ira_class_singleton[next_cl][GET_MODE (op)]))
              return NO_REGS;
            cl = next_cl;
            break;

          case '0': case '1': case '2': case '3': case '4':
          case '5': case '6': case '7': case '8': case '9':
            {
              char *end;
              unsigned long dup = strtoul (constraints, &end, 10);
              constraints = end;
              next_cl
                = single_reg_class (recog_data.constraints[dup],
                                    recog_data.operand[dup], NULL_RTX);
              if (cl == NO_REGS
                  ? ira_class_singleton[next_cl][GET_MODE (op)] < 0
                  : (ira_class_singleton[cl][GET_MODE (op)]
                     != ira_class_singleton[next_cl][GET_MODE (op)]))
                return NO_REGS;
              cl = next_cl;
              continue;
            }
          }
      constraints += CONSTRAINT_LEN (c, constraints);
    }
  return cl;
}
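
/* Illustrative example (x86-style constraints assumed): a constraint
   string such as "a" allows only a class whose SImode singleton is
   %eax, so single_reg_class returns that class, while "r" maps to a
   multi-register class with no singleton and yields NO_REGS here.  */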

/* Check whether operand OP_NUM of the current insn can use only one
   hard register.  If so, return the class of that hard register.
   Otherwise return NO_REGS.  */
static enum reg_class
single_reg_operand_class (int op_num)
{
  if (op_num < 0 || recog_data.n_alternatives == 0)
    return NO_REGS;
  return single_reg_class (recog_data.constraints[op_num],
                           recog_data.operand[op_num], NULL_RTX);
}

/* Set up hard register set *SET to the hard registers which might be
   used by insn reloads because the constraints are too strict.  */
void
ira_implicitly_set_insn_hard_regs (HARD_REG_SET *set,
                                   alternative_mask preferred)
{
  int i, c, regno = 0;
  enum reg_class cl;
  rtx op;
  machine_mode mode;

  CLEAR_HARD_REG_SET (*set);
  for (i = 0; i < recog_data.n_operands; i++)
    {
      op = recog_data.operand[i];

      if (GET_CODE (op) == SUBREG)
        op = SUBREG_REG (op);

      if (GET_CODE (op) == SCRATCH
          || (REG_P (op) && (regno = REGNO (op)) >= FIRST_PSEUDO_REGISTER))
        {
          const char *p = recog_data.constraints[i];

          mode = (GET_CODE (op) == SCRATCH
                  ? GET_MODE (op) : PSEUDO_REGNO_MODE (regno));
          cl = NO_REGS;
          ^~ Value stored to 'cl' is never read
          for (; (c = *p); p += CONSTRAINT_LEN (c, p))
            if (c == '#')
              preferred &= ~ALTERNATIVE_BIT (0);
            else if (c == ',')
              preferred >>= 1;
            else if (preferred & 1)
              {
                cl = reg_class_for_constraint (lookup_constraint (p));
                if (cl != NO_REGS)
                  {
                    /* There is no register pressure problem if all of the
                       regs in this class are fixed.  */
                    int regno = ira_class_singleton[cl][mode];
                    if (regno >= 0)
                      add_to_hard_reg_set (set, mode, regno);
                  }
              }
        }
    }
}

/* Process input operands (if IN_P) or output operands (otherwise) of
   the current insn with frequency FREQ, to find allocnos that can use
   only one hard register, and make all other currently living allocnos
   conflict with that hard register.  */
static void
process_single_reg_class_operands (bool in_p, int freq)
{
  int i, regno;
  unsigned int px;
  enum reg_class cl;
  rtx operand;
  ira_allocno_t operand_a, a;

  for (i = 0; i < recog_data.n_operands; i++)
    {
      operand = recog_data.operand[i];
      if (in_p && recog_data.operand_type[i] != OP_IN
          && recog_data.operand_type[i] != OP_INOUT)
        continue;
      if (! in_p && recog_data.operand_type[i] != OP_OUT
          && recog_data.operand_type[i] != OP_INOUT)
        continue;
      cl = single_reg_operand_class (i);
      if (cl == NO_REGS)
        continue;

      operand_a = NULL;

      if (GET_CODE (operand) == SUBREG)
        operand = SUBREG_REG (operand);

      if (REG_P (operand)
          && (regno = REGNO (operand)) >= FIRST_PSEUDO_REGISTER)
        {
          enum reg_class aclass;

          operand_a = ira_curr_regno_allocno_map[regno];
          aclass = ALLOCNO_CLASS (operand_a);
          if (ira_class_subset_p[cl][aclass])
            {
              /* View the desired allocation of OPERAND as:

                     (REG:YMODE YREGNO),

                 a simplification of:

                     (subreg:YMODE (reg:XMODE XREGNO) OFFSET).  */
              machine_mode ymode, xmode;
              int xregno, yregno;
              poly_int64 offset;

              xmode = recog_data.operand_mode[i];
              xregno = ira_class_singleton[cl][xmode];
              gcc_assert (xregno >= 0);
              ymode = ALLOCNO_MODE (operand_a);
              offset = subreg_lowpart_offset (ymode, xmode);
              yregno = simplify_subreg_regno (xregno, xmode, offset, ymode);
              if (yregno >= 0
                  && ira_class_hard_reg_index[aclass][yregno] >= 0)
                {
                  int cost;

                  ira_allocate_and_set_costs
                    (&ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a),
                     aclass, 0);
                  ira_init_register_move_cost_if_necessary (xmode);
                  cost = freq * (in_p
                                 ? ira_register_move_cost[xmode][aclass][cl]
                                 : ira_register_move_cost[xmode][cl][aclass]);
                  ALLOCNO_CONFLICT_HARD_REG_COSTS (operand_a)
                    [ira_class_hard_reg_index[aclass][yregno]] -= cost;
                }
            }
        }

      EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
        {
          ira_object_t obj = ira_object_id_map[px];
          a = OBJECT_ALLOCNO (obj);
          if (a != operand_a)
            {
              /* We could increase costs of A instead of making it
                 conflicting with the hard register.  But it works worse
                 because it will be spilled in reload anyway.  */
              OBJECT_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
              OBJECT_TOTAL_CONFLICT_HARD_REGS (obj) |= reg_class_contents[cl];
            }
        }
    }
}

/* Look through the CALL_INSN_FUNCTION_USAGE of a call insn INSN, and see if
   we find a SET rtx that we can use to deduce that a register can be cheaply
   caller-saved.  Return such a register, or NULL_RTX if none is found.  */
static rtx
find_call_crossed_cheap_reg (rtx_insn *insn)
{
  rtx cheap_reg = NULL_RTX;
  rtx exp = CALL_INSN_FUNCTION_USAGE (insn);

  while (exp != NULL)
    {
      rtx x = XEXP (exp, 0);
      if (GET_CODE (x) == SET)
        {
          exp = x;
          break;
        }
      exp = XEXP (exp, 1);
    }
  if (exp != NULL)
    {
      basic_block bb = BLOCK_FOR_INSN (insn);
      rtx reg = SET_SRC (exp);
      rtx_insn *prev = PREV_INSN (insn);
      while (prev && !(INSN_P (prev)
                       && BLOCK_FOR_INSN (prev) != bb))
        {
          if (NONDEBUG_INSN_P (prev))
            {
              rtx set = single_set (prev);

              if (set && rtx_equal_p (SET_DEST (set), reg))
                {
                  rtx src = SET_SRC (set);
                  if (!REG_P (src) || HARD_REGISTER_P (src)
                      || !pseudo_regno_single_word_and_live_p (REGNO (src)))
                    break;
                  if (!modified_between_p (src, prev, insn))
                    cheap_reg = src;
                  break;
                }
              if (set && rtx_equal_p (SET_SRC (set), reg))
                {
                  rtx dest = SET_DEST (set);
                  if (!REG_P (dest) || HARD_REGISTER_P (dest)
                      || !pseudo_regno_single_word_and_live_p (REGNO (dest)))
                    break;
                  if (!modified_between_p (dest, prev, insn))
                    cheap_reg = dest;
                  break;
                }

              if (reg_set_p (reg, prev))
                break;
            }
          prev = PREV_INSN (prev);
        }
    }
  return cheap_reg;
}
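
/* Descriptive note: a register found by this function is recorded with a
   REG_RETURNED note by the call handling in process_bb_node_lives below,
   marking the pseudo as cheap to reconstruct from the call's return
   value instead of being saved and restored around the call.  */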

/* Determine whether INSN is a register to register copy of the type where
   we do not need to make the source and destination registers conflict.
   If this is a copy instruction, then return the source reg.  Otherwise,
   return NULL_RTX.  */
rtx
non_conflicting_reg_copy_p (rtx_insn *insn)
{
  /* Reload has issues with overlapping pseudos being assigned to the
     same hard register, so don't allow it.  See PR87600 for details.  */
  if (!targetm.lra_p ())
    return NULL_RTX;

  rtx set = single_set (insn);

  /* Disallow anything other than a simple register to register copy
     that has no side effects.  */
  if (set == NULL_RTX
      || !REG_P (SET_DEST (set))
      || !REG_P (SET_SRC (set))
      || side_effects_p (set))
    return NULL_RTX;

  int dst_regno = REGNO (SET_DEST (set));
  int src_regno = REGNO (SET_SRC (set));
  machine_mode mode = GET_MODE (SET_DEST (set));

  /* By definition, a register does not conflict with itself, therefore we
     do not have to handle it specially.  Returning NULL_RTX now helps
     simplify the callers of this function.  */
  if (dst_regno == src_regno)
    return NULL_RTX;

  /* Computing conflicts for register pairs is difficult to get right, so
     for now, disallow it.  */
  if ((HARD_REGISTER_NUM_P (dst_regno)
       && hard_regno_nregs (dst_regno, mode) != 1)
      || (HARD_REGISTER_NUM_P (src_regno)
          && hard_regno_nregs (src_regno, mode) != 1))
    return NULL_RTX;

  return SET_SRC (set);
}
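
/* Descriptive note: the source register returned here is stored in
   ignore_reg_for_conflicts while the copy insn is processed, so the
   live ranges of the copy's source and destination may overlap without
   a recorded conflict; giving both the same hard register then turns
   the copy into a no-op.  */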

#ifdef EH_RETURN_DATA_REGNO

/* Add EH return hard registers as conflict hard registers to allocnos
   living at the end of BB.  For most allocnos it is already done in
   process_bb_node_lives when we process input edges, but that does not
   work when an EH edge leads out of the current region.  This function
   covers such out-of-region edges.  */
static void
process_out_of_region_eh_regs (basic_block bb)
{
  edge e;
  edge_iterator ei;
  unsigned int i;
  bitmap_iterator bi;
  bool eh_p = false;

  FOR_EACH_EDGE (e, ei, bb->succs)
    if ((e->flags & EDGE_EH)
        && IRA_BB_NODE (e->dest)->parent != IRA_BB_NODE (bb)->parent)
      eh_p = true;

  if (! eh_p)
    return;

  EXECUTE_IF_SET_IN_BITMAP (df_get_live_out (bb), FIRST_PSEUDO_REGISTER, i, bi)
    {
      ira_allocno_t a = ira_curr_regno_allocno_map[i];
      for (int n = ALLOCNO_NUM_OBJECTS (a) - 1; n >= 0; n--)
        {
          ira_object_t obj = ALLOCNO_OBJECT (a, n);
          for (int k = 0; ; k++)
            {
              unsigned int regno = EH_RETURN_DATA_REGNO (k);
              if (regno == INVALID_REGNUM)
                break;
              SET_HARD_REG_BIT (OBJECT_CONFLICT_HARD_REGS (obj), regno);
              SET_HARD_REG_BIT (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj), regno);
            }
        }
    }
}

#endif

/* Process insns of the basic block given by its LOOP_TREE_NODE to
   update allocno live ranges, allocno hard register conflicts,
   intersected calls, and register pressure info for allocnos for the
   basic block and for the regions containing the basic block.  */
static void
process_bb_node_lives (ira_loop_tree_node_t loop_tree_node)
{
  int i, freq;
  unsigned int j;
  basic_block bb;
  rtx_insn *insn;
  bitmap_iterator bi;
  bitmap reg_live_out;
  unsigned int px;
  bool set_p;

  bb = loop_tree_node->bb;
  if (bb != NULL)
    {
      for (i = 0; i < ira_pressure_classes_num; i++)
        {
          curr_reg_pressure[ira_pressure_classes[i]] = 0;
          high_pressure_start_point[ira_pressure_classes[i]] = -1;
        }
      curr_bb_node = loop_tree_node;
      reg_live_out = df_get_live_out (bb);
      sparseset_clear (objects_live);
      REG_SET_TO_HARD_REG_SET (hard_regs_live, reg_live_out);
      hard_regs_live &= ~(eliminable_regset | ira_no_alloc_regs);
      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (hard_regs_live, i))
          {
            enum reg_class aclass, pclass, cl;

            aclass = ira_allocno_class_translate[REGNO_REG_CLASS (i)];
            pclass = ira_pressure_class_translate[aclass];
            for (j = 0;
                 (cl = ira_reg_class_super_classes[pclass][j])
                   != LIM_REG_CLASSES;
                 j++)
              {
                if (! ira_reg_pressure_class_p[cl])
                  continue;
                curr_reg_pressure[cl]++;
                if (curr_bb_node->reg_pressure[cl] < curr_reg_pressure[cl])
                  curr_bb_node->reg_pressure[cl] = curr_reg_pressure[cl];
                ira_assert (curr_reg_pressure[cl]
                            <= ira_class_hard_regs_num[cl]);
              }
          }
      EXECUTE_IF_SET_IN_BITMAP (reg_live_out, FIRST_PSEUDO_REGISTER, j, bi)
        mark_pseudo_regno_live (j);

#ifdef EH_RETURN_DATA_REGNO
      process_out_of_region_eh_regs (bb);
#endif

      freq = REG_FREQ_FROM_BB (bb);
      if (freq == 0)
        freq = 1;

      /* Invalidate all allocno_saved_at_call entries.  */
      last_call_num++;

      /* Scan the code of this basic block, noting which allocnos and
         hard regs are born or die.

         Note that this loop treats uninitialized values as live until
         the beginning of the block.  For example, if an instruction
         uses (reg:DI foo), and only (subreg:SI (reg:DI foo) 0) is ever
         set, FOO will remain live until the beginning of the block.
         Likewise if FOO is not set at all.  This is unnecessarily
         pessimistic, but it probably doesn't matter much in practice.  */
      FOR_BB_INSNS_REVERSE (bb, insn)
        {
          ira_allocno_t a;
          df_ref def, use;
          bool call_p;

          if (!NONDEBUG_INSN_P (insn))
            continue;

          if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
            fprintf (ira_dump_file, "   Insn %u(l%d): point = %d\n",
                     INSN_UID (insn), loop_tree_node->parent->loop_num,
                     curr_point);

          call_p = CALL_P (insn);
          ignore_reg_for_conflicts = non_conflicting_reg_copy_p (insn);

          /* Mark each defined value as live.  We need to do this for
             unused values because they still conflict with quantities
             that are live at the time of the definition.

             Ignore DF_REF_MAY_CLOBBERs on a call instruction.  Such
             references represent the effect of the called function
             on a call-clobbered register.  Marking the register as
             live would stop us from allocating it to a call-crossing
             allocno.  */
          FOR_EACH_INSN_DEF (def, insn)
            if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
              mark_ref_live (def);

          /* If INSN has multiple outputs, then any value used in one
             of the outputs conflicts with the other outputs.  Model this
             by making the used value live during the output phase.

             It is unsafe to use !single_set here since it will ignore
             an unused output.  Just because an output is unused does
             not mean the compiler can assume the side effect will not
             occur.  Consider if ALLOCNO appears in the address of an
             output and we reload the output.  If we allocate ALLOCNO
             to the same hard register as an unused output we could
             set the hard register before the output reload insn.  */
          if (GET_CODE (PATTERN (insn)) == PARALLEL && multiple_sets (insn))
            FOR_EACH_INSN_USE (use, insn)
              {
                int i;
                rtx reg;

                reg = DF_REF_REG (use);
                for (i = XVECLEN (PATTERN (insn), 0) - 1; i >= 0; i--)
                  {
                    rtx set;

                    set = XVECEXP (PATTERN (insn), 0, i);
                    if (GET_CODE (set) == SET
                        && reg_overlap_mentioned_p (reg, SET_DEST (set)))
                      {
                        /* After the previous loop, this is a no-op if
                           REG is contained within SET_DEST (SET).  */
                        mark_ref_live (use);
                        break;
                      }
                  }
              }

          preferred_alternatives = ira_setup_alts (insn);
          process_single_reg_class_operands (false, freq);

          if (call_p)
            {
              /* Try to find a SET in the CALL_INSN_FUNCTION_USAGE, and from
                 there, try to find a pseudo that is live across the call but
                 can be cheaply reconstructed from the return value.  */
              rtx cheap_reg = find_call_crossed_cheap_reg (insn);
              if (cheap_reg != NULL_RTX)
                add_reg_note (insn, REG_RETURNED, cheap_reg);

              last_call_num++;
              sparseset_clear (allocnos_processed);
              /* The current set of live allocnos are live across the call.  */
              EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
1369 | { |
1370 | ira_object_t obj = ira_object_id_map[i]; |
1371 | a = OBJECT_ALLOCNO (obj)((obj)->allocno); |
1372 | int num = ALLOCNO_NUM (a)((a)->num); |
1373 | function_abi callee_abi = insn_callee_abi (insn); |
1374 | |
1375 | /* Don't allocate allocnos that cross setjmps or any |
1376 | call, if this function receives a nonlocal |
1377 | goto. */ |
1378 | if (cfun(cfun + 0)->has_nonlocal_label |
1379 | || (!targetm.setjmp_preserves_nonvolatile_regs_p () |
1380 | && (find_reg_note (insn, REG_SETJMP, NULL_RTX(rtx) 0) |
1381 | != NULL_RTX(rtx) 0))) |
1382 | { |
1383 | SET_HARD_REG_SET (OBJECT_CONFLICT_HARD_REGS (obj)((obj)->conflict_hard_regs)); |
1384 | SET_HARD_REG_SET (OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs)); |
1385 | } |
1386 | if (can_throw_internal (insn)) |
1387 | { |
1388 | OBJECT_CONFLICT_HARD_REGS (obj)((obj)->conflict_hard_regs) |
1389 | |= callee_abi.mode_clobbers (ALLOCNO_MODE (a)((a)->mode)); |
1390 | OBJECT_TOTAL_CONFLICT_HARD_REGS (obj)((obj)->total_conflict_hard_regs) |
1391 | |= callee_abi.mode_clobbers (ALLOCNO_MODE (a)((a)->mode)); |
1392 | } |
1393 | |
1394 | if (sparseset_bit_p (allocnos_processed, num)) |
1395 | continue; |
1396 | sparseset_set_bit (allocnos_processed, num); |
1397 | |
                  if (allocno_saved_at_call[num] != last_call_num)
                    /* Here we are mimicking caller-save.cc behavior,
                       which does not save a hard register at a call if
                       it was saved on a previous call in the same basic
                       block and the hard register was not mentioned
                       between the two calls.  */
                    ALLOCNO_CALL_FREQ (a) += freq;
                  /* Mark it as saved at the next call.  */
                  allocno_saved_at_call[num] = last_call_num + 1;
                  ALLOCNO_CALLS_CROSSED_NUM (a)++;
                  ALLOCNO_CROSSED_CALLS_ABIS (a) |= 1 << callee_abi.id ();
                  ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS (a)
                    |= callee_abi.full_and_partial_reg_clobbers ();
                  if (cheap_reg != NULL_RTX
                      && ALLOCNO_REGNO (a) == (int) REGNO (cheap_reg))
                    ALLOCNO_CHEAP_CALLS_CROSSED_NUM (a)++;
                }
            }
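          /* Worked example of the bookkeeping above, with hypothetical
             ABI ids: an allocno live across two calls whose
             insn_callee_abi ids are 0 and 2 ends up with
             ALLOCNO_CROSSED_CALLS_ABIS == (1 << 0) | (1 << 2) == 0b101,
             while ALLOCNO_CROSSED_CALLS_CLOBBERED_REGS accumulates the
             union of both ABIs' full_and_partial_reg_clobbers ().  */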

          /* See which defined values die here.  Note that we include
             the call insn in the lifetimes of these values, so that we
             don't mistakenly consider, e.g. for an addressing mode with
             a side effect like a post-increment fetching the address,
             that the use happens before the call and the def after it:
             we believe both to happen before the actual call.  (We
             don't handle return values here.)  */
          FOR_EACH_INSN_DEF (def, insn)
            if (!call_p || !DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER))
              mark_ref_dead (def);

          make_early_clobber_and_input_conflicts ();

          curr_point++;

          /* Mark each used value as live.  */
          FOR_EACH_INSN_USE (use, insn)
            mark_ref_live (use);

          process_single_reg_class_operands (true, freq);

          set_p = mark_hard_reg_early_clobbers (insn, true);

          if (set_p)
            {
              mark_hard_reg_early_clobbers (insn, false);

              /* Mark each hard reg as live again.  For example, a
                 hard register can appear both in a clobber and in an
                 insn input.  */
              FOR_EACH_INSN_USE (use, insn)
                {
                  rtx ureg = DF_REF_REG (use);

                  if (GET_CODE (ureg) == SUBREG)
                    ureg = SUBREG_REG (ureg);
                  if (! REG_P (ureg) || REGNO (ureg) >= FIRST_PSEUDO_REGISTER)
                    continue;

                  mark_ref_live (use);
                }
            }

          curr_point++;
        }
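      /* Point accounting, by way of a concrete example: each real insn
         scanned above advances curr_point twice, once after its defs
         are processed and once after its uses, so a block whose only
         real insns are I1 and I2 consumes four consecutive program
         points; one more point is taken below, where the objects still
         live at the block start are made dead.  */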
      ignore_reg_for_conflicts = NULL_RTX;

      if (bb_has_eh_pred (bb))
        for (j = 0; ; ++j)
          {
            unsigned int regno = EH_RETURN_DATA_REGNO (j);
            if (regno == INVALID_REGNUM)
              break;
            make_hard_regno_live (regno);
          }

      /* Allocnos can't go in stack regs at the start of a basic block
         that is reached by an abnormal edge.  Likewise for registers
         that are at least partly call clobbered, because caller-save,
         fixup_abnormal_edges and possibly the table driven EH machinery
         are not quite ready to handle such allocnos live across such
         edges.  */
      if (bb_has_abnormal_pred (bb))
        {
#ifdef STACK_REGS
          EXECUTE_IF_SET_IN_SPARSESET (objects_live, px)
            {
              ira_allocno_t a = OBJECT_ALLOCNO (ira_object_id_map[px]);

              ALLOCNO_NO_STACK_REG_P (a) = true;
              ALLOCNO_TOTAL_NO_STACK_REG_P (a) = true;
            }
          for (px = FIRST_STACK_REG; px <= LAST_STACK_REG; px++)
            make_hard_regno_live (px);
#endif
          /* No need to record conflicts for call clobbered regs if we
             have nonlocal labels around, as we don't ever try to
             allocate such regs in this case.  */
          if (!cfun->has_nonlocal_label
              && has_abnormal_call_or_eh_pred_edge_p (bb))
            for (px = 0; px < FIRST_PSEUDO_REGISTER; px++)
              if (eh_edge_abi.clobbers_at_least_part_of_reg_p (px)
#ifdef REAL_PIC_OFFSET_TABLE_REGNUM
                  /* We should create a conflict of the PIC pseudo with
                     the PIC hard reg, as the PIC hard reg can have a
                     wrong value after the jump described by the
                     abnormal edge.  In that case we cannot allocate
                     the PIC hard reg to the PIC pseudo, as the PIC
                     pseudo will also have a wrong value.  This code is
                     not critical, as LRA can fix it, but it is better
                     to get the right allocation earlier.  */
                  || (px == REAL_PIC_OFFSET_TABLE_REGNUM
                      && pic_offset_table_rtx != NULL_RTX
                      && REGNO (pic_offset_table_rtx) >= FIRST_PSEUDO_REGISTER)
#endif
                  )
                make_hard_regno_live (px);
        }

      EXECUTE_IF_SET_IN_SPARSESET (objects_live, i)
        make_object_dead (ira_object_id_map[i]);

      curr_point++;

    }
  /* Propagate register pressure to upper loop tree nodes.  */
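  /* E.g. (assuming GENERAL_REGS is a pressure class on the target): if
     this loop's reg_pressure[GENERAL_REGS] is 10 and the parent's is 7,
     the parent's becomes 10, so each node ends up recording the maximum
     pressure seen anywhere in its subtree.  */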
  if (loop_tree_node != ira_loop_tree_root)
    for (i = 0; i < ira_pressure_classes_num; i++)
      {
        enum reg_class pclass;

        pclass = ira_pressure_classes[i];
        if (loop_tree_node->reg_pressure[pclass]
            > loop_tree_node->parent->reg_pressure[pclass])
          loop_tree_node->parent->reg_pressure[pclass]
            = loop_tree_node->reg_pressure[pclass];
      }
}

/* Create and set up IRA_START_POINT_RANGES and
   IRA_FINISH_POINT_RANGES.  */
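/* For instance (hypothetical ranges): if r1 = [2..5] and r2 = [2..9]
   are both present and r1 is linked first, ira_start_point_ranges[2]
   ends up pointing at r2 with r2->start_next == r1; each array slot
   heads a chain, through the start_next/finish_next fields, of all
   ranges sharing that start or finish point.  */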
static void
create_start_finish_chains (void)
{
  ira_object_t obj;
  ira_object_iterator oi;
  live_range_t r;

  ira_start_point_ranges
    = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
  memset (ira_start_point_ranges, 0, ira_max_point * sizeof (live_range_t));
  ira_finish_point_ranges
    = (live_range_t *) ira_allocate (ira_max_point * sizeof (live_range_t));
  memset (ira_finish_point_ranges, 0, ira_max_point * sizeof (live_range_t));
  FOR_EACH_OBJECT (obj, oi)
    for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
      {
        r->start_next = ira_start_point_ranges[r->start];
        ira_start_point_ranges[r->start] = r;
        r->finish_next = ira_finish_point_ranges[r->finish];
        ira_finish_point_ranges[r->finish] = r;
      }
}

/* Rebuild IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES after
   new live ranges and program points were added as a result of new
   insn generation.  */
void
ira_rebuild_start_finish_chains (void)
{
  ira_free (ira_finish_point_ranges);
  ira_free (ira_start_point_ranges);
  create_start_finish_chains ();
}

/* Compress allocno live ranges by removing program points where
   nothing happens.  */
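/* A worked example of the compression map built below (hypothetical
   points): with born = {0, 1, 4} and dead = {2, 4, 6}, point 1 is
   born-only immediately after born-only point 0, so both map to
   compressed point 0; point 2 (dead-only) maps to 1; point 4 (both
   born and dead) maps to 2; and point 6 (dead-only, but following a
   born-and-dead point) maps to 3, giving ira_max_point == 4 where it
   was previously 7.  */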
static void
remove_some_program_points_and_update_live_ranges (void)
{
  unsigned i;
  int n;
  int *map;
  ira_object_t obj;
  ira_object_iterator oi;
  live_range_t r, prev_r, next_r;
  sbitmap_iterator sbi;
  bool born_p, dead_p, prev_born_p, prev_dead_p;

  auto_sbitmap born (ira_max_point);
  auto_sbitmap dead (ira_max_point);
  bitmap_clear (born);
  bitmap_clear (dead);
  FOR_EACH_OBJECT (obj, oi)
    for (r = OBJECT_LIVE_RANGES (obj); r != NULL; r = r->next)
      {
        ira_assert (r->start <= r->finish);
        bitmap_set_bit (born, r->start);
        bitmap_set_bit (dead, r->finish);
      }

  auto_sbitmap born_or_dead (ira_max_point);
  bitmap_ior (born_or_dead, born, dead);
  map = (int *) ira_allocate (sizeof (int) * ira_max_point);
  n = -1;
  prev_born_p = prev_dead_p = false;
  EXECUTE_IF_SET_IN_BITMAP (born_or_dead, 0, i, sbi)
    {
      born_p = bitmap_bit_p (born, i);
      dead_p = bitmap_bit_p (dead, i);
      if ((prev_born_p && ! prev_dead_p && born_p && ! dead_p)
          || (prev_dead_p && ! prev_born_p && dead_p && ! born_p))
        map[i] = n;
      else
        map[i] = ++n;
      prev_born_p = born_p;
      prev_dead_p = dead_p;
    }

  n++;
  if (internal_flag_ira_verbose > 1 && ira_dump_file != NULL)
    fprintf (ira_dump_file, "Compressing live ranges: from %d to %d - %d%%\n",
             ira_max_point, n, 100 * n / ira_max_point);
  ira_max_point = n;

  FOR_EACH_OBJECT (obj, oi)
    for (r = OBJECT_LIVE_RANGES (obj), prev_r = NULL; r != NULL; r = next_r)
      {
        next_r = r->next;
        r->start = map[r->start];
        r->finish = map[r->finish];
        if (prev_r == NULL || prev_r->start > r->finish + 1)
          {
            prev_r = r;
            continue;
          }
        prev_r->start = r->start;
        prev_r->next = next_r;
        ira_finish_live_range (r);
      }

  ira_free (map);
}

/* Print live ranges R to file F.  */
void
ira_print_live_range_list (FILE *f, live_range_t r)
{
  for (; r != NULL; r = r->next)
    fprintf (f, " [%d..%d]", r->start, r->finish);
  fprintf (f, "\n");
}

DEBUG_FUNCTION void
debug (live_range &ref)
{
  ira_print_live_range_list (stderr, &ref);
}

DEBUG_FUNCTION void
debug (live_range *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Print live ranges R to stderr.  */
void
ira_debug_live_range_list (live_range_t r)
{
  ira_print_live_range_list (stderr, r);
}

/* Print live ranges of object OBJ to file F.  */
static void
print_object_live_ranges (FILE *f, ira_object_t obj)
{
  ira_print_live_range_list (f, OBJECT_LIVE_RANGES (obj));
}

/* Print live ranges of allocno A to file F.  */
static void
print_allocno_live_ranges (FILE *f, ira_allocno_t a)
{
  int n = ALLOCNO_NUM_OBJECTS (a);
  int i;

  for (i = 0; i < n; i++)
    {
      fprintf (f, " a%d(r%d", ALLOCNO_NUM (a), ALLOCNO_REGNO (a));
      if (n > 1)
        fprintf (f, " [%d]", i);
      fprintf (f, "):");
      print_object_live_ranges (f, ALLOCNO_OBJECT (a, i));
    }
}

/* Print live ranges of allocno A to stderr.  */
void
ira_debug_allocno_live_ranges (ira_allocno_t a)
{
  print_allocno_live_ranges (stderr, a);
}

/* Print live ranges of all allocnos to file F.  */
static void
print_live_ranges (FILE *f)
{
  ira_allocno_t a;
  ira_allocno_iterator ai;

  FOR_EACH_ALLOCNO (a, ai)
    print_allocno_live_ranges (f, a);
}

/* Print live ranges of all allocnos to stderr.  */
void
ira_debug_live_ranges (void)
{
  print_live_ranges (stderr);
}

/* The main entry function: it creates live ranges, sets up
   CONFLICT_HARD_REGS and TOTAL_CONFLICT_HARD_REGS for objects, and
   calculates register pressure info.  */
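/* A minimal usage sketch (the call sites are assumed, not shown in
   this file; in GCC they belong to the IRA build phase):

     ira_create_allocno_live_ranges ();
     ira_compress_allocno_live_ranges ();
     ... consult ira_start_point_ranges / ira_finish_point_ranges ...
     ira_finish_allocno_live_ranges ();

   ira_compress_allocno_live_ranges rebuilds the two chain arrays after
   dropping idle program points, so the order above matters.  */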
void
ira_create_allocno_live_ranges (void)
{
  objects_live = sparseset_alloc (ira_objects_num);
  allocnos_processed = sparseset_alloc (ira_allocnos_num);
  curr_point = 0;
  last_call_num = 0;
  allocno_saved_at_call
    = (int *) ira_allocate (ira_allocnos_num * sizeof (int));
  memset (allocno_saved_at_call, 0, ira_allocnos_num * sizeof (int));
  ira_traverse_loop_tree (true, ira_loop_tree_root, NULL,
                          process_bb_node_lives);
  ira_max_point = curr_point;
  create_start_finish_chains ();
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    print_live_ranges (ira_dump_file);
  /* Clean up.  */
  ira_free (allocno_saved_at_call);
  sparseset_free (objects_live);
  sparseset_free (allocnos_processed);
}

/* Compress allocno live ranges.  */
void
ira_compress_allocno_live_ranges (void)
{
  remove_some_program_points_and_update_live_ranges ();
  ira_rebuild_start_finish_chains ();
  if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
    {
      fprintf (ira_dump_file, "Ranges after the compression:\n");
      print_live_ranges (ira_dump_file);
    }
}

/* Free arrays IRA_START_POINT_RANGES and IRA_FINISH_POINT_RANGES.  */
void
ira_finish_allocno_live_ranges (void)
{
  ira_free (ira_finish_point_ranges);
  ira_free (ira_start_point_ranges);
}