File: build/gcc/lra-assigns.cc
Warning: line 1233, column 6: Value stored to 'nregs_diff' is never read
1 | /* Assign reload pseudos. |
2 | Copyright (C) 2010-2023 Free Software Foundation, Inc. |
3 | Contributed by Vladimir Makarov <vmakarov@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | |
22 | /* This file's main objective is to assign hard registers to reload |
23 | pseudos. It also tries to allocate hard registers to other |
24 | pseudos, but at a lower priority than the reload pseudos. The pass |
25 | does not transform the RTL. |
26 | |
27 | We must allocate a hard register to every reload pseudo. We try to |
28 | increase the chances of finding a viable allocation by assigning |
29 | the pseudos in order of fewest available hard registers first. If |
30 | we still fail to find a hard register, we spill other (non-reload) |
31 | pseudos in order to make room. |
32 | |
33 | find_hard_regno_for finds hard registers for allocation without |
34 | spilling. spill_for does the same with spilling. Both functions |
35 | use a cost model to determine the most profitable choice of hard |
36 | and spill registers. |
37 | |
38 | Once we have finished allocating reload pseudos, we also try to |
39 | assign registers to other (non-reload) pseudos. This is useful if |
40 | hard registers were freed up by the spilling just described. |
41 | |
42 | We try to assign hard registers by collecting pseudos into threads. |
43 | These threads contain reload and inheritance pseudos that are |
44 | connected by copies (move insns). Doing this improves the chances |
45 | of pseudos in the thread getting the same hard register and, as a |
46 | result, of allowing some move insns to be deleted. |
47 | |
48 | When we assign a hard register to a pseudo, we decrease the cost of |
49 | using the same hard register for pseudos that are connected by |
50 | copies. |
51 | |
52 | If two hard registers have the same frequency-derived cost, we |
53 | prefer hard registers with higher priorities. The mapping of |
54 | registers to priorities is controlled by the register_priority |
55 | target hook. For example, x86-64 has a few register priorities: |
56 | hard registers with and without REX prefixes have different |
57 | priorities. This permits us to generate smaller code as insns |
58 | without REX prefixes are shorter. |
59 | |
60 | If a few hard registers are still equally good for the assignment, |
61 | we choose the least used hard register. This is called leveling and |
62 | may be profitable for some targets. |
63 | |
64 | Only insns with changed allocation pseudos are processed on the |
65 | next constraint pass. |
66 | |
67 | The pseudo live-ranges are used to find conflicting pseudos. |
68 | |
69 | For understanding the code, it is important to keep in mind that |
70 | inheritance, split, and reload pseudos created since the last |
71 | constraint pass have regno >= lra_constraint_new_regno_start. |
72 | Inheritance and split pseudos created on any pass are in the |
73 | corresponding bitmaps. Inheritance and split pseudos created since |
74 | the last constraint pass also have a corresponding non-negative |
75 | restore_regno. */ |
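
The tie-breaking chain described above (frequency-derived cost, then target priority, then leveling by usage) amounts to a lexicographic comparison. A minimal standalone sketch of that decision follows; the candidate record is a hypothetical stand-in, not an LRA type:

    /* Returns true when candidate C beats BEST under the lexicographic
       order sketched in the comment above.  All fields are hypothetical
       stand-ins for values LRA tracks per hard register.  */
    struct candidate { int hard_regno, cost, priority, usage; };

    static bool
    better_p (const candidate &c, const candidate &best, bool leveling_p)
    {
      if (c.cost != best.cost)
        return c.cost < best.cost;          /* cheaper wins */
      if (c.priority != best.priority)
        return c.priority > best.priority;  /* then higher priority */
      /* Then, only if the target enables leveling, the least used reg.  */
      return leveling_p && c.usage < best.usage;
    }
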
76 | |
77 | #include "config.h" |
78 | #include "system.h" |
79 | #include "coretypes.h" |
80 | #include "backend.h" |
81 | #include "target.h" |
82 | #include "rtl.h" |
83 | #include "tree.h" |
84 | #include "predict.h" |
85 | #include "df.h" |
86 | #include "memmodel.h" |
87 | #include "tm_p.h" |
88 | #include "insn-config.h" |
89 | #include "regs.h" |
90 | #include "ira.h" |
91 | #include "recog.h" |
92 | #include "rtl-error.h" |
93 | #include "sparseset.h" |
94 | #include "lra.h" |
95 | #include "lra-int.h" |
96 | #include "function-abi.h" |
97 | |
98 | /* Current iteration number of the pass and current iteration number |
99 | of the pass after the latest spill pass when any former reload |
100 | pseudo was spilled. */ |
101 | int lra_assignment_iter; |
102 | int lra_assignment_iter_after_spill; |
103 | |
104 | /* Flag of spilling former reload pseudos on this pass. */ |
105 | static bool former_reload_pseudo_spill_p; |
106 | |
107 | /* Array containing corresponding values of function |
108 | lra_get_allocno_class. It is used to speed up the code. */ |
109 | static enum reg_class *regno_allocno_class_array; |
110 | |
111 | /* Array containing lengths of pseudo live ranges. It is used to |
112 | speed up the code. */ |
113 | static int *regno_live_length; |
114 | |
115 | /* Information about the thread to which a pseudo belongs. A thread is |
116 | a set of connected reload and inheritance pseudos with the same set of |
117 | available hard registers. Lone registers belong to their own threads. */ |
118 | struct regno_assign_info |
119 | { |
120 | /* First/next pseudo of the same thread. */ |
121 | int first, next; |
122 | /* Frequency of the thread (execution frequency of only reload |
123 | pseudos in the thread when the thread contains a reload pseudo). |
124 | Defined only for the first thread pseudo. */ |
125 | int freq; |
126 | }; |
127 | |
128 | /* Map regno to the corresponding regno assignment info. */ |
129 | static struct regno_assign_info *regno_assign_info; |
130 | |
131 | /* All inherited, subreg or optional pseudos created before the last |
132 | spill sub-pass. Such pseudos are permitted to get memory instead of |
133 | hard regs. */ |
134 | static bitmap_head non_reload_pseudos; |
135 | |
136 | /* Process a pseudo copy with execution frequency COPY_FREQ connecting |
137 | REGNO1 and REGNO2 to form threads. */ |
138 | static void |
139 | process_copy_to_form_thread (int regno1, int regno2, int copy_freq) |
140 | { |
141 | int last, regno1_first, regno2_first; |
142 | |
143 | lra_assert (regno1 >= lra_constraint_new_regno_start |
144 | && regno2 >= lra_constraint_new_regno_start); |
145 | regno1_first = regno_assign_info[regno1].first; |
146 | regno2_first = regno_assign_info[regno2].first; |
147 | if (regno1_first != regno2_first) |
148 | { |
149 | for (last = regno2_first; |
150 | regno_assign_info[last].next >= 0; |
151 | last = regno_assign_info[last].next) |
152 | regno_assign_info[last].first = regno1_first; |
153 | regno_assign_info[last].first = regno1_first; |
154 | regno_assign_info[last].next = regno_assign_info[regno1_first].next; |
155 | regno_assign_info[regno1_first].next = regno2_first; |
156 | regno_assign_info[regno1_first].freq |
157 | += regno_assign_info[regno2_first].freq; |
158 | } |
159 | regno_assign_info[regno1_first].freq -= 2 * copy_freq; |
160 | lra_assert (regno_assign_info[regno1_first].freq >= 0); |
161 | } |
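
The merge above is easier to follow on a stripped-down model. The following is a self-contained sketch, not LRA code: plain arrays stand in for regno_assign_info, and N is an arbitrary pseudo count.

    #include <cassert>

    #define N 8                    /* hypothetical number of pseudos */
    static int first[N], nxt[N], freq[N];

    static void
    merge_threads (int r1, int r2, int copy_freq)
    {
      int a = first[r1], b = first[r2], last;

      if (a != b)
        {
          /* Retarget every member of thread B to head A; LAST ends up on
             B's tail, which the loop body never visits.  */
          for (last = b; nxt[last] >= 0; last = nxt[last])
            first[last] = a;
          first[last] = a;
          /* Splice B's chain right after A's head and merge frequencies.  */
          nxt[last] = nxt[a];
          nxt[a] = b;
          freq[a] += freq[b];
        }
      /* Inside one thread the copy itself costs nothing, so its frequency
         is refunded (it was counted once per end point).  */
      freq[a] -= 2 * copy_freq;
      assert (freq[a] >= 0);
    }
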
162 | |
163 | /* Initialize REGNO_ASSIGN_INFO and form threads. */ |
164 | static void |
165 | init_regno_assign_info (void) |
166 | { |
167 | int i, regno1, regno2, max_regno = max_reg_num (); |
168 | lra_copy_t cp; |
169 | |
170 | regno_assign_info = XNEWVEC (struct regno_assign_info, max_regno); |
171 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
172 | { |
173 | regno_assign_info[i].first = i; |
174 | regno_assign_info[i].next = -1; |
175 | regno_assign_info[i].freq = lra_reg_info[i].freq; |
176 | } |
177 | /* Form the threads. */ |
178 | for (i = 0; (cp = lra_get_copy (i)) != NULL; i++) |
179 | if ((regno1 = cp->regno1) >= lra_constraint_new_regno_start |
180 | && (regno2 = cp->regno2) >= lra_constraint_new_regno_start |
181 | && reg_renumber[regno1] < 0 && lra_reg_info[regno1].nrefs != 0 |
182 | && reg_renumber[regno2] < 0 && lra_reg_info[regno2].nrefs != 0 |
183 | && (ira_class_hard_regs_num[regno_allocno_class_array[regno1]] |
184 | == ira_class_hard_regs_num[regno_allocno_class_array[regno2]])) |
185 | process_copy_to_form_thread (regno1, regno2, cp->freq); |
186 | } |
187 | |
188 | /* Free REGNO_ASSIGN_INFO. */ |
189 | static void |
190 | finish_regno_assign_info (void) |
191 | { |
192 | free (regno_assign_info); |
193 | } |
194 | |
195 | /* The function is used to sort *reload* and *inheritance* pseudos to |
196 | try to assign them hard registers. Pseudos from the same thread are |
197 | always kept nearby. */ |
198 | static int |
199 | reload_pseudo_compare_func (const void *v1p, const void *v2p) |
200 | { |
201 | int r1 = *(const int *) v1p, r2 = *(const int *) v2p; |
202 | enum reg_class cl1 = regno_allocno_class_array[r1]; |
203 | enum reg_class cl2 = regno_allocno_class_array[r2]; |
204 | int diff; |
205 | |
206 | lra_assert (r1 >= lra_constraint_new_regno_start |
207 | && r2 >= lra_constraint_new_regno_start); |
208 | |
209 | /* Prefer to assign reload registers with smaller classes first to |
210 | guarantee assignment to all reload registers. */ |
211 | if ((diff = (ira_class_hard_regs_num[cl1] |
212 | - ira_class_hard_regs_num[cl2])) != 0) |
213 | return diff; |
214 | /* Allocate bigger pseudos first to avoid register file |
215 | fragmentation. */ |
216 | if ((diff |
217 | = (ira_reg_class_max_nregs[cl2][lra_reg_info[r2].biggest_mode] |
218 | - ira_reg_class_max_nregs[cl1][lra_reg_info[r1].biggest_mode])) != 0) |
219 | return diff; |
220 | if ((diff = (regno_assign_info[regno_assign_info[r2].first].freq |
221 | - regno_assign_info[regno_assign_info[r1].first].freq)) != 0) |
222 | return diff; |
223 | /* Put pseudos from the thread nearby. */ |
224 | if ((diff = regno_assign_info[r1].first - regno_assign_info[r2].first) != 0) |
225 | return diff; |
226 | /* Prefer pseudos with longer live ranges. It sets up better |
227 | preferred hard registers for the thread pseudos and decreases |
228 | register-register moves between the thread pseudos. */ |
229 | if ((diff = regno_live_length[r2] - regno_live_length[r1]) != 0) |
230 | return diff; |
231 | /* If regs are equally good, sort by their numbers, so that the |
232 | results of qsort leave nothing to chance. */ |
233 | return r1 - r2; |
234 | } |
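
The comparator chains its criteria in the usual qsort style: each criterion yields a signed difference and the first nonzero one decides. A reduced standalone example of the same idiom, with hypothetical fields in place of the class-size and mode queries:

    struct item { int class_size, size_in_regs, regno; };  /* hypothetical */

    static int
    item_compare (const void *v1p, const void *v2p)
    {
      const item *i1 = (const item *) v1p, *i2 = (const item *) v2p;
      int diff;

      /* Smaller register class first (harder to satisfy).  */
      if ((diff = i1->class_size - i2->class_size) != 0)
        return diff;
      /* Bigger pseudos first (less fragmentation).  */
      if ((diff = i2->size_in_regs - i1->size_in_regs) != 0)
        return diff;
      /* Regno tie-break makes the qsort result deterministic.  */
      return i1->regno - i2->regno;
    }
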
235 | |
236 | /* The function is used to sort *non-reload* pseudos to try to assign |
237 | them hard registers. The order calculation is simpler than in the |
238 | previous function and based on the pseudo frequency usage. */ |
239 | static int |
240 | pseudo_compare_func (const void *v1p, const void *v2p) |
241 | { |
242 | int r1 = *(const int *) v1p, r2 = *(const int *) v2p; |
243 | int diff; |
244 | |
245 | /* Assign hard reg to static chain pointer first pseudo when |
246 | non-local goto is used. */ |
247 | if ((diff = (non_spilled_static_chain_regno_p (r2) |
248 | - non_spilled_static_chain_regno_p (r1))) != 0) |
249 | return diff; |
250 | |
251 | /* Prefer to assign more frequently used registers first. */ |
252 | if ((diff = lra_reg_info[r2].freq - lra_reg_info[r1].freq) != 0) |
253 | return diff; |
254 | |
255 | /* If regs are equally good, sort by their numbers, so that the |
256 | results of qsort leave nothing to chance. */ |
257 | return r1 - r2; |
258 | } |
259 | |
260 | /* Arrays of size LRA_LIVE_MAX_POINT mapping a program point to the |
261 | pseudo live ranges with given start point. We insert only live |
262 | ranges of pseudos interesting for assignment purposes. They are |
263 | reload pseudos and pseudos assigned to hard registers. */ |
264 | static lra_live_range_t *start_point_ranges; |
265 | |
266 | /* Used as a flag that a live range is not inserted in the start point |
267 | chain. */ |
268 | static struct lra_live_range not_in_chain_mark; |
269 | |
270 | /* Create and set up START_POINT_RANGES. */ |
271 | static void |
272 | create_live_range_start_chains (void) |
273 | { |
274 | int i, max_regno; |
275 | lra_live_range_t r; |
276 | |
277 | start_point_ranges = XCNEWVEC (lra_live_range_t, lra_live_max_point); |
278 | max_regno = max_reg_num (); |
279 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
280 | if (i >= lra_constraint_new_regno_start || reg_renumber[i] >= 0) |
281 | { |
282 | for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next) |
283 | { |
284 | r->start_next = start_point_ranges[r->start]; |
285 | start_point_ranges[r->start] = r; |
286 | } |
287 | } |
288 | else |
289 | { |
290 | for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next) |
291 | r->start_next = ¬_in_chain_mark; |
292 | } |
293 | } |
294 | |
295 | /* Insert live ranges of pseudo REGNO into start chains if they are |
296 | not there yet. */ |
297 | static void |
298 | insert_in_live_range_start_chain (int regno) |
299 | { |
300 | lra_live_range_t r = lra_reg_info[regno].live_ranges; |
301 | |
302 | if (r->start_next != ¬_in_chain_mark) |
303 | return; |
304 | for (; r != NULL; r = r->next) |
305 | { |
306 | r->start_next = start_point_ranges[r->start]; |
307 | start_point_ranges[r->start] = r; |
308 | } |
309 | } |
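
The start-point chains are just per-point bucket lists. A simplified sketch, with a cut-down range type standing in for lra_live_range, of how ranges are pushed onto their bucket and how all ranges starting inside a window can then be visited:

    #include <cstddef>

    struct range { int start, finish; range *start_next; };

    static range **start_buckets;   /* one chain per program point */

    /* Push R onto the bucket for its start point (newest first).  */
    static void
    push_range (range *r)
    {
      r->start_next = start_buckets[r->start];
      start_buckets[r->start] = r;
    }

    /* Visit every range beginning strictly inside (START, FINISH]; this is
       the walk find_hard_regno_for_1 uses to pick up newly live pseudos.  */
    template <typename F>
    static void
    visit_starting_ranges (int start, int finish, F f)
    {
      for (int p = start + 1; p <= finish; p++)
        for (range *r = start_buckets[p]; r != NULL; r = r->start_next)
          f (r);
    }
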
310 | |
311 | /* Free START_POINT_RANGES. */ |
312 | static void |
313 | finish_live_range_start_chains (void) |
314 | { |
315 | gcc_assert (start_point_ranges != NULL); |
316 | free (start_point_ranges); |
317 | start_point_ranges = NULL; |
318 | } |
319 | |
320 | /* Map: program point -> bitmap of all pseudos living at the point and |
321 | assigned to hard registers. */ |
322 | static bitmap_head *live_hard_reg_pseudos; |
323 | static bitmap_obstack live_hard_reg_pseudos_bitmap_obstack; |
324 | |
325 | /* reg_renumber corresponding to pseudos marked in |
326 | live_hard_reg_pseudos. reg_renumber might not match |
327 | live_hard_reg_pseudos, but live_pseudos_reg_renumber always reflects |
328 | live_hard_reg_pseudos. */ |
329 | static int *live_pseudos_reg_renumber; |
330 | |
331 | /* Sparseset used to calculate living hard reg pseudos for some program |
332 | point range. */ |
333 | static sparseset live_range_hard_reg_pseudos; |
334 | |
335 | /* Sparseset used to calculate living reload/inheritance pseudos for |
336 | some program point range. */ |
337 | static sparseset live_range_reload_inheritance_pseudos; |
338 | |
339 | /* Allocate and initialize the data about living pseudos at program |
340 | points. */ |
341 | static void |
342 | init_lives (void) |
343 | { |
344 | int i, max_regno = max_reg_num (); |
345 | |
346 | live_range_hard_reg_pseudos = sparseset_alloc (max_regno); |
347 | live_range_reload_inheritance_pseudos = sparseset_alloc (max_regno); |
348 | live_hard_reg_pseudos = XNEWVEC (bitmap_head, lra_live_max_point); |
349 | bitmap_obstack_initialize (&live_hard_reg_pseudos_bitmap_obstack); |
350 | for (i = 0; i < lra_live_max_point; i++) |
351 | bitmap_initialize (&live_hard_reg_pseudos[i], |
352 | &live_hard_reg_pseudos_bitmap_obstack); |
353 | live_pseudos_reg_renumber = XNEWVEC (int, max_regno); |
354 | for (i = 0; i < max_regno; i++) |
355 | live_pseudos_reg_renumber[i] = -1; |
356 | } |
357 | |
358 | /* Free the data about living pseudos at program points. */ |
359 | static void |
360 | finish_lives (void) |
361 | { |
362 | sparseset_free (live_range_hard_reg_pseudos); |
363 | sparseset_free (live_range_reload_inheritance_pseudos); |
364 | free (live_hard_reg_pseudos); |
365 | bitmap_obstack_release (&live_hard_reg_pseudos_bitmap_obstack); |
366 | free (live_pseudos_reg_renumber); |
367 | } |
368 | |
369 | /* Update the LIVE_HARD_REG_PSEUDOS and LIVE_PSEUDOS_REG_RENUMBER |
370 | entries for pseudo REGNO. Assume that the register has been |
371 | spilled if FREE_P, otherwise assume that it has been assigned |
372 | reg_renumber[REGNO] (if >= 0). We also insert the pseudo live |
373 | ranges in the start chains when it is assumed to be assigned to a |
374 | hard register because we use the chains of pseudos assigned to hard |
375 | registers during allocation. */ |
376 | static void |
377 | update_lives (int regno, bool free_p) |
378 | { |
379 | int p; |
380 | lra_live_range_t r; |
381 | |
382 | if (reg_renumber[regno] < 0) |
383 | return; |
384 | live_pseudos_reg_renumber[regno] = free_p ? -1 : reg_renumber[regno]; |
385 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
386 | { |
387 | for (p = r->start; p <= r->finish; p++) |
388 | if (free_p) |
389 | bitmap_clear_bit (&live_hard_reg_pseudos[p], regno); |
390 | else |
391 | { |
392 | bitmap_set_bit (&live_hard_reg_pseudos[p], regno); |
393 | insert_in_live_range_start_chain (regno); |
394 | } |
395 | } |
396 | } |
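
update_lives above maintains the point-indexed liveness map. A container-level sketch of the same bookkeeping follows, with a std::set per program point standing in for the obstack-allocated bitmaps:

    #include <cstddef>
    #include <set>
    #include <vector>

    struct live_range { int start, finish; live_range *next; };

    /* Program point -> set of pseudos live there with a hard register.  */
    static std::vector<std::set<int> > live_at_point;

    static void
    mark_live (int regno, const live_range *ranges, bool live_p)
    {
      for (const live_range *r = ranges; r != NULL; r = r->next)
        for (int p = r->start; p <= r->finish; p++)
          if (live_p)
            live_at_point[p].insert (regno);
          else
            live_at_point[p].erase (regno);
    }
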
397 | |
398 | /* Sparseset used to calculate reload pseudos conflicting with a given |
399 | pseudo when we are trying to find a hard register for the given |
400 | pseudo. */ |
401 | static sparseset conflict_reload_and_inheritance_pseudos; |
402 | |
403 | /* Map: program point -> bitmap of all reload and inheritance pseudos |
404 | living at the point. */ |
405 | static bitmap_head *live_reload_and_inheritance_pseudos; |
406 | static bitmap_obstack live_reload_and_inheritance_pseudos_bitmap_obstack; |
407 | |
408 | /* Allocate and initialize data about living reload pseudos at any |
409 | given program point. */ |
410 | static void |
411 | init_live_reload_and_inheritance_pseudos (void) |
412 | { |
413 | int i, p, max_regno = max_reg_num (); |
414 | lra_live_range_t r; |
415 | |
416 | conflict_reload_and_inheritance_pseudos = sparseset_alloc (max_regno); |
417 | live_reload_and_inheritance_pseudos = XNEWVEC (bitmap_head, lra_live_max_point); |
418 | bitmap_obstack_initialize (&live_reload_and_inheritance_pseudos_bitmap_obstack); |
419 | for (p = 0; p < lra_live_max_point; p++) |
420 | bitmap_initialize (&live_reload_and_inheritance_pseudos[p], |
421 | &live_reload_and_inheritance_pseudos_bitmap_obstack); |
422 | for (i = lra_constraint_new_regno_start; i < max_regno; i++) |
423 | { |
424 | for (r = lra_reg_info[i].live_ranges; r != NULL; r = r->next) |
425 | for (p = r->start; p <= r->finish; p++) |
426 | bitmap_set_bit (&live_reload_and_inheritance_pseudos[p], i); |
427 | } |
428 | } |
429 | |
430 | /* Finalize data about living reload pseudos at any given program |
431 | point. */ |
432 | static void |
433 | finish_live_reload_and_inheritance_pseudos (void) |
434 | { |
435 | sparseset_free (conflict_reload_and_inheritance_pseudos); |
436 | free (live_reload_and_inheritance_pseudos); |
437 | bitmap_obstack_release (&live_reload_and_inheritance_pseudos_bitmap_obstack); |
438 | } |
439 | |
440 | /* The value used to check that the cost of a given hard reg is |
441 | really defined currently. */ |
442 | static int curr_hard_regno_costs_check = 0; |
443 | /* Array used to check that the cost of the corresponding hard reg |
444 | (the array element index) is really defined currently. */ |
445 | static int hard_regno_costs_check[FIRST_PSEUDO_REGISTER]; |
446 | /* The current costs of allocation of hard regs. Defined only if the |
447 | value of the corresponding element of the previous array is equal to |
448 | CURR_HARD_REGNO_COSTS_CHECK. */ |
449 | static int hard_regno_costs[FIRST_PSEUDO_REGISTER]; |
450 | |
451 | /* Adjust cost of HARD_REGNO by INCR. Reset the cost first if it is |
452 | not defined yet. */ |
453 | static inline void |
454 | adjust_hard_regno_cost (int hard_regno, int incr) |
455 | { |
456 | if (hard_regno_costs_check[hard_regno] != curr_hard_regno_costs_check) |
457 | hard_regno_costs[hard_regno] = 0; |
458 | hard_regno_costs_check[hard_regno] = curr_hard_regno_costs_check; |
459 | hard_regno_costs[hard_regno] += incr; |
460 | } |
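
The pair of arrays plus the check counter is a classic generation-stamp trick: bumping the counter invalidates every cached cost in O(1), and each entry is zeroed lazily on first use. A standalone sketch, with NREGS as a stand-in for FIRST_PSEUDO_REGISTER on this target:

    #define NREGS 76   /* stand-in for FIRST_PSEUDO_REGISTER */

    static int cost[NREGS];
    static int stamp[NREGS];     /* generation when cost[] was last written */
    static int cur_stamp;

    /* Invalidate every cached cost at once.  */
    static void
    reset_all_costs (void)
    {
      cur_stamp++;
    }

    /* Lazily zero COST[HARD_REGNO] on its first touch this generation,
       exactly as adjust_hard_regno_cost does above.  */
    static void
    add_cost (int hard_regno, int incr)
    {
      if (stamp[hard_regno] != cur_stamp)
        {
          cost[hard_regno] = 0;
          stamp[hard_regno] = cur_stamp;
        }
      cost[hard_regno] += incr;
    }
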
461 | |
462 | /* Try to find a free hard register for pseudo REGNO. Return the |
463 | hard register on success and set *COST to the cost of using |
464 | that register. (If several registers have equal cost, the one with |
465 | the highest priority wins.) Return -1 on failure. |
466 | |
467 | If FIRST_P, return the first available hard reg ignoring other |
468 | criteria, e.g. allocation cost. This approach results in less hard |
469 | reg pool fragmentation and permits us to allocate hard regs to |
470 | reload pseudos in complicated situations where pseudo sizes differ. |
471 | |
472 | If TRY_ONLY_HARD_REGNO >= 0, consider only that hard register, |
473 | otherwise consider all hard registers in REGNO's class. |
474 | |
475 | If REGNO_SET is not empty, only hard registers from the set are |
476 | considered. */ |
477 | static int |
478 | find_hard_regno_for_1 (int regno, int *cost, int try_only_hard_regno, |
479 | bool first_p, HARD_REG_SET regno_set) |
480 | { |
481 | HARD_REG_SET conflict_set; |
482 | int best_cost = INT_MAX, best_priority = INT_MIN, best_usage = INT_MAX; |
483 | lra_live_range_t r; |
484 | int p, i, j, rclass_size, best_hard_regno, priority, hard_regno; |
485 | int hr, conflict_hr, nregs; |
486 | machine_mode biggest_mode; |
487 | unsigned int k, conflict_regno; |
488 | poly_int64 offset; |
489 | int val, biggest_nregs, nregs_diff; |
490 | enum reg_class rclass; |
491 | bitmap_iterator bi; |
492 | bool *rclass_intersect_p; |
493 | HARD_REG_SET impossible_start_hard_regs, available_regs; |
494 | |
495 | if (hard_reg_set_empty_p (regno_set)) |
496 | conflict_set = lra_no_alloc_regs; |
497 | else |
498 | conflict_set = ~regno_set | lra_no_alloc_regs; |
499 | rclass = regno_allocno_class_array[regno]; |
500 | rclass_intersect_p = ira_reg_classes_intersect_p[rclass]; |
501 | curr_hard_regno_costs_check++; |
502 | sparseset_clear (conflict_reload_and_inheritance_pseudos); |
503 | sparseset_clear (live_range_hard_reg_pseudos); |
504 | conflict_set |= lra_reg_info[regno].conflict_hard_regs; |
505 | biggest_mode = lra_reg_info[regno].biggest_mode; |
506 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
507 | { |
508 | EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi) |
509 | if (rclass_intersect_p[regno_allocno_class_array[k]]) |
510 | sparseset_set_bit (live_range_hard_reg_pseudos, k); |
511 | EXECUTE_IF_SET_IN_BITMAP (&live_reload_and_inheritance_pseudos[r->start], |
512 | 0, k, bi) |
513 | if (lra_reg_info[k].preferred_hard_regno1 >= 0 |
514 | && live_pseudos_reg_renumber[k] < 0 |
515 | && rclass_intersect_p[regno_allocno_class_array[k]]) |
516 | sparseset_set_bit (conflict_reload_and_inheritance_pseudos, k); |
517 | for (p = r->start + 1; p <= r->finish; p++) |
518 | { |
519 | lra_live_range_t r2; |
520 | |
521 | for (r2 = start_point_ranges[p]; |
522 | r2 != NULL; |
523 | r2 = r2->start_next) |
524 | { |
525 | if (r2->regno >= lra_constraint_new_regno_start |
526 | && lra_reg_info[r2->regno].preferred_hard_regno1 >= 0 |
527 | && live_pseudos_reg_renumber[r2->regno] < 0 |
528 | && rclass_intersect_p[regno_allocno_class_array[r2->regno]]) |
529 | sparseset_set_bit (conflict_reload_and_inheritance_pseudos, |
530 | r2->regno); |
531 | if (live_pseudos_reg_renumber[r2->regno] >= 0 |
532 | && rclass_intersect_p[regno_allocno_class_array[r2->regno]]) |
533 | sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno); |
534 | } |
535 | } |
536 | } |
537 | if ((hard_regno = lra_reg_info[regno].preferred_hard_regno1) >= 0) |
538 | { |
539 | adjust_hard_regno_cost |
540 | (hard_regno, -lra_reg_info[regno].preferred_hard_regno_profit1); |
541 | if ((hard_regno = lra_reg_info[regno].preferred_hard_regno2) >= 0) |
542 | adjust_hard_regno_cost |
543 | (hard_regno, -lra_reg_info[regno].preferred_hard_regno_profit2); |
544 | } |
545 | #ifdef STACK_REGS |
546 | if (lra_reg_info[regno].no_stack_p) |
547 | for (i = FIRST_STACK_REG; i <= LAST_STACK_REG; i++) |
548 | SET_HARD_REG_BIT (conflict_set, i); |
549 | #endif |
550 | sparseset_clear_bit (conflict_reload_and_inheritance_pseudos, regno); |
551 | val = lra_reg_info[regno].val; |
552 | offset = lra_reg_info[regno].offset; |
553 | impossible_start_hard_regs = lra_reg_info[regno].exclude_start_hard_regs; |
554 | EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno) |
555 | { |
556 | conflict_hr = live_pseudos_reg_renumber[conflict_regno]; |
557 | if (lra_reg_val_equal_p (conflict_regno, val, offset)) |
558 | { |
559 | conflict_hr = live_pseudos_reg_renumber[conflict_regno]; |
560 | nregs = hard_regno_nregs (conflict_hr, |
561 | lra_reg_info[conflict_regno].biggest_mode); |
562 | /* Remember about multi-register pseudos. For example, 2 |
563 | hard register pseudos can start on the same hard register |
564 | but cannot start on HR and HR+1/HR-1. */ |
565 | for (hr = conflict_hr + 1; |
566 | hr < FIRST_PSEUDO_REGISTER && hr < conflict_hr + nregs; |
567 | hr++) |
568 | SET_HARD_REG_BIT (impossible_start_hard_regs, hr); |
569 | for (hr = conflict_hr - 1; |
570 | hr >= 0 && (int) end_hard_regno (biggest_mode, hr) > conflict_hr; |
571 | hr--) |
572 | SET_HARD_REG_BIT (impossible_start_hard_regs, hr); |
573 | } |
574 | else |
575 | { |
576 | machine_mode biggest_conflict_mode |
577 | = lra_reg_info[conflict_regno].biggest_mode; |
578 | int biggest_conflict_nregs |
579 | = hard_regno_nregs (conflict_hr, biggest_conflict_mode); |
580 | |
581 | nregs_diff |
582 | = (biggest_conflict_nregs |
583 | - hard_regno_nregs (conflict_hr, |
584 | PSEUDO_REGNO_MODE (conflict_regno))); |
585 | add_to_hard_reg_set (&conflict_set, |
586 | biggest_conflict_mode, |
587 | conflict_hr |
588 | - (WORDS_BIG_ENDIAN ? nregs_diff : 0)); |
589 | if (hard_reg_set_subset_p (reg_class_contents[rclass], |
590 | conflict_set)) |
591 | return -1; |
592 | } |
593 | } |
594 | EXECUTE_IF_SET_IN_SPARSESET (conflict_reload_and_inheritance_pseudos, |
595 | conflict_regno) |
596 | if (!lra_reg_val_equal_p (conflict_regno, val, offset)) |
597 | { |
598 | lra_assert (live_pseudos_reg_renumber[conflict_regno] < 0); |
599 | if ((hard_regno |
600 | = lra_reg_info[conflict_regno].preferred_hard_regno1) >= 0) |
601 | { |
602 | adjust_hard_regno_cost |
603 | (hard_regno, |
604 | lra_reg_info[conflict_regno].preferred_hard_regno_profit1); |
605 | if ((hard_regno |
606 | = lra_reg_info[conflict_regno].preferred_hard_regno2) >= 0) |
607 | adjust_hard_regno_cost |
608 | (hard_regno, |
609 | lra_reg_info[conflict_regno].preferred_hard_regno_profit2); |
610 | } |
611 | } |
612 | /* Make sure that all registers in a multi-word pseudo belong to the |
613 | required class. */ |
614 | conflict_set |= ~reg_class_contents[rclass]; |
615 | lra_assert (rclass != NO_REGS); |
616 | rclass_size = ira_class_hard_regs_num[rclass]; |
617 | best_hard_regno = -1; |
618 | hard_regno = ira_class_hard_regs[rclass][0]; |
619 | biggest_nregs = hard_regno_nregs (hard_regno, biggest_mode); |
620 | nregs_diff = (biggest_nregs |
621 | - hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno))); |
622 | available_regs = reg_class_contents[rclass] & ~lra_no_alloc_regs; |
623 | for (i = 0; i < rclass_size; i++) |
624 | { |
625 | if (try_only_hard_regno >= 0) |
626 | hard_regno = try_only_hard_regno; |
627 | else |
628 | hard_regno = ira_class_hard_regs[rclass][i]; |
629 | if (! overlaps_hard_reg_set_p (conflict_set, |
630 | PSEUDO_REGNO_MODE (regno), hard_regno) |
631 | && targetm.hard_regno_mode_ok (hard_regno, |
632 | PSEUDO_REGNO_MODE (regno)) |
633 | /* We cannot use prohibited_class_mode_regs for all classes |
634 | because it is not defined for all classes. */ |
635 | && (ira_allocno_class_translate[rclass] != rclass |
636 | || ! TEST_HARD_REG_BIT (ira_prohibited_class_mode_regs |
637 | [rclass][PSEUDO_REGNO_MODE (regno)], |
638 | hard_regno)) |
639 | && ! TEST_HARD_REG_BIT (impossible_start_hard_regs, hard_regno) |
640 | && (nregs_diff == 0 |
641 | || (WORDS_BIG_ENDIAN |
642 | ? (hard_regno - nregs_diff >= 0 |
643 | && TEST_HARD_REG_BIT (available_regs, |
644 | hard_regno - nregs_diff)) |
645 | : TEST_HARD_REG_BIT (available_regs, |
646 | hard_regno + nregs_diff)))) |
647 | { |
648 | if (hard_regno_costs_check[hard_regno] |
649 | != curr_hard_regno_costs_check) |
650 | { |
651 | hard_regno_costs_check[hard_regno] = curr_hard_regno_costs_check; |
652 | hard_regno_costs[hard_regno] = 0; |
653 | } |
654 | for (j = 0; |
655 | j < hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno)); |
656 | j++) |
657 | if (! crtl->abi->clobbers_full_reg_p (hard_regno + j) |
658 | && ! df_regs_ever_live_p (hard_regno + j)) |
659 | /* It needs save restore. */ |
660 | hard_regno_costs[hard_regno] |
661 | += (2 |
662 | * REG_FREQ_FROM_BB (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) |
663 | + 1); |
664 | priority = targetm.register_priority (hard_regno); |
665 | if (best_hard_regno < 0 || hard_regno_costs[hard_regno] < best_cost |
666 | || (hard_regno_costs[hard_regno] == best_cost |
667 | && (priority > best_priority |
668 | || (targetm.register_usage_leveling_p () |
669 | && priority == best_priority |
670 | && best_usage > lra_hard_reg_usage[hard_regno])))) |
671 | { |
672 | best_hard_regno = hard_regno; |
673 | best_cost = hard_regno_costs[hard_regno]; |
674 | best_priority = priority; |
675 | best_usage = lra_hard_reg_usage[hard_regno]; |
676 | } |
677 | } |
678 | if (try_only_hard_regno >= 0 || (first_p && best_hard_regno >= 0)) |
679 | break; |
680 | } |
681 | if (best_hard_regno >= 0) |
682 | *cost = best_cost - lra_reg_info[regno].freq; |
683 | return best_hard_regno; |
684 | } |
685 | |
686 | /* A wrapper for find_hard_regno_for_1 (see comments for that function |
687 | description). This function tries to find a hard register in the |
688 | preferred class first, if that is worthwhile. */ |
689 | static int |
690 | find_hard_regno_for (int regno, int *cost, int try_only_hard_regno, bool first_p) |
691 | { |
692 | int hard_regno; |
693 | HARD_REG_SET regno_set; |
694 | |
695 | /* Only original pseudos can have a different preferred class. */ |
696 | if (try_only_hard_regno < 0 && regno < lra_new_regno_start) |
697 | { |
698 | enum reg_class pref_class = reg_preferred_class (regno); |
699 | |
700 | if (regno_allocno_class_array[regno] != pref_class) |
701 | { |
702 | hard_regno = find_hard_regno_for_1 (regno, cost, -1, first_p, |
703 | reg_class_contents[pref_class]); |
704 | if (hard_regno >= 0) |
705 | return hard_regno; |
706 | } |
707 | } |
708 | CLEAR_HARD_REG_SET (regno_set); |
709 | return find_hard_regno_for_1 (regno, cost, try_only_hard_regno, first_p, |
710 | regno_set); |
711 | } |
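
The wrapper is a two-phase search: first over the (usually smaller) preferred class, then over the whole allocno class. A toy sketch of the same fallback structure, using a 64-bit mask as a stand-in for HARD_REG_SET and a naive lowest-free-bit picker in place of the cost model:

    typedef unsigned long long reg_set;   /* toy stand-in for HARD_REG_SET */

    /* Hypothetical picker: lowest candidate register not in CONFLICTS,
       ignoring the cost model entirely.  */
    static int
    pick_reg (reg_set candidates, reg_set conflicts)
    {
      reg_set avail = candidates & ~conflicts;
      for (int hr = 0; hr < 64; hr++)
        if (avail & ((reg_set) 1 << hr))
          return hr;
      return -1;
    }

    /* The same preferred-class-then-full-class fallback as the wrapper.  */
    static int
    pick_reg_two_phase (reg_set pref, reg_set whole, reg_set conflicts)
    {
      int hr = pick_reg (pref, conflicts);
      return hr >= 0 ? hr : pick_reg (whole, conflicts);
    }
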
712 | |
713 | /* Current value used for checking elements in |
714 | update_hard_regno_preference_check. */ |
715 | static int curr_update_hard_regno_preference_check; |
716 | /* If an element value is equal to the above variable value, then the |
717 | corresponding regno has been processed for preference |
718 | propagation. */ |
719 | static int *update_hard_regno_preference_check; |
720 | |
721 | /* Update the preference for using HARD_REGNO for pseudos that are |
722 | connected directly or indirectly with REGNO. Apply divisor DIV |
723 | to any preference adjustments. |
724 | |
725 | The more indirectly a pseudo is connected, the smaller its effect |
726 | should be. We therefore increase DIV on each "hop". */ |
727 | static void |
728 | update_hard_regno_preference (int regno, int hard_regno, int div) |
729 | { |
730 | int another_regno, cost; |
731 | lra_copy_t cp, next_cp; |
732 | |
733 | /* Search depth 5 seems to be enough. */ |
734 | if (div > (1 << 5)) |
735 | return; |
736 | for (cp = lra_reg_info[regno].copies; cp != NULL; cp = next_cp) |
737 | { |
738 | if (cp->regno1 == regno) |
739 | { |
740 | next_cp = cp->regno1_next; |
741 | another_regno = cp->regno2; |
742 | } |
743 | else if (cp->regno2 == regno) |
744 | { |
745 | next_cp = cp->regno2_next; |
746 | another_regno = cp->regno1; |
747 | } |
748 | else |
749 | gcc_unreachable (); |
750 | if (reg_renumber[another_regno] < 0 |
751 | && (update_hard_regno_preference_check[another_regno] |
752 | != curr_update_hard_regno_preference_check)) |
753 | { |
754 | update_hard_regno_preference_check[another_regno] |
755 | = curr_update_hard_regno_preference_check; |
756 | cost = cp->freq < div ? 1 : cp->freq / div; |
757 | lra_setup_reload_pseudo_preferenced_hard_reg |
758 | (another_regno, hard_regno, cost); |
759 | update_hard_regno_preference (another_regno, hard_regno, div * 2); |
760 | } |
761 | } |
762 | } |
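
The recursion spreads a preference through the copy graph with geometric decay: DIV doubles per hop, so a copy of frequency F contributes roughly F, F/2, F/4, ... along a chain, and the walk cuts off after five hops. A self-contained sketch on a toy adjacency list; preference_profit stands in for lra_setup_reload_pseudo_preferenced_hard_reg:

    #include <vector>

    struct copy { int other_regno, freq; };

    static std::vector<std::vector<copy> > copies;   /* per-regno copy lists */
    static std::vector<int> visited_stamp;           /* generation marks */
    static std::vector<int> preference_profit;       /* accumulated profit */
    static int cur_visit;

    static void
    propagate (int regno, int hard_regno, int div)
    {
      if (div > (1 << 5))      /* depth 5 is enough, as in LRA */
        return;
      for (const copy &c : copies[regno])
        if (visited_stamp[c.other_regno] != cur_visit)
          {
            visited_stamp[c.other_regno] = cur_visit;
            /* Profit decays with distance from the assigned pseudo.  */
            preference_profit[c.other_regno] += c.freq < div ? 1 : c.freq / div;
            propagate (c.other_regno, hard_regno, div * 2);
          }
    }
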
763 | |
764 | /* Return prefix title for pseudo REGNO. */ |
765 | static const char * |
766 | pseudo_prefix_title (int regno) |
767 | { |
768 | return |
769 | (regno < lra_constraint_new_regno_start ? "" |
770 | : bitmap_bit_p (&lra_inheritance_pseudos, regno) ? "inheritance " |
771 | : bitmap_bit_p (&lra_split_regs, regno) ? "split " |
772 | : bitmap_bit_p (&lra_optional_reload_pseudos, regno) ? "optional reload " |
773 | : bitmap_bit_p (&lra_subreg_reload_pseudos, regno) ? "subreg reload " |
774 | : "reload "); |
775 | } |
776 | |
777 | /* Update REG_RENUMBER and other pseudo preferences by assignment of |
778 | HARD_REGNO to pseudo REGNO and print about it if PRINT_P. */ |
779 | void |
780 | lra_setup_reg_renumber (int regno, int hard_regno, bool print_p) |
781 | { |
782 | int i, hr; |
783 | |
784 | /* We cannot just reassign hard register. */ |
785 | lra_assert (hard_regno < 0 || reg_renumber[regno] < 0); |
786 | if ((hr = hard_regno) < 0) |
787 | hr = reg_renumber[regno]; |
788 | reg_renumber[regno] = hard_regno; |
789 | lra_assert (hr >= 0); |
790 | for (i = 0; i < hard_regno_nregs (hr, PSEUDO_REGNO_MODE (regno)); i++) |
791 | if (hard_regno < 0) |
792 | lra_hard_reg_usage[hr + i] -= lra_reg_info[regno].freq; |
793 | else |
794 | lra_hard_reg_usage[hr + i] += lra_reg_info[regno].freq; |
795 | if (print_p && lra_dump_file != NULL) |
796 | fprintf (lra_dump_file, " Assign %d to %sr%d (freq=%d)\n", |
797 | reg_renumber[regno], pseudo_prefix_title (regno), |
798 | regno, lra_reg_info[regno].freq); |
799 | if (hard_regno >= 0) |
800 | { |
801 | curr_update_hard_regno_preference_check++; |
802 | update_hard_regno_preference (regno, hard_regno, 1); |
803 | } |
804 | } |
805 | |
806 | /* Pseudos which occur in insns containing a particular pseudo. */ |
807 | static bitmap_head insn_conflict_pseudos; |
808 | |
809 | /* Bitmaps used to contain spill pseudos for given pseudo hard regno |
810 | and best spill pseudos for given pseudo (and best hard regno). */ |
811 | static bitmap_head spill_pseudos_bitmap, best_spill_pseudos_bitmap; |
812 | |
813 | /* Current pseudo check for validity of elements in |
814 | TRY_HARD_REG_PSEUDOS. */ |
815 | static int curr_pseudo_check; |
816 | /* Array used for validity of elements in TRY_HARD_REG_PSEUDOS. */ |
817 | static int try_hard_reg_pseudos_check[FIRST_PSEUDO_REGISTER]; |
818 | /* Pseudos that hold a given hard register at the considered points. */ |
819 | static bitmap_head try_hard_reg_pseudos[FIRST_PSEUDO_REGISTER]; |
820 | |
821 | /* Set up try_hard_reg_pseudos for given program point P and class |
822 | RCLASS. Those are pseudos living at P and assigned to a hard |
823 | register of RCLASS. In other words, those are pseudos which can be |
824 | spilled to assign a hard register of RCLASS to a pseudo living at |
825 | P. */ |
826 | static void |
827 | setup_try_hard_regno_pseudos (int p, enum reg_class rclass) |
828 | { |
829 | int i, hard_regno; |
830 | machine_mode mode; |
831 | unsigned int spill_regno; |
832 | bitmap_iterator bi; |
833 | |
834 | /* Find what pseudos could be spilled. */ |
835 | EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[p], 0, spill_regno, bi) |
836 | { |
837 | mode = PSEUDO_REGNO_MODE (spill_regno); |
838 | hard_regno = live_pseudos_reg_renumber[spill_regno]; |
839 | if (overlaps_hard_reg_set_p (reg_class_contents[rclass], |
840 | mode, hard_regno)) |
841 | { |
842 | for (i = hard_regno_nregs (hard_regno, mode) - 1; i >= 0; i--) |
843 | { |
844 | if (try_hard_reg_pseudos_check[hard_regno + i] |
845 | != curr_pseudo_check) |
846 | { |
847 | try_hard_reg_pseudos_check[hard_regno + i] |
848 | = curr_pseudo_check; |
849 | bitmap_clear (&try_hard_reg_pseudos[hard_regno + i]); |
850 | } |
851 | bitmap_set_bit (&try_hard_reg_pseudos[hard_regno + i], |
852 | spill_regno); |
853 | } |
854 | } |
855 | } |
856 | } |
857 | |
858 | /* Assign temporarily HARD_REGNO to pseudo REGNO. Temporary |
859 | assignment means that we might undo the data change. */ |
860 | static void |
861 | assign_temporarily (int regno, int hard_regno) |
862 | { |
863 | int p; |
864 | lra_live_range_t r; |
865 | |
866 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
867 | { |
868 | for (p = r->start; p <= r->finish; p++) |
869 | if (hard_regno < 0) |
870 | bitmap_clear_bit (&live_hard_reg_pseudos[p], regno); |
871 | else |
872 | { |
873 | bitmap_set_bit (&live_hard_reg_pseudos[p], regno); |
874 | insert_in_live_range_start_chain (regno); |
875 | } |
876 | } |
877 | live_pseudos_reg_renumber[regno] = hard_regno; |
878 | } |
879 | |
880 | /* Return true iff there is a reason why pseudo SPILL_REGNO should not |
881 | be spilled. */ |
882 | static bool |
883 | must_not_spill_p (unsigned spill_regno) |
884 | { |
885 | if ((pic_offset_table_rtx != NULL |
886 | && spill_regno == REGNO (pic_offset_table_rtx)) |
887 | || ((int) spill_regno >= lra_constraint_new_regno_start |
888 | && ! bitmap_bit_p (&lra_inheritance_pseudos, spill_regno) |
889 | && ! bitmap_bit_p (&lra_split_regs, spill_regno) |
890 | && ! bitmap_bit_p (&lra_subreg_reload_pseudos, spill_regno) |
891 | && ! bitmap_bit_p (&lra_optional_reload_pseudos, spill_regno))) |
892 | return true; |
893 | /* A reload pseudo that requires a singleton register class should |
894 | not be spilled. |
895 | FIXME: this mitigates the issue on certain i386 patterns, but |
896 | does not solve the general case where existing reloads fully |
897 | cover a limited register class. */ |
898 | if (!bitmap_bit_p (&non_reload_pseudos, spill_regno) |
899 | && reg_class_size [reg_preferred_class (spill_regno)] == 1 |
900 | && reg_alternate_class (spill_regno) == NO_REGS) |
901 | return true; |
902 | return false; |
903 | } |
904 | |
905 | /* Array used for sorting reload pseudos for subsequent allocation |
906 | after spilling some pseudo. */ |
907 | static int *sorted_reload_pseudos; |
908 | |
909 | /* Spill some pseudos for a reload pseudo REGNO and return hard |
910 | register which should be used for pseudo after spilling. The |
911 | function adds spilled pseudos to SPILLED_PSEUDO_BITMAP. When we |
912 | choose hard register (and pseudos occupying the hard registers and |
913 | to be spilled), we take into account not only how REGNO will |
914 | benefit from the spills but also how other reload pseudos not yet |
915 | assigned to hard registers benefit from the spills too. In very |
916 | rare cases, the function can fail and return -1. |
917 | |
918 | If FIRST_P, return the first available hard reg ignoring other |
919 | criteria, e.g. allocation cost and cost of spilling non-reload |
920 | pseudos. This approach results in less hard reg pool fragmentation |
921 | and permits us to allocate hard regs to reload pseudos in |
922 | complicated situations where pseudo sizes differ. */ |
923 | static int |
924 | spill_for (int regno, bitmap spilled_pseudo_bitmap, bool first_p) |
925 | { |
926 | int i, j, n, p, hard_regno, best_hard_regno, cost, best_cost, rclass_size; |
927 | int reload_hard_regno, reload_cost; |
928 | bool static_p, best_static_p; |
929 | machine_mode mode; |
930 | enum reg_class rclass; |
931 | unsigned int spill_regno, reload_regno, uid; |
932 | int insn_pseudos_num, best_insn_pseudos_num; |
933 | int bad_spills_num, smallest_bad_spills_num; |
934 | lra_live_range_t r; |
935 | bitmap_iterator bi; |
936 | |
937 | rclass = regno_allocno_class_array[regno]; |
938 | lra_assert (reg_renumber[regno] < 0 && rclass != NO_REGS); |
939 | bitmap_clear (&insn_conflict_pseudos); |
940 | bitmap_clear (&best_spill_pseudos_bitmap); |
941 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) |
942 | { |
943 | struct lra_insn_reg *ir; |
944 | |
945 | for (ir = lra_get_insn_regs (uid); ir != NULL; ir = ir->next) |
946 | if (ir->regno >= FIRST_PSEUDO_REGISTER) |
947 | bitmap_set_bit (&insn_conflict_pseudos, ir->regno); |
948 | } |
949 | best_hard_regno = -1; |
950 | best_cost = INT_MAX; |
951 | best_static_p = TRUE; |
952 | best_insn_pseudos_num = INT_MAX; |
953 | smallest_bad_spills_num = INT_MAX; |
954 | rclass_size = ira_class_hard_regs_num[rclass]; |
955 | mode = PSEUDO_REGNO_MODE (regno); |
956 | /* Invalidate try_hard_reg_pseudos elements. */ |
957 | curr_pseudo_check++; |
958 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
959 | for (p = r->start; p <= r->finish; p++) |
960 | setup_try_hard_regno_pseudos (p, rclass); |
961 | for (i = 0; i < rclass_size; i++) |
962 | { |
963 | hard_regno = ira_class_hard_regs[rclass][i]; |
964 | bitmap_clear (&spill_pseudos_bitmap); |
965 | for (j = hard_regno_nregs (hard_regno, mode) - 1; j >= 0; j--) |
966 | { |
967 | if (hard_regno + j >= FIRST_PSEUDO_REGISTER) |
968 | break; |
969 | if (try_hard_reg_pseudos_check[hard_regno + j] != curr_pseudo_check) |
970 | continue; |
971 | lra_assert (!bitmap_empty_p (&try_hard_reg_pseudos[hard_regno + j])); |
972 | bitmap_ior_into (&spill_pseudos_bitmap, |
973 | &try_hard_reg_pseudos[hard_regno + j]); |
974 | } |
975 | /* Spill pseudos. */ |
976 | static_p = false; |
977 | EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi) |
978 | if (must_not_spill_p (spill_regno)) |
979 | goto fail; |
980 | else if (non_spilled_static_chain_regno_p (spill_regno)) |
981 | static_p = true; |
982 | insn_pseudos_num = 0; |
983 | bad_spills_num = 0; |
984 | if (lra_dump_file != NULL) |
985 | fprintf (lra_dump_file, " Trying %d:", hard_regno); |
986 | sparseset_clear (live_range_reload_inheritance_pseudos); |
987 | EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi) |
988 | { |
989 | if (bitmap_bit_p (&insn_conflict_pseudos, spill_regno)) |
990 | insn_pseudos_num++; |
991 | if (spill_regno >= (unsigned int) lra_bad_spill_regno_start) |
992 | bad_spills_num++; |
993 | for (r = lra_reg_info[spill_regno].live_ranges; |
994 | r != NULL; |
995 | r = r->next) |
996 | { |
997 | for (p = r->start; p <= r->finish; p++) |
998 | { |
999 | lra_live_range_t r2; |
1000 | |
1001 | for (r2 = start_point_ranges[p]; |
1002 | r2 != NULL; |
1003 | r2 = r2->start_next) |
1004 | if (r2->regno >= lra_constraint_new_regno_start) |
1005 | sparseset_set_bit (live_range_reload_inheritance_pseudos, |
1006 | r2->regno); |
1007 | } |
1008 | } |
1009 | } |
1010 | n = 0; |
1011 | if (sparseset_cardinality (live_range_reload_inheritance_pseudos) |
1012 | <= (unsigned)param_lra_max_considered_reload_pseudos) |
1013 | EXECUTE_IF_SET_IN_SPARSESET (live_range_reload_inheritance_pseudos, |
1014 | reload_regno) |
1015 | if ((int) reload_regno != regno |
1016 | && (ira_reg_classes_intersect_p |
1017 | [rclass][regno_allocno_class_array[reload_regno]]) |
1018 | && live_pseudos_reg_renumber[reload_regno] < 0 |
1019 | && find_hard_regno_for (reload_regno, &cost, -1, first_p) < 0) |
1020 | sorted_reload_pseudos[n++] = reload_regno; |
1021 | EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi) |
1022 | { |
1023 | update_lives (spill_regno, true); |
1024 | if (lra_dump_file != NULL) |
1025 | fprintf (lra_dump_file, " spill %d(freq=%d)", |
1026 | spill_regno, lra_reg_info[spill_regno].freq); |
1027 | } |
1028 | hard_regno = find_hard_regno_for (regno, &cost, -1, first_p); |
1029 | if (hard_regno >= 0) |
1030 | { |
1031 | assign_temporarily (regno, hard_regno); |
1032 | qsort (sorted_reload_pseudos, n, sizeof (int), |
1033 | reload_pseudo_compare_func); |
1034 | for (j = 0; j < n; j++) |
1035 | { |
1036 | reload_regno = sorted_reload_pseudos[j]; |
1037 | lra_assert (live_pseudos_reg_renumber[reload_regno] < 0); |
1038 | if ((reload_hard_regno |
1039 | = find_hard_regno_for (reload_regno, |
1040 | &reload_cost, -1, first_p)) >= 0) |
1041 | { |
1042 | if (lra_dump_file != NULL) |
1043 | fprintf (lra_dump_file, " assign %d(cost=%d)", |
1044 | reload_regno, reload_cost); |
1045 | assign_temporarily (reload_regno, reload_hard_regno); |
1046 | cost += reload_cost; |
1047 | } |
1048 | } |
1049 | EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi) |
1050 | { |
1051 | rtx_insn_list *x; |
1052 | |
1053 | cost += lra_reg_info[spill_regno].freq; |
1054 | if (ira_reg_equiv[spill_regno].memory != NULL |
1055 | || ira_reg_equiv[spill_regno].constant != NULL) |
1056 | for (x = ira_reg_equiv[spill_regno].init_insns; |
1057 | x != NULL; |
1058 | x = x->next ()) |
1059 | cost -= REG_FREQ_FROM_BB (BLOCK_FOR_INSN (x->insn ())); |
1060 | } |
1061 | /* Avoid spilling static chain pointer pseudo when non-local |
1062 | goto is used. */ |
1063 | if ((! static_p && best_static_p) |
1064 | || (static_p == best_static_p |
1065 | && (best_insn_pseudos_num > insn_pseudos_num |
1066 | || (best_insn_pseudos_num == insn_pseudos_num |
1067 | && (bad_spills_num < smallest_bad_spills_num |
1068 | || (bad_spills_num == smallest_bad_spills_num |
1069 | && best_cost > cost)))))) |
1070 | { |
1071 | best_insn_pseudos_num = insn_pseudos_num; |
1072 | smallest_bad_spills_num = bad_spills_num; |
1073 | best_static_p = static_p; |
1074 | best_cost = cost; |
1075 | best_hard_regno = hard_regno; |
1076 | bitmap_copy (&best_spill_pseudos_bitmap, &spill_pseudos_bitmap); |
1077 | if (lra_dump_file != NULL) |
1078 | fprintf (lra_dump_file, |
1079 | " Now best %d(cost=%d, bad_spills=%d, insn_pseudos=%d)\n", |
1080 | hard_regno, cost, bad_spills_num, insn_pseudos_num); |
1081 | } |
1082 | assign_temporarily (regno, -1); |
1083 | for (j = 0; j < n; j++) |
1084 | { |
1085 | reload_regno = sorted_reload_pseudos[j]; |
1086 | if (live_pseudos_reg_renumber[reload_regno] >= 0) |
1087 | assign_temporarily (reload_regno, -1); |
1088 | } |
1089 | } |
1090 | if (lra_dump_file != NULL) |
1091 | fprintf (lra_dump_file, "\n"); |
1092 | /* Restore the live hard reg pseudo info for spilled pseudos. */ |
1093 | EXECUTE_IF_SET_IN_BITMAP (&spill_pseudos_bitmap, 0, spill_regno, bi) |
1094 | update_lives (spill_regno, false); |
1095 | fail: |
1096 | ; |
1097 | } |
1098 | /* Spill: */ |
1099 | EXECUTE_IF_SET_IN_BITMAP (&best_spill_pseudos_bitmap, 0, spill_regno, bi) |
1100 | { |
1101 | if ((int) spill_regno >= lra_constraint_new_regno_start) |
1102 | former_reload_pseudo_spill_p = true; |
1103 | if (lra_dump_file != NULL) |
1104 | fprintf (lra_dump_file, " Spill %sr%d(hr=%d, freq=%d) for r%d\n", |
1105 | pseudo_prefix_title (spill_regno), |
1106 | spill_regno, reg_renumber[spill_regno], |
1107 | lra_reg_info[spill_regno].freq, regno); |
1108 | update_lives (spill_regno, true); |
1109 | lra_setup_reg_renumber (spill_regno, -1, false); |
1110 | } |
1111 | bitmap_ior_into (spilled_pseudo_bitmap, &best_spill_pseudos_bitmap); |
1112 | return best_hard_regno; |
1113 | } |
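
The "Now best" update above is a lexicographic choice: never spill the static chain pseudo if avoidable, then fewer spilled pseudos from REGNO's own insns, then fewer "bad" spills, then lower cost. A reduced sketch of just that comparison, with a hypothetical record of the per-hard-reg trial results:

    struct spill_choice
    {
      bool spills_static_chain_p;  /* would spill the static chain pseudo */
      int insn_pseudos_num;        /* spilled pseudos used in REGNO's insns */
      int bad_spills_num;          /* spills past lra_bad_spill_regno_start */
      int cost;                    /* net frequency-derived cost */
    };

    static bool
    spill_choice_better_p (const spill_choice &c, const spill_choice &best)
    {
      if (c.spills_static_chain_p != best.spills_static_chain_p)
        return !c.spills_static_chain_p;
      if (c.insn_pseudos_num != best.insn_pseudos_num)
        return c.insn_pseudos_num < best.insn_pseudos_num;
      if (c.bad_spills_num != best.bad_spills_num)
        return c.bad_spills_num < best.bad_spills_num;
      return c.cost < best.cost;
    }
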
1114 | |
1115 | /* Assign HARD_REGNO to REGNO. */ |
1116 | static void |
1117 | assign_hard_regno (int hard_regno, int regno) |
1118 | { |
1119 | int i; |
1120 | |
1121 | lra_assert (hard_regno >= 0); |
1122 | lra_setup_reg_renumber (regno, hard_regno, true); |
1123 | update_lives (regno, false); |
1124 | for (i = 0; |
1125 | i < hard_regno_nregs (hard_regno, lra_reg_info[regno].biggest_mode); |
1126 | i++) |
1127 | df_set_regs_ever_live (hard_regno + i, true); |
1128 | } |
1129 | |
1130 | /* Array used for sorting different pseudos. */ |
1131 | static int *sorted_pseudos; |
1132 | |
1133 | /* The constraints pass is allowed to create equivalences between |
1134 | pseudos that make the current allocation "incorrect" (in the sense |
1135 | that pseudos are assigned to hard registers from their own conflict |
1136 | sets). The global variable check_and_force_assignment_correctness_p says |
1137 | whether this might have happened. |
1138 | |
1139 | Process pseudos assigned to hard registers (less frequently used |
1140 | first), spill if a conflict is found, and mark the spilled pseudos |
1141 | in SPILLED_PSEUDO_BITMAP. Set up LIVE_HARD_REG_PSEUDOS from |
1142 | pseudos, assigned to hard registers. */ |
1143 | static void |
1144 | setup_live_pseudos_and_spill_after_risky_transforms (bitmap |
1145 | spilled_pseudo_bitmap) |
1146 | { |
1147 | int p, i, j, n, regno, hard_regno, biggest_nregs, nregs_diff; |
1148 | unsigned int k, conflict_regno; |
1149 | poly_int64 offset; |
1150 | int val; |
1151 | HARD_REG_SET conflict_set; |
1152 | machine_mode mode, biggest_mode; |
1153 | lra_live_range_t r; |
1154 | bitmap_iterator bi; |
1155 | int max_regno = max_reg_num (); |
1156 | |
1157 | if (! check_and_force_assignment_correctness_p) |
1158 | { |
1159 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
1160 | if (reg_renumber[i] >= 0 && lra_reg_info[i].nrefs > 0) |
1161 | update_lives (i, false); |
1162 | return; |
1163 | } |
1164 | for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
1165 | if ((pic_offset_table_rtx == NULL_RTX |
1166 | || i != (int) REGNO (pic_offset_table_rtx)) |
1167 | && (hard_regno = reg_renumber[i]) >= 0 && lra_reg_info[i].nrefs > 0) |
1168 | { |
1169 | biggest_mode = lra_reg_info[i].biggest_mode; |
1170 | biggest_nregs = hard_regno_nregs (hard_regno, biggest_mode); |
1171 | nregs_diff = (biggest_nregs |
1172 | - hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (i))); |
1173 | enum reg_class rclass = lra_get_allocno_class (i); |
1174 | |
1175 | if ((WORDS_BIG_ENDIAN |
1176 | && (hard_regno - nregs_diff < 0 |
1177 | || !TEST_HARD_REG_BIT (reg_class_contents[rclass], |
1178 | hard_regno - nregs_diff))) |
1179 | || (!WORDS_BIG_ENDIAN |
1180 | && (hard_regno + nregs_diff >= FIRST_PSEUDO_REGISTER |
1181 | || !TEST_HARD_REG_BIT (reg_class_contents[rclass], |
1182 | hard_regno + nregs_diff)))) |
1183 | { |
1184 | /* The hard registers of the paradoxical subreg are out of |
1185 | range of the pseudo's register class. Spill the pseudo. */ |
1186 | reg_renumber[i] = -1; |
1187 | continue; |
1188 | } |
1189 | sorted_pseudos[n++] = i; |
1190 | } |
1191 | qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func); |
1192 | if (pic_offset_table_rtx != NULL_RTX |
1193 | && (regno = REGNO (pic_offset_table_rtx)) >= FIRST_PSEUDO_REGISTER |
1194 | && reg_renumber[regno] >= 0 && lra_reg_info[regno].nrefs > 0) |
1195 | sorted_pseudos[n++] = regno; |
1196 | for (i = n - 1; i >= 0; i--) |
1197 | { |
1198 | regno = sorted_pseudos[i]; |
1199 | hard_regno = reg_renumber[regno]; |
1200 | lra_assert (hard_regno >= 0); |
1201 | mode = lra_reg_info[regno].biggest_mode; |
1202 | sparseset_clear (live_range_hard_reg_pseudos); |
1203 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
1204 | { |
1205 | EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi) |
1206 | sparseset_set_bit (live_range_hard_reg_pseudos, k); |
1207 | for (p = r->start + 1; p <= r->finish; p++) |
1208 | { |
1209 | lra_live_range_t r2; |
1210 | |
1211 | for (r2 = start_point_ranges[p]; |
1212 | r2 != NULL; |
1213 | r2 = r2->start_next) |
1214 | if (live_pseudos_reg_renumber[r2->regno] >= 0) |
1215 | sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno); |
1216 | } |
1217 | } |
1218 | conflict_set = lra_no_alloc_regs; |
1219 | conflict_set |= lra_reg_info[regno].conflict_hard_regs; |
1220 | val = lra_reg_info[regno].val; |
1221 | offset = lra_reg_info[regno].offset; |
1222 | EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno) |
1223 | if (!lra_reg_val_equal_p (conflict_regno, val, offset) |
1224 | /* Multi-register pseudos with equal values should |
1225 | also start in the same hard register. */ |
1226 | || hard_regno != reg_renumber[conflict_regno]) |
1227 | { |
1228 | int conflict_hard_regno = reg_renumber[conflict_regno]; |
1229 | |
1230 | biggest_mode = lra_reg_info[conflict_regno].biggest_mode; |
1231 | biggest_nregs = hard_regno_nregs (conflict_hard_regno, |
1232 | biggest_mode); |
1233 | nregs_diff |
Value stored to 'nregs_diff' is never read | |
1234 | = (biggest_nregs |
1235 | - hard_regno_nregs (conflict_hard_regno, |
1236 | PSEUDO_REGNO_MODE (conflict_regno))); |
1237 | add_to_hard_reg_set (&conflict_set, |
1238 | biggest_mode, |
1239 | conflict_hard_regno |
1240 | - (WORDS_BIG_ENDIAN ? nregs_diff : 0)); |
1241 | } |
1242 | if (! overlaps_hard_reg_set_p (conflict_set, mode, hard_regno)) |
1243 | { |
1244 | update_lives (regno, false); |
1245 | continue; |
1246 | } |
1247 | bitmap_set_bit (spilled_pseudo_bitmap, regno); |
1248 | for (j = 0; |
1249 | j < hard_regno_nregs (hard_regno, PSEUDO_REGNO_MODE (regno)); |
1250 | j++) |
1251 | lra_hard_reg_usage[hard_regno + j] -= lra_reg_info[regno].freq; |
1252 | reg_renumber[regno] = -1; |
1253 | if (regno >= lra_constraint_new_regno_start) |
1254 | former_reload_pseudo_spill_p = true; |
1255 | if (lra_dump_file != NULL) |
1256 | fprintf (lra_dump_file, " Spill r%d after risky transformations\n", |
1257 | regno); |
1258 | } |
1259 | } |
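/* Illustrative sketch (standalone, not GCC code): the little-endian
   branch of the paradoxical-subreg check above.  A pseudo whose widest
   (paradoxical) use needs more registers than its natural mode must
   still keep the extra registers inside the register file and inside
   its class; everything below is a hypothetical stand-in.  */
#include <cstdio>

constexpr int TOY_FIRST_PSEUDO_REGISTER = 8;

static bool toy_in_class_p (int hard_regno)
{
  return hard_regno >= 0 && hard_regno <= 5;   /* class = hard regs 0..5 */
}

static bool fits_p (int hard_regno, int biggest_nregs, int natural_nregs)
{
  int nregs_diff = biggest_nregs - natural_nregs;
  /* Little-endian: the extra registers sit above hard_regno.  */
  return hard_regno + nregs_diff < TOY_FIRST_PSEUDO_REGISTER
         && toy_in_class_p (hard_regno + nregs_diff);
}

int main ()
{
  /* Natural mode needs 1 register, the widest use needs 2.  */
  std::printf ("hr4: %s\n", fits_p (4, 2, 1) ? "keep" : "spill");
  std::printf ("hr5: %s\n", fits_p (5, 2, 1) ? "keep" : "spill");
}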
1260 | |
1261 | /* Improve allocation by assigning the same hard regno to inheritance |
1262 | pseudos and the pseudos connected to them. We need this because |
1263 | inheritance pseudos are allocated after reload pseudos in the thread, |
1264 | and when we assign a hard register to a reload pseudo we don't know yet |
1265 | that the connected inheritance pseudos can get the same hard register. |
1266 | Add pseudos with changed allocation to bitmap CHANGED_PSEUDOS. */ |
1267 | static void |
1268 | improve_inheritance (bitmap changed_pseudos) |
1269 | { |
1270 | unsigned int k; |
1271 | int regno, another_regno, hard_regno, another_hard_regno, cost, i, n; |
1272 | lra_copy_t cp, next_cp; |
1273 | bitmap_iterator bi; |
1274 | |
1275 | if (lra_inheritance_iter > LRA_MAX_INHERITANCE_PASSES) |
1276 | return; |
1277 | n = 0; |
1278 | EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, k, bi) |
1279 | if (reg_renumber[k] >= 0 && lra_reg_info[k].nrefs != 0) |
1280 | sorted_pseudos[n++] = k; |
1281 | qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func); |
1282 | for (i = 0; i < n; i++) |
1283 | { |
1284 | regno = sorted_pseudos[i]; |
1285 | hard_regno = reg_renumber[regno]; |
1286 | lra_assert (hard_regno >= 0); |
1287 | for (cp = lra_reg_info[regno].copies; cp != NULL; cp = next_cp) |
1288 | { |
1289 | if (cp->regno1 == regno) |
1290 | { |
1291 | next_cp = cp->regno1_next; |
1292 | another_regno = cp->regno2; |
1293 | } |
1294 | else if (cp->regno2 == regno) |
1295 | { |
1296 | next_cp = cp->regno2_next; |
1297 | another_regno = cp->regno1; |
1298 | } |
1299 | else |
1300 | gcc_unreachable (); |
1301 | /* Don't change reload pseudo allocation. It might have |
1302 | this allocation for a purpose and changing it can result |
1303 | in LRA cycling. */ |
1304 | if ((another_regno < lra_constraint_new_regno_start |
1305 | || bitmap_bit_p (&lra_inheritance_pseudos, another_regno)) |
1306 | && (another_hard_regno = reg_renumber[another_regno]) >= 0 |
1307 | && another_hard_regno != hard_regno) |
1308 | { |
1309 | if (lra_dump_file != NULL) |
1310 | fprintf |
1311 | (lra_dump_file, |
1312 | " Improving inheritance for %d(%d) and %d(%d)...\n", |
1313 | regno, hard_regno, another_regno, another_hard_regno); |
1314 | update_lives (another_regno, true); |
1315 | lra_setup_reg_renumber (another_regno, -1, false); |
1316 | if (hard_regno == find_hard_regno_for (another_regno, &cost, |
1317 | hard_regno, false)) |
1318 | assign_hard_regno (hard_regno, another_regno); |
1319 | else |
1320 | assign_hard_regno (another_hard_regno, another_regno); |
1321 | bitmap_set_bit (changed_pseudos, another_regno); |
1322 | } |
1323 | } |
1324 | } |
1325 | } |
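/* Illustrative sketch (standalone, not GCC code): traversing a copy
   list in which each copy joins two pseudos and carries one `next'
   link per side, as lra_copy_t does with regno1_next/regno2_next.
   The toy_copy type is a hypothetical simplification.  */
#include <cstdio>

struct toy_copy
{
  int regno1, regno2;
  toy_copy *regno1_next, *regno2_next;
};

static void visit_connected (int regno, toy_copy *copies)
{
  toy_copy *next_cp;
  for (toy_copy *cp = copies; cp != nullptr; cp = next_cp)
    {
      int another_regno;
      /* Follow the chain for whichever side of the copy REGNO is on.  */
      if (cp->regno1 == regno)
        {
          next_cp = cp->regno1_next;
          another_regno = cp->regno2;
        }
      else
        {
          next_cp = cp->regno2_next;
          another_regno = cp->regno1;
        }
      std::printf ("r%d is connected to r%d by a copy\n",
                   regno, another_regno);
    }
}

int main ()
{
  toy_copy c2 = {10, 12, nullptr, nullptr};
  toy_copy c1 = {10, 11, &c2, nullptr};   /* r10's copy chain: c1 -> c2 */
  visit_connected (10, &c1);
}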
1326 | |
1327 | |
1328 | /* Bitmap finally containing all pseudos spilled on this assignment |
1329 | pass. */ |
1330 | static bitmap_head all_spilled_pseudos; |
1331 | /* All pseudos whose allocation was changed. */ |
1332 | static bitmap_head changed_pseudo_bitmap; |
1333 | |
1334 | |
1335 | /* Add to LIVE_RANGE_HARD_REG_PSEUDOS all pseudos conflicting with |
1336 | REGNO and whose hard regs can be assigned to REGNO. */ |
1337 | static void |
1338 | find_all_spills_for (int regno) |
1339 | { |
1340 | int p; |
1341 | lra_live_range_t r; |
1342 | unsigned int k; |
1343 | bitmap_iterator bi; |
1344 | enum reg_class rclass; |
1345 | bool *rclass_intersect_p; |
1346 | |
1347 | rclass = regno_allocno_class_array[regno]; |
1348 | rclass_intersect_p = ira_reg_classes_intersect_p[rclass]; |
1349 | for (r = lra_reg_info[regno].live_ranges; r != NULL; r = r->next) |
1350 | { |
1351 | EXECUTE_IF_SET_IN_BITMAP (&live_hard_reg_pseudos[r->start], 0, k, bi) |
1352 | if (rclass_intersect_p[regno_allocno_class_array[k]]) |
1353 | sparseset_set_bit (live_range_hard_reg_pseudos, k); |
1354 | for (p = r->start + 1; p <= r->finish; p++) |
1355 | { |
1356 | lra_live_range_t r2; |
1357 | |
1358 | for (r2 = start_point_ranges[p]; |
1359 | r2 != NULL; |
1360 | r2 = r2->start_next) |
1361 | { |
1362 | if (live_pseudos_reg_renumber[r2->regno] >= 0 |
1363 | && ! sparseset_bit_p (live_range_hard_reg_pseudos, r2->regno) |
1364 | && rclass_intersect_p[regno_allocno_class_array[r2->regno]] |
1365 | && ((int) r2->regno < lra_constraint_new_regno_start |
1366 | || bitmap_bit_p (&lra_inheritance_pseudos, r2->regno) |
1367 | || bitmap_bit_p (&lra_split_regs, r2->regno) |
1368 | || bitmap_bit_p (&lra_optional_reload_pseudos, r2->regno) |
1369 | /* There is no point in considering another reload |
1370 | pseudo of the same class. */ |
1371 | || regno_allocno_class_array[r2->regno] != rclass)) |
1372 | sparseset_set_bit (live_range_hard_reg_pseudos, r2->regno); |
1373 | } |
1374 | } |
1375 | } |
1376 | } |
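/* Illustrative sketch (standalone, not GCC code): the underlying test
   the loop above performs through its point-indexed structures --
   whether two pseudos' live ranges overlap.  The toy_range list is a
   hypothetical stand-in for lra_live_range_t.  */
#include <cstdio>

struct toy_range
{
  int start, finish;
  toy_range *next;
};

static bool ranges_overlap_p (const toy_range *a, const toy_range *b)
{
  /* A simple pairwise scan; the real pass avoids this by indexing
     pseudos by live-range start points.  */
  for (const toy_range *r1 = a; r1 != nullptr; r1 = r1->next)
    for (const toy_range *r2 = b; r2 != nullptr; r2 = r2->next)
      if (r1->start <= r2->finish && r2->start <= r1->finish)
        return true;
  return false;
}

int main ()
{
  toy_range a = {4, 9, nullptr};
  toy_range b = {10, 12, nullptr};
  toy_range c = {8, 11, nullptr};
  std::printf ("a overlaps b: %d\n", ranges_overlap_p (&a, &b));
  std::printf ("a overlaps c: %d\n", ranges_overlap_p (&a, &c));
}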
1377 | |
1378 | /* Assign hard registers to reload pseudos and other pseudos. Return |
1379 | true if we were unable to assign hard registers to all reload |
1380 | pseudos. */ |
1381 | static bool |
1382 | assign_by_spills (void) |
1383 | { |
1384 | int i, n, nfails, iter, regno, regno2, hard_regno, cost; |
1385 | rtx restore_rtx; |
1386 | bitmap_head changed_insns, do_not_assign_nonreload_pseudos; |
1387 | unsigned int u, conflict_regno; |
1388 | bitmap_iterator bi; |
1389 | bool reload_p, fails_p = false; |
1390 | int max_regno = max_reg_num (); |
1391 | |
1392 | for (n = 0, i = lra_constraint_new_regno_start; i < max_regno; i++) |
1393 | if (reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0 |
1394 | && regno_allocno_class_array[i] != NO_REGS) |
1395 | sorted_pseudos[n++] = i; |
1396 | bitmap_initialize (&insn_conflict_pseudos, ®_obstack); |
1397 | bitmap_initialize (&spill_pseudos_bitmap, ®_obstack); |
1398 | bitmap_initialize (&best_spill_pseudos_bitmap, ®_obstack); |
1399 | update_hard_regno_preference_check = XCNEWVEC (int, max_regno); |
1400 | curr_update_hard_regno_preference_check = 0; |
1401 | memset (try_hard_reg_pseudos_check, 0, sizeof (try_hard_reg_pseudos_check)); |
1402 | for (i = 0; i < FIRST_PSEUDO_REGISTER; i++) |
1403 | bitmap_initialize (&try_hard_reg_pseudos[i], ®_obstack); |
1404 | curr_pseudo_check = 0; |
1405 | bitmap_initialize (&changed_insns, ®_obstack); |
1406 | bitmap_initialize (&non_reload_pseudos, ®_obstack); |
1407 | bitmap_ior (&non_reload_pseudos, &lra_inheritance_pseudos, &lra_split_regs); |
1408 | bitmap_ior_into (&non_reload_pseudos, &lra_subreg_reload_pseudos); |
1409 | bitmap_ior_into (&non_reload_pseudos, &lra_optional_reload_pseudos); |
1410 | for (iter = 0; iter <= 1; iter++) |
1411 | { |
1412 | qsort (sorted_pseudos, n, sizeof (int), reload_pseudo_compare_func); |
1413 | nfails = 0; |
1414 | for (i = 0; i < n; i++) |
1415 | { |
1416 | regno = sorted_pseudos[i]; |
1417 | if (reg_renumber[regno] >= 0) |
1418 | continue; |
1419 | if (lra_dump_file != NULL) |
1420 | fprintf (lra_dump_file, " Assigning to %d " |
1421 | "(cl=%s, orig=%d, freq=%d, tfirst=%d, tfreq=%d)...\n", |
1422 | regno, reg_class_names[regno_allocno_class_array[regno]], |
1423 | ORIGINAL_REGNO (regno_reg_rtx[regno]), |
1424 | lra_reg_info[regno].freq, regno_assign_info[regno].first, |
1425 | regno_assign_info[regno_assign_info[regno].first].freq); |
1426 | hard_regno = find_hard_regno_for (regno, &cost, -1, iter == 1); |
1427 | reload_p = ! bitmap_bit_p (&non_reload_pseudos, regno); |
1428 | if (hard_regno < 0 && reload_p) |
1429 | hard_regno = spill_for (regno, &all_spilled_pseudos, iter == 1); |
1430 | if (hard_regno < 0) |
1431 | { |
1432 | if (reload_p) |
1433 | { |
1434 | /* Put unassigned reload pseudo first in the array. */ |
1435 | regno2 = sorted_pseudos[nfails]; |
1436 | sorted_pseudos[nfails++] = regno; |
1437 | sorted_pseudos[i] = regno2; |
1438 | } |
1439 | } |
1440 | else |
1441 | { |
1442 | /* This register might have been spilled by the previous |
1443 | pass. Indicate that it is no longer spilled. */ |
1444 | bitmap_clear_bit (&all_spilled_pseudos, regno); |
1445 | assign_hard_regno (hard_regno, regno); |
1446 | if (! reload_p) |
1447 | /* As the non-reload pseudo's assignment has changed, we |
1448 | should reconsider the insns referring to the |
1449 | pseudo. */ |
1450 | bitmap_set_bit (&changed_pseudo_bitmap, regno); |
1451 | } |
1452 | } |
1453 | if (nfails == 0 || iter > 0) |
1454 | { |
1455 | fails_p = nfails != 0; |
1456 | break; |
1457 | } |
1458 | /* This is a very rare event. We cannot assign a hard register |
1459 | to a reload pseudo because the hard register was assigned to |
1460 | another reload pseudo on a previous assignment pass. For |
1461 | example, on x86 we may assign CX on the 1st pass (although |
1462 | another hard register could be used for this) to a reload |
1463 | pseudo in an insn, and on the 2nd pass we need CX (and only |
1464 | this) hard register for a new reload pseudo in the same insn. |
1465 | Another possible situation may occur when assigning to |
1466 | multi-register reload pseudos when the hard register pool is |
1467 | too fragmented even after spilling non-reload pseudos. |
1468 | |
1469 | We should do something radical here to succeed. Here we |
1470 | spill *all* conflicting pseudos and reassign them. */ |
1471 | if (lra_dump_file != NULL) |
1472 | fprintf (lra_dump_file, " 2nd iter for reload pseudo assignments:\n"); |
1473 | sparseset_clear (live_range_hard_reg_pseudos); |
1474 | for (i = 0; i < nfails; i++) |
1475 | { |
1476 | if (lra_dump_file != NULL) |
1477 | fprintf (lra_dump_file, " Reload r%d assignment failure\n", |
1478 | sorted_pseudos[i]); |
1479 | find_all_spills_for (sorted_pseudos[i]); |
1480 | } |
1481 | EXECUTE_IF_SET_IN_SPARSESET (live_range_hard_reg_pseudos, conflict_regno) |
1482 | { |
1483 | if ((int) conflict_regno >= lra_constraint_new_regno_start) |
1484 | { |
1485 | sorted_pseudos[nfails++] = conflict_regno; |
1486 | former_reload_pseudo_spill_p = true; |
1487 | } |
1488 | else |
1489 | /* It is better to do reloads before spilling, as after the |
1490 | spill subpass we will reload memory instead of pseudos, |
1491 | and this will make reusing reload pseudos more |
1492 | complicated. Going directly to the spill pass in such a |
1493 | case might result in worse code performance or even LRA |
1494 | cycling if we have few registers. */ |
1495 | bitmap_set_bit (&all_spilled_pseudos, conflict_regno); |
1496 | if (lra_dump_file != NULL) |
1497 | fprintf (lra_dump_file, " Spill %s r%d(hr=%d, freq=%d)\n", |
1498 | pseudo_prefix_title (conflict_regno), conflict_regno, |
1499 | reg_renumber[conflict_regno], |
1500 | lra_reg_info[conflict_regno].freq); |
1501 | update_lives (conflict_regno, true); |
1502 | lra_setup_reg_renumber (conflict_regno, -1, false); |
1503 | } |
1504 | if (n < nfails) |
1505 | n = nfails; |
1506 | } |
1507 | improve_inheritance (&changed_pseudo_bitmap); |
1508 | bitmap_clear (&non_reload_pseudos); |
1509 | bitmap_clear (&changed_insns); |
1510 | if (! lra_simple_p) |
1511 | { |
1512 | /* We should not assign to original pseudos of inheritance |
1513 | pseudos or split pseudos if any of their inheritance pseudos did |
1514 | not get a hard register or any of their split pseudos was not |
1515 | split, because the undo inheritance/split pass will extend the |
1516 | live ranges of such inheritance or split pseudos. */ |
1517 | bitmap_initialize (&do_not_assign_nonreload_pseudos, ®_obstack); |
1518 | EXECUTE_IF_SET_IN_BITMAP (&lra_inheritance_pseudos, 0, u, bi) |
1519 | if ((restore_rtx = lra_reg_info[u].restore_rtx) != NULL_RTX |
1520 | && REG_P (restore_rtx) |
1521 | && reg_renumber[u] < 0 |
1522 | && bitmap_bit_p (&lra_inheritance_pseudos, u)) |
1523 | bitmap_set_bit (&do_not_assign_nonreload_pseudos, REGNO (restore_rtx)); |
1524 | EXECUTE_IF_SET_IN_BITMAP (&lra_split_regs, 0, u, bi) |
1525 | if ((restore_rtx = lra_reg_info[u].restore_rtx) != NULL_RTX |
1526 | && reg_renumber[u] >= 0) |
1527 | { |
1528 | lra_assert (REG_P (restore_rtx)); |
1529 | bitmap_set_bit (&do_not_assign_nonreload_pseudos, REGNO (restore_rtx)); |
1530 | } |
1531 | for (n = 0, i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
1532 | if (((i < lra_constraint_new_regno_start |
1533 | && ! bitmap_bit_p (&do_not_assign_nonreload_pseudos, i)) |
1534 | || (bitmap_bit_p (&lra_inheritance_pseudos, i) |
1535 | && lra_reg_info[i].restore_rtx != NULL_RTX) |
1536 | || (bitmap_bit_p (&lra_split_regs, i) |
1537 | && lra_reg_info[i].restore_rtx != NULL_RTX) |
1538 | || bitmap_bit_p (&lra_subreg_reload_pseudos, i) |
1539 | || bitmap_bit_p (&lra_optional_reload_pseudos, i)) |
1540 | && reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0 |
1541 | && regno_allocno_class_array[i] != NO_REGS) |
1542 | sorted_pseudos[n++] = i; |
1543 | bitmap_clear (&do_not_assign_nonreload_pseudos); |
1544 | if (n != 0 && lra_dump_file != NULL) |
1545 | fprintf (lra_dump_file, " Reassigning non-reload pseudos\n"); |
1546 | qsort (sorted_pseudos, n, sizeof (int), pseudo_compare_func); |
1547 | for (i = 0; i < n; i++) |
1548 | { |
1549 | regno = sorted_pseudos[i]; |
1550 | hard_regno = find_hard_regno_for (regno, &cost, -1, false); |
1551 | if (hard_regno >= 0) |
1552 | { |
1553 | assign_hard_regno (hard_regno, regno); |
1554 | /* We change allocation for non-reload pseudo on this |
1555 | iteration -- mark the pseudo for invalidation of used |
1556 | alternatives of insns containing the pseudo. */ |
1557 | bitmap_set_bit (&changed_pseudo_bitmap, regno); |
1558 | } |
1559 | else |
1560 | { |
1561 | enum reg_class rclass = lra_get_allocno_class (regno); |
1562 | enum reg_class spill_class; |
1563 | |
1564 | if (targetm.spill_class == NULL |
1565 | || lra_reg_info[regno].restore_rtx == NULL_RTX |
1566 | || ! bitmap_bit_p (&lra_inheritance_pseudos, regno) |
1567 | || (spill_class |
1568 | = ((enum reg_class) |
1569 | targetm.spill_class |
1570 | ((reg_class_t) rclass, |
1571 | PSEUDO_REGNO_MODE (regno)))) == NO_REGS) |
1572 | continue; |
1573 | regno_allocno_class_array[regno] = spill_class; |
1574 | hard_regno = find_hard_regno_for (regno, &cost, -1, false); |
1575 | if (hard_regno < 0) |
1576 | regno_allocno_class_array[regno] = rclass; |
1577 | else |
1578 | { |
1579 | setup_reg_classes |
1580 | (regno, spill_class, spill_class, spill_class); |
1581 | assign_hard_regno (hard_regno, regno); |
1582 | bitmap_set_bit (&changed_pseudo_bitmap, regno); |
1583 | } |
1584 | } |
1585 | } |
1586 | } |
1587 | free (update_hard_regno_preference_check); |
1588 | bitmap_clear (&best_spill_pseudos_bitmap); |
1589 | bitmap_clear (&spill_pseudos_bitmap); |
1590 | bitmap_clear (&insn_conflict_pseudos); |
1591 | return fails_p; |
1592 | } |
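/* Illustrative sketch (standalone, not GCC code): the partitioning
   trick used in the first iteration above -- every pseudo that failed
   to get a hard register is swapped to the front of the sorted array
   so the second iteration retries exactly those.  got_hard_reg_p is a
   hypothetical stand-in for find_hard_regno_for/spill_for succeeding.  */
#include <cstdio>

static bool got_hard_reg_p (int regno)
{
  return regno % 2 == 0;   /* pretend odd-numbered pseudos fail */
}

int main ()
{
  int sorted_pseudos[] = {100, 101, 102, 103, 104};
  int n = 5, nfails = 0;
  for (int i = 0; i < n; i++)
    if (!got_hard_reg_p (sorted_pseudos[i]))
      {
        /* Put the unassigned pseudo first in the array.  */
        int regno = sorted_pseudos[i];
        sorted_pseudos[i] = sorted_pseudos[nfails];
        sorted_pseudos[nfails++] = regno;
      }
  std::printf ("%d failures, retried first:", nfails);
  for (int i = 0; i < n; i++)
    std::printf (" %d", sorted_pseudos[i]);
  std::printf ("\n");
}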
1593 | |
1594 | /* Entry function to assign hard registers to new reload pseudos |
1595 | starting with LRA_CONSTRAINT_NEW_REGNO_START (possibly by spilling |
1596 | old pseudos) and possibly to the old pseudos. The function adds |
1597 | the insns to process on the next constraint pass. Those are all |
1598 | insns that contain non-reload and non-inheritance pseudos with |
1599 | changed allocation. |
1600 | |
1601 | Return true if we did not spill any non-reload and non-inheritance |
1602 | pseudos. Set up FAILS_P if we failed to assign hard registers to |
1603 | all reload pseudos. */ |
1604 | bool |
1605 | lra_assign (bool &fails_p) |
1606 | { |
1607 | int i; |
1608 | unsigned int u; |
1609 | bitmap_iterator bi; |
1610 | bitmap_head insns_to_process; |
1611 | bool no_spills_p; |
1612 | int max_regno = max_reg_num (); |
1613 | |
1614 | timevar_push (TV_LRA_ASSIGN); |
1615 | lra_assignment_iter++; |
1616 | if (lra_dump_file != NULL) |
1617 | fprintf (lra_dump_file, "\n********** Assignment #%d: **********\n\n", |
1618 | lra_assignment_iter); |
1619 | init_lives (); |
1620 | sorted_pseudos = XNEWVEC (int, max_regno); |
1621 | sorted_reload_pseudos = XNEWVEC (int, max_regno); |
1622 | regno_allocno_class_array = XNEWVEC (enum reg_class, max_regno); |
1623 | regno_live_length = XNEWVEC (int, max_regno); |
1624 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
1625 | { |
1626 | int l; |
1627 | lra_live_range_t r; |
1628 | |
1629 | regno_allocno_class_array[i] = lra_get_allocno_class (i); |
1630 | for (l = 0, r = lra_reg_info[i].live_ranges; r != NULL; r = r->next) |
1631 | l += r->finish - r->start + 1; |
1632 | regno_live_length[i] = l; |
1633 | } |
1634 | former_reload_pseudo_spill_p = false; |
1635 | init_regno_assign_info (); |
1636 | bitmap_initialize (&all_spilled_pseudos, ®_obstack); |
1637 | create_live_range_start_chains (); |
1638 | setup_live_pseudos_and_spill_after_risky_transforms (&all_spilled_pseudos); |
1639 | if (! lra_hard_reg_split_p && ! lra_asm_error_p && flag_checking) |
1640 | /* Check the correctness of the allocation, but only when there are no |
1641 | hard reg splits and no asm errors, as in the case of errors explicit |
1642 | insns involving hard regs are added or the asm is removed, and this |
1643 | can result in an incorrect allocation. */ |
1644 | for (i = FIRST_PSEUDO_REGISTER; i < max_regno; i++) |
1645 | if (lra_reg_info[i].nrefs != 0 |
1646 | && reg_renumber[i] >= 0 |
1647 | && overlaps_hard_reg_set_p (lra_reg_info[i].conflict_hard_regs, |
1648 | PSEUDO_REGNO_MODE (i), reg_renumber[i])) |
1649 | gcc_unreachable (); |
1650 | /* Setup insns to process on the next constraint pass. */ |
1651 | bitmap_initialize (&changed_pseudo_bitmap, ®_obstack); |
1652 | init_live_reload_and_inheritance_pseudos (); |
1653 | fails_p = assign_by_spills (); |
1654 | finish_live_reload_and_inheritance_pseudos (); |
1655 | bitmap_ior_into (&changed_pseudo_bitmap, &all_spilled_pseudos); |
1656 | no_spills_p = true; |
1657 | EXECUTE_IF_SET_IN_BITMAP (&all_spilled_pseudos, 0, u, bi) |
1658 | /* We ignore spilled pseudos created on the last inheritance pass |
1659 | because they will be removed. */ |
1660 | if (lra_reg_info[u].restore_rtx == NULL_RTX) |
1661 | { |
1662 | no_spills_p = false; |
1663 | break; |
1664 | } |
1665 | finish_live_range_start_chains (); |
1666 | bitmap_clear (&all_spilled_pseudos); |
1667 | bitmap_initialize (&insns_to_process, ®_obstack); |
1668 | EXECUTE_IF_SET_IN_BITMAP (&changed_pseudo_bitmap, 0, u, bi) |
1669 | bitmap_ior_into (&insns_to_process, &lra_reg_info[u].insn_bitmap); |
1670 | bitmap_clear (&changed_pseudo_bitmap); |
1671 | EXECUTE_IF_SET_IN_BITMAP (&insns_to_process, 0, u, bi) |
1672 | { |
1673 | lra_push_insn_by_uid (u); |
1674 | /* Invalidate used alternatives of insns that should be processed. */ |
1675 | lra_set_used_insn_alternative_by_uid (u, -1); |
1676 | } |
1677 | bitmap_clear (&insns_to_process); |
1678 | finish_regno_assign_info (); |
1679 | free (regno_live_length); |
1680 | free (regno_allocno_class_array); |
1681 | free (sorted_pseudos); |
1682 | free (sorted_reload_pseudos); |
1683 | finish_lives (); |
1684 | timevar_pop (TV_LRA_ASSIGN); |
1685 | if (former_reload_pseudo_spill_p) |
1686 | lra_assignment_iter_after_spill++; |
1687 | /* This is conditional on flag_checking because valid code can take |
1688 | more than this maximum number of iterations, but at the same time |
1689 | the test can uncover errors in machine descriptions. */ |
1690 | if (flag_checking |
1691 | && (lra_assignment_iter_after_spill |
1692 | > LRA_MAX_ASSIGNMENT_ITERATION_NUMBER)) |
1693 | internal_error |
1694 | ("maximum number of LRA assignment passes is achieved (%d)", |
1695 | LRA_MAX_ASSIGNMENT_ITERATION_NUMBER); |
1696 | /* Reset the assignment correctness flag: */ |
1697 | check_and_force_assignment_correctness_p = false; |
1698 | return no_spills_p; |
1699 | } |
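/* Illustrative sketch (standalone, not GCC code): the cycling guard at
   the end of lra_assign in miniature -- count only the passes that
   spilled a former reload pseudo and give up past a fixed bound.  The
   bound and the pass stub below are hypothetical.  */
#include <cstdio>
#include <cstdlib>

constexpr int TOY_MAX_ASSIGNMENT_PASSES = 30;

static bool pass_spilled_former_reload_p (int iter)
{
  return iter < 3;   /* pretend the first three passes still spill */
}

int main ()
{
  int iter_after_spill = 0;
  for (int iter = 0; ; iter++)
    {
      if (!pass_spilled_former_reload_p (iter))
        break;
      if (++iter_after_spill > TOY_MAX_ASSIGNMENT_PASSES)
        {
          std::fprintf (stderr, "maximum number of passes achieved\n");
          return EXIT_FAILURE;
        }
    }
  std::printf ("converged after %d spilling passes\n", iter_after_spill);
}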
1700 | |
1701 | /* Find start and finish insns for reload pseudo REGNO. Return true |
1702 | if we managed to find the expected insns. Return false |
1703 | otherwise. */ |
1704 | static bool |
1705 | find_reload_regno_insns (int regno, rtx_insn * &start, rtx_insn * &finish) |
1706 | { |
1707 | unsigned int uid; |
1708 | bitmap_iterator bi; |
1709 | int insns_num = 0; |
1710 | bool clobber_p = false; |
1711 | rtx_insn *prev_insn, *next_insn; |
1712 | rtx_insn *start_insn = NULL, *first_insn = NULL, *second_insn = NULL; |
1713 | |
1714 | EXECUTE_IF_SET_IN_BITMAP (&lra_reg_info[regno].insn_bitmap, 0, uid, bi) |
1715 | { |
1716 | if (start_insn == NULL) |
1717 | start_insn = lra_insn_recog_data[uid]->insn; |
1718 | if (GET_CODE (PATTERN (lra_insn_recog_data[uid]->insn)) == CLOBBER) |
1719 | clobber_p = true; |
1720 | else |
1721 | insns_num++; |
1722 | } |
1723 | /* For a reload pseudo we should have at most 3 insns referring to |
1724 | it besides clobbers: input/output reload insns and the original insn. */ |
1725 | if (insns_num > 3) |
1726 | return false; |
1727 | if (clobber_p) |
1728 | insns_num++; |
1729 | if (insns_num > 1) |
1730 | { |
1731 | for (prev_insn = PREV_INSN (start_insn), |
1732 | next_insn = NEXT_INSN (start_insn); |
1733 | insns_num != 1 && (prev_insn != NULL |
1734 | || (next_insn != NULL && second_insn == NULL)); ) |
1735 | { |
1736 | if (prev_insn != NULL) |
1737 | { |
1738 | if (bitmap_bit_p (&lra_reg_info[regno].insn_bitmap, |
1739 | INSN_UID (prev_insn))) |
1740 | { |
1741 | first_insn = prev_insn; |
1742 | insns_num--; |
1743 | } |
1744 | prev_insn = PREV_INSN (prev_insn); |
1745 | } |
1746 | if (next_insn != NULL && second_insn == NULL) |
1747 | { |
1748 | if (! bitmap_bit_p (&lra_reg_info[regno].insn_bitmap, |
1749 | INSN_UID (next_insn))) |
1750 | next_insn = NEXT_INSN (next_insn); |
1751 | else |
1752 | { |
1753 | second_insn = next_insn; |
1754 | insns_num--; |
1755 | } |
1756 | } |
1757 | } |
1758 | if (insns_num > 1) |
1759 | return false; |
1760 | } |
1761 | start = first_insn != NULL ? first_insn : start_insn; |
1762 | finish = second_insn != NULL ? second_insn : start_insn; |
1763 | return true; |
1764 | } |
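/* Illustrative sketch (standalone, not GCC code): bracketing the insns
   that mention a pseudo by walking outwards from one known insn, a
   simplified variant of the PREV_INSN/NEXT_INSN loop above that only
   extends over directly adjacent uses.  The toy chain and use set are
   hypothetical stand-ins for the insn chain and the insn bitmap.  */
#include <cstdio>
#include <set>

struct toy_insn
{
  int uid;
  toy_insn *prev, *next;
};

int main ()
{
  toy_insn chain[5];
  for (int i = 0; i < 5; i++)
    chain[i] = {i, i > 0 ? &chain[i - 1] : nullptr,
                i < 4 ? &chain[i + 1] : nullptr};
  std::set<int> uses = {1, 2, 3};   /* insns referring to the pseudo */
  toy_insn *first = &chain[2], *last = &chain[2];   /* one known use */
  while (first->prev != nullptr && uses.count (first->prev->uid))
    first = first->prev;
  while (last->next != nullptr && uses.count (last->next->uid))
    last = last->next;
  std::printf ("start=insn %d, finish=insn %d\n", first->uid, last->uid);
}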
1765 | |
1766 | /* Process reload pseudos which did not get a hard reg, split a hard |
1767 | reg live range within the live range of a reload pseudo, and then |
1768 | return TRUE. If we did not split a hard reg live range, report an |
1769 | error and return FALSE. */ |
1770 | bool |
1771 | lra_split_hard_reg_for (void) |
1772 | { |
1773 | int i, regno; |
1774 | rtx_insn *insn, *first, *last; |
1775 | unsigned int u; |
1776 | bitmap_iterator bi; |
1777 | enum reg_class rclass; |
1778 | int max_regno = max_reg_num (); |
1779 | /* We did not assign hard regs to reload pseudos after two |
1780 | iterations. Either it's an asm and something is wrong with the |
1781 | constraints, or we have run out of spill registers; error out in |
1782 | either case. */ |
1783 | bool asm_p = false, spill_p = false; |
1784 | bitmap_head failed_reload_insns, failed_reload_pseudos, over_split_insns; |
1785 | |
1786 | if (lra_dump_file != NULL) |
1787 | fprintf (lra_dump_file, |
1788 | "\n****** Splitting a hard reg after assignment #%d: ******\n\n", |
1789 | lra_assignment_iter); |
1790 | bitmap_initialize (&failed_reload_pseudos, ®_obstack); |
1791 | bitmap_initialize (&non_reload_pseudos, ®_obstack); |
1792 | bitmap_ior (&non_reload_pseudos, &lra_inheritance_pseudos, &lra_split_regs); |
1793 | bitmap_ior_into (&non_reload_pseudos, &lra_subreg_reload_pseudos); |
1794 | bitmap_ior_into (&non_reload_pseudos, &lra_optional_reload_pseudos); |
1795 | bitmap_initialize (&over_split_insns, ®_obstack); |
1796 | for (i = lra_constraint_new_regno_start; i < max_regno; i++) |
1797 | if (reg_renumber[i] < 0 && lra_reg_info[i].nrefs != 0 |
1798 | && (rclass = lra_get_allocno_class (i)) != NO_REGS |
1799 | && ! bitmap_bit_p (&non_reload_pseudos, i)) |
1800 | { |
1801 | if (! find_reload_regno_insns (i, first, last)) |
1802 | continue; |
1803 | if (BLOCK_FOR_INSN (first) == BLOCK_FOR_INSN (last)) |
1804 | { |
1805 | /* Check that we are not trying to split over the same insn |
1806 | requiring reloads, to avoid splitting the same hard reg twice |
1807 | or more. If we need several hard regs split over the same |
1808 | insn, it can be finished on the next iterations. |
1809 | |
1810 | The following loop's iteration count is small as we split a |
1811 | hard reg over a very small range. */ |
1812 | for (insn = first; |
1813 | insn != NEXT_INSN (last); |
1814 | insn = NEXT_INSN (insn)) |
1815 | if (bitmap_bit_p (&over_split_insns, INSN_UID (insn))) |
1816 | break; |
1817 | if (insn != NEXT_INSN (last) |
1818 | || !spill_hard_reg_in_range (i, rclass, first, last)) |
1819 | { |
1820 | bitmap_set_bit (&failed_reload_pseudos, i); |
1821 | } |
1822 | else |
1823 | { |
1824 | for (insn = first; |
1825 | insn != NEXT_INSN (last); |
1826 | insn = NEXT_INSN (insn)) |
1827 | bitmap_set_bit (&over_split_insns, INSN_UID (insn)); |
1828 | spill_p = true; |
1829 | } |
1830 | } |
1831 | } |
1832 | bitmap_clear (&over_split_insns); |
1833 | if (spill_p) |
1834 | { |
1835 | bitmap_clear (&failed_reload_pseudos); |
1836 | return true; |
1837 | } |
1838 | bitmap_clear (&non_reload_pseudos); |
1839 | bitmap_initialize (&failed_reload_insns, ®_obstack); |
1840 | EXECUTE_IF_SET_IN_BITMAP (&failed_reload_pseudos, 0, u, bi) |
1841 | { |
1842 | regno = u; |
1843 | bitmap_ior_into (&failed_reload_insns, |
1844 | &lra_reg_info[regno].insn_bitmap); |
1845 | lra_setup_reg_renumber |
1846 | (regno, ira_class_hard_regs[lra_get_allocno_class (regno)][0], false); |
1847 | } |
1848 | EXECUTE_IF_SET_IN_BITMAP (&failed_reload_insns, 0, u, bi) |
1849 | { |
1850 | insn = lra_insn_recog_data[u]->insn; |
1851 | if (asm_noperands (PATTERN (insn)) >= 0) |
1852 | { |
1853 | lra_asm_error_p = asm_p = true; |
1854 | error_for_asm (insn, |
1855 | "%<asm%> operand has impossible constraints"); |
1856 | /* Avoid further trouble with this insn. */ |
1857 | if (JUMP_P (insn)) |
1858 | { |
1859 | ira_nullify_asm_goto (insn); |
1860 | lra_update_insn_regno_info (insn); |
1861 | } |
1862 | else |
1863 | { |
1864 | PATTERN (insn) = gen_rtx_USE (VOIDmode, const0_rtx); |
1865 | lra_set_insn_deleted (insn); |
1866 | } |
1867 | } |
1868 | else if (!asm_p) |
1869 | { |
1870 | error ("unable to find a register to spill"); |
1871 | fatal_insn ("this is the insn:", insn); |
1872 | } |
1873 | } |
1874 | bitmap_clear (&failed_reload_pseudos); |
1875 | bitmap_clear (&failed_reload_insns); |
1876 | return false; |
1877 | } |