File: build/gcc/ira-emit.cc
Warning: line 796, column 7: Value stored to 'to' is never read
1 | /* Integrated Register Allocator.  Changing code and generating moves.
2 |    Copyright (C) 2006-2023 Free Software Foundation, Inc.
3 |    Contributed by Vladimir Makarov <vmakarov@redhat.com>.
4 |
5 | This file is part of GCC.
6 |
7 | GCC is free software; you can redistribute it and/or modify it under
8 | the terms of the GNU General Public License as published by the Free
9 | Software Foundation; either version 3, or (at your option) any later
10 | version.
11 |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 | FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
15 | for more details.
16 |
17 | You should have received a copy of the GNU General Public License
18 | along with GCC; see the file COPYING3.  If not see
19 | <http://www.gnu.org/licenses/>.  */
20 |
21 | /* When we have more than one region, we need to change the original RTL
22 |    code after coloring.  Let us consider two allocnos representing the
23 |    same pseudo-register outside and inside a region respectively.
24 |    They can get different hard registers.  The reload pass works on a
25 |    pseudo-register basis and there is no way to tell reload that a
26 |    pseudo could be in different registers, and it is even more
27 |    difficult to say in which places of the code the pseudo should have
28 |    particular hard registers.  So in this case IRA has to create and
29 |    use a new pseudo-register inside the region and add code to move
30 |    allocno values on the region's borders.  This is done by the code
31 |    in this file.
32 |
33 |    The code makes a top-down traversal of the regions and generates new
34 |    pseudos and the move code on the region borders.  In some
35 |    complicated cases IRA can create a new pseudo used temporarily to
36 |    move allocno values when a swap of values stored in two
37 |    hard registers is needed (e.g. two allocnos representing different
38 |    pseudos outside the region got hard registers 1 and 2 respectively,
39 |    and the corresponding allocnos inside the region got hard
40 |    registers 2 and 1 respectively).  At this stage, the new pseudo is
41 |    marked as spilled.
42 |
43 |    IRA still creates the pseudo-register and the moves on the region
44 |    borders even when both corresponding allocnos were assigned to
45 |    the same hard register.  This is done because, if the reload pass for
46 |    some reason spills a pseudo-register representing the original
47 |    pseudo outside or inside the region, the effect will be smaller
48 |    because the other pseudo will still be in the hard register.  In most
49 |    cases, this is better than spilling the original pseudo over its
50 |    whole live range.  If reload does not change the allocation for the
51 |    two pseudo-registers, the trivial move will be removed by
52 |    post-reload optimizations.
53 |
54 |    IRA does not generate a new pseudo and moves for the allocno values
55 |    if both allocnos representing an original pseudo inside and
56 |    outside the region are assigned to the same hard register and the
57 |    register pressure in the region for the corresponding pressure class
58 |    is less than the number of available hard registers for that class.
59 |
60 |    IRA also does some optimizations to remove redundant moves which are
61 |    transformed into stores by the reload pass on CFG edges
62 |    representing exits from the region.
63 |
64 |    IRA tries to reduce duplication of code generated on CFG edges
65 |    which are entries to and exits from regions by moving some code to
66 |    the edge sources or destinations when possible.  */
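/* Editorial sketch (not from the original sources): in the swap example
   above, the two border moves read each other's destination hard
   registers, so neither can be emitted first.  Breaking the cycle with
   the temporary spilled pseudo T gives a valid sequence, roughly:

       T := Q;   P' := P;   Q' := T;

   where P and Q are the pseudos outside the region and P' and Q' the
   new ones inside it.  See modify_move_list below for the
   implementation.  */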
67 | |
68 | #include "config.h" |
69 | #include "system.h" |
70 | #include "coretypes.h" |
71 | #include "backend.h" |
72 | #include "rtl.h" |
73 | #include "tree.h" |
74 | #include "predict.h" |
75 | #include "df.h" |
76 | #include "insn-config.h" |
77 | #include "regs.h" |
78 | #include "memmodel.h" |
79 | #include "ira.h" |
80 | #include "ira-int.h" |
81 | #include "cfgrtl.h" |
82 | #include "cfgbuild.h" |
83 | #include "expr.h" |
84 | #include "reload.h" |
85 | #include "cfgloop.h" |
86 | |
87 | |
88 | /* Data used to emit live range split insns and to flatten the IR.  */
89 | ira_emit_data_t ira_allocno_emit_data; |
90 | |
91 | /* Definitions for vectors of pointers. */ |
92 | typedef void *void_p; |
93 | |
94 | /* Pointers to data allocated for allocnos being created during |
95 | emitting. Usually there are quite few such allocnos because they |
96 |    are created only for resolving loops in register shuffling.  */
97 | static vec<void_p> new_allocno_emit_data_vec; |
98 | |
99 | /* Allocate and initiate the emit data. */ |
100 | void |
101 | ira_initiate_emit_data (void) |
102 | { |
103 | ira_allocno_t a; |
104 | ira_allocno_iterator ai; |
105 | |
106 | ira_allocno_emit_data |
107 | = (ira_emit_data_t) ira_allocate (ira_allocnos_num |
108 | * sizeof (struct ira_emit_data)); |
109 | memset (ira_allocno_emit_data, 0, |
110 | ira_allocnos_num * sizeof (struct ira_emit_data)); |
111 |   FOR_EACH_ALLOCNO (a, ai)
112 |     ALLOCNO_ADD_DATA (a) = ira_allocno_emit_data + ALLOCNO_NUM (a);
113 | new_allocno_emit_data_vec.create (50); |
114 | |
115 | } |
116 | |
117 | /* Free the emit data. */ |
118 | void |
119 | ira_finish_emit_data (void) |
120 | { |
121 | void_p p; |
122 | ira_allocno_t a; |
123 | ira_allocno_iterator ai; |
124 | |
125 | ira_free (ira_allocno_emit_data); |
126 |   FOR_EACH_ALLOCNO (a, ai)
127 |     ALLOCNO_ADD_DATA (a) = NULL;
128 |   while (new_allocno_emit_data_vec.length () != 0)
129 | { |
130 | p = new_allocno_emit_data_vec.pop (); |
131 | ira_free (p); |
132 | } |
133 | new_allocno_emit_data_vec.release (); |
134 | } |
135 | |
136 | /* Create and return a new allocno with given REGNO and |
137 | LOOP_TREE_NODE. Allocate emit data for it. */ |
138 | static ira_allocno_t |
139 | create_new_allocno (int regno, ira_loop_tree_node_t loop_tree_node) |
140 | { |
141 | ira_allocno_t a; |
142 | |
143 | a = ira_create_allocno (regno, false, loop_tree_node); |
144 |   ALLOCNO_ADD_DATA (a) = ira_allocate (sizeof (struct ira_emit_data));
145 |   memset (ALLOCNO_ADD_DATA (a), 0, sizeof (struct ira_emit_data));
146 |   new_allocno_emit_data_vec.safe_push (ALLOCNO_ADD_DATA (a));
147 | return a; |
148 | } |
149 | |
150 | |
151 | |
152 | /* See comments below. */ |
153 | typedef struct move *move_t; |
154 | |
155 | /* The structure represents an allocno move. Both allocnos have the |
156 | same original regno but different allocation. */ |
157 | struct move |
158 | { |
159 | /* The allocnos involved in the move. */ |
160 | ira_allocno_t from, to; |
161 | /* The next move in the move sequence. */ |
162 | move_t next; |
163 | /* Used for finding dependencies. */ |
164 | bool visited_p; |
165 | /* The size of the following array. */ |
166 | int deps_num; |
167 |   /* Moves on which the given move depends.  Dependencies can be
168 |      cyclic; in that case we need a temporary to generate the moves.
169 |      The sequence A1->A2, B1->B2 where A1 and B2 are assigned to reg R1
170 |      and A2 and B1 are assigned to reg R2 is an example of such cyclic
171 |      dependencies.  */
172 | move_t *deps; |
173 | /* First insn generated for the move. */ |
174 | rtx_insn *insn; |
175 | }; |
176 | |
177 | /* Array of moves (indexed by BB index) which should be put at the |
178 | start/end of the corresponding basic blocks. */ |
179 | static move_t *at_bb_start, *at_bb_end; |
180 | |
181 | /* Max regno before renaming some pseudo-registers. For example, the |
182 | same pseudo-register can be renamed in a loop if its allocation is |
183 | different outside the loop. */ |
184 | static int max_regno_before_changing; |
185 | |
186 | /* Return a new move of allocnos TO and FROM.  */
187 | static move_t |
188 | create_move (ira_allocno_t to, ira_allocno_t from) |
189 | { |
190 | move_t move; |
191 | |
192 | move = (move_t) ira_allocate (sizeof (struct move)); |
193 |   move->deps = NULL;
194 |   move->deps_num = 0;
195 |   move->to = to;
196 |   move->from = from;
197 |   move->next = NULL;
198 |   move->insn = NULL;
199 | move->visited_p = false; |
200 | return move; |
201 | } |
202 | |
203 | /* Free memory for MOVE and its dependencies. */ |
204 | static void |
205 | free_move (move_t move) |
206 | { |
207 |   if (move->deps != NULL)
208 | ira_free (move->deps); |
209 | ira_free (move); |
210 | } |
211 | |
212 | /* Free memory for list of the moves given by its HEAD. */ |
213 | static void |
214 | free_move_list (move_t head) |
215 | { |
216 | move_t next; |
217 | |
218 |   for (; head != NULL; head = next)
219 | { |
220 | next = head->next; |
221 | free_move (head); |
222 | } |
223 | } |
224 | |
225 | /* Return TRUE if the move lists LIST1 and LIST2 are equal (two
226 |    moves are equal if they involve the same allocnos).  */
227 | static bool |
228 | eq_move_lists_p (move_t list1, move_t list2) |
229 | { |
230 |   for (; list1 != NULL && list2 != NULL;
231 | list1 = list1->next, list2 = list2->next) |
232 | if (list1->from != list2->from || list1->to != list2->to) |
233 | return false; |
234 | return list1 == list2; |
235 | } |
236 | |
237 | /* Print move list LIST into file F. */ |
238 | static void |
239 | print_move_list (FILE *f, move_t list) |
240 | { |
241 |   for (; list != NULL; list = list->next)
242 |     fprintf (f, " a%dr%d->a%dr%d",
243 |             ALLOCNO_NUM (list->from), ALLOCNO_REGNO (list->from),
244 |             ALLOCNO_NUM (list->to), ALLOCNO_REGNO (list->to));
245 | fprintf (f, "\n"); |
246 | } |
247 | |
248 | extern void ira_debug_move_list (move_t list); |
249 | |
250 | /* Print move list LIST into stderr. */ |
251 | void |
252 | ira_debug_move_list (move_t list) |
253 | { |
254 |   print_move_list (stderr, list);
255 | } |
256 | |
257 | /* This recursive function changes pseudo-registers in *LOC if it is |
258 | necessary. The function returns TRUE if a change was done. */ |
259 | static bool |
260 | change_regs (rtx *loc) |
261 | { |
262 | int i, regno, result = false; |
263 | const char *fmt; |
264 | enum rtx_code code; |
265 | rtx reg; |
266 | |
267 |   if (*loc == NULL_RTX)
268 |     return false;
269 |   code = GET_CODE (*loc);
270 | if (code == REG) |
271 | { |
272 |       regno = REGNO (*loc);
273 |       if (regno < FIRST_PSEUDO_REGISTER)
274 | return false; |
275 | if (regno >= max_regno_before_changing) |
276 | /* It is a shared register which was changed already. */ |
277 | return false; |
278 |       if (ira_curr_regno_allocno_map[regno] == NULL)
279 | return false; |
280 | reg = allocno_emit_reg (ira_curr_regno_allocno_map[regno]); |
281 | if (reg == *loc) |
282 | return false; |
283 | *loc = reg; |
284 | return true; |
285 | } |
286 | |
287 |   fmt = GET_RTX_FORMAT (code);
288 |   for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
289 | { |
290 | if (fmt[i] == 'e') |
291 |        result = change_regs (&XEXP (*loc, i)) || result;
292 |       else if (fmt[i] == 'E')
293 |        {
294 |          int j;
295 |
296 |          for (j = XVECLEN (*loc, i) - 1; j >= 0; j--)
297 |            result = change_regs (&XVECEXP (*loc, i, j)) || result;
298 | } |
299 | } |
300 | return result; |
301 | } |
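/* Editorial usage sketch (the regnos are made up for illustration):
   given an insn body (set (reg 130) (plus (reg 128) (const_int 1))),
   change_regs walks the expression and, for each pseudo whose allocno
   in ira_curr_regno_allocno_map has been given a fresh emit register,
   substitutes that register in place, e.g. (reg 128) -> (reg 140).  */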
302 | |
303 | static bool |
304 | change_regs_in_insn (rtx_insn **insn_ptr) |
305 | { |
306 | rtx rtx = *insn_ptr; |
307 | bool result = change_regs (&rtx); |
308 | *insn_ptr = as_a <rtx_insn *> (rtx); |
309 | return result; |
310 | } |
311 | |
312 | /* Attach MOVE to the edge E. The move is attached to the head of the |
313 | list if HEAD_P is TRUE. */ |
314 | static void |
315 | add_to_edge_list (edge e, move_t move, bool head_p) |
316 | { |
317 | move_t last; |
318 | |
319 |   if (head_p || e->aux == NULL)
320 |     {
321 |       move->next = (move_t) e->aux;
322 |       e->aux = move;
323 |     }
324 |   else
325 |     {
326 |       for (last = (move_t) e->aux; last->next != NULL; last = last->next)
327 |        ;
328 |       last->next = move;
329 |       move->next = NULL;
330 | } |
331 | } |
332 | |
333 | /* Create and return new pseudo-register with the same attributes as |
334 | ORIGINAL_REG. */ |
335 | rtx |
336 | ira_create_new_reg (rtx original_reg) |
337 | { |
338 | rtx new_reg; |
339 | |
340 |   new_reg = gen_reg_rtx (GET_MODE (original_reg));
341 |   ORIGINAL_REGNO (new_reg) = ORIGINAL_REGNO (original_reg);
342 |   REG_USERVAR_P (new_reg) = REG_USERVAR_P (original_reg);
343 |   REG_POINTER (new_reg) = REG_POINTER (original_reg);
344 |   REG_ATTRS (new_reg) = REG_ATTRS (original_reg);
345 |   if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
346 |     fprintf (ira_dump_file, "  Creating newreg=%i from oldreg=%i\n",
347 |             REGNO (new_reg), REGNO (original_reg));
348 | ira_expand_reg_equiv (); |
349 | return new_reg; |
350 | } |
351 | |
352 | /* Return TRUE if the loop given by SUBNODE is inside the loop given by
353 |    NODE.  */
354 | static bool |
355 | subloop_tree_node_p (ira_loop_tree_node_t subnode, ira_loop_tree_node_t node) |
356 | { |
357 |   for (; subnode != NULL; subnode = subnode->parent)
358 | if (subnode == node) |
359 | return true; |
360 | return false; |
361 | } |
362 | |
363 | /* Set up member `reg' to REG for allocnos which have the same regno as
364 |    ALLOCNO and which are inside the loop corresponding to ALLOCNO.  */
365 | static void |
366 | set_allocno_reg (ira_allocno_t allocno, rtx reg) |
367 | { |
368 | int regno; |
369 | ira_allocno_t a; |
370 | ira_loop_tree_node_t node; |
371 | |
372 |   node = ALLOCNO_LOOP_TREE_NODE (allocno);
373 |   for (a = ira_regno_allocno_map[ALLOCNO_REGNO (allocno)];
374 |        a != NULL;
375 |        a = ALLOCNO_NEXT_REGNO_ALLOCNO (a))
376 |     if (subloop_tree_node_p (ALLOCNO_LOOP_TREE_NODE (a), node))
377 |       ALLOCNO_EMIT_DATA (a)->reg = reg;
378 |   for (a = ALLOCNO_CAP (allocno); a != NULL; a = ALLOCNO_CAP (a))
379 |     ALLOCNO_EMIT_DATA (a)->reg = reg;
380 |   regno = ALLOCNO_REGNO (allocno);
381 | for (a = allocno;;) |
382 | { |
383 |       if (a == NULL || (a = ALLOCNO_CAP (a)) == NULL)
384 |        {
385 |          node = node->parent;
386 |          if (node == NULL)
387 |            break;
388 |          a = node->regno_allocno_map[regno];
389 |        }
390 |       if (a == NULL)
391 |        continue;
392 |       if (ALLOCNO_EMIT_DATA (a)->child_renamed_p)
393 |        break;
394 |       ALLOCNO_EMIT_DATA (a)->child_renamed_p = true;
395 | } |
396 | } |
397 | |
398 | /* Return true if there is an entry to the given loop not from its
399 |    parent (or grandparent) block.  For example, this is possible for
400 |    two adjacent loops inside another loop.  */
401 | static bool |
402 | entered_from_non_parent_p (ira_loop_tree_node_t loop_node) |
403 | { |
404 | ira_loop_tree_node_t bb_node, src_loop_node, parent; |
405 | edge e; |
406 | edge_iterator ei; |
407 | |
408 |   for (bb_node = loop_node->children;
409 |        bb_node != NULL;
410 |        bb_node = bb_node->next)
411 |     if (bb_node->bb != NULL)
412 |       {
413 |        FOR_EACH_EDGE (e, ei, bb_node->bb->preds)
414 |          if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
415 |              && (src_loop_node = IRA_BB_NODE (e->src)->parent) != loop_node)
416 | { |
417 |              for (parent = src_loop_node->parent;
418 |                   parent != NULL;
419 |                   parent = parent->parent)
420 |                if (parent == loop_node)
421 |                  break;
422 |              if (parent != NULL)
423 |                /* That is an exit from a nested loop -- skip it.  */
424 |                continue;
425 |              for (parent = loop_node->parent;
426 |                   parent != NULL;
427 |                   parent = parent->parent)
428 |                if (src_loop_node == parent)
429 |                  break;
430 |              if (parent == NULL)
431 | return true; |
432 | } |
433 | } |
434 | return false; |
435 | } |
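/* Editorial illustration (hypothetical CFG): with two sibling loops L1
   and L2 inside a parent loop L0, the edge from L1's exit block into
   L2's header enters L2 from a block whose loop node is L1, which is
   neither L2 itself nor one of L2's (grand)parents, so
   entered_from_non_parent_p returns true for L2's loop node.  */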
436 | |
437 | /* Set up ENTERED_FROM_NON_PARENT_P for each loop region. */ |
438 | static void |
439 | setup_entered_from_non_parent_p (void) |
440 | { |
441 | unsigned int i; |
442 | loop_p loop; |
443 | |
444 |   ira_assert (current_loops != NULL);
445 |   FOR_EACH_VEC_SAFE_ELT (get_loops (cfun), i, loop)
446 |     if (ira_loop_nodes[i].regno_allocno_map != NULL)
447 | ira_loop_nodes[i].entered_from_non_parent_p |
448 | = entered_from_non_parent_p (&ira_loop_nodes[i]); |
449 | } |
450 | |
451 | /* Return TRUE if move of SRC_ALLOCNO (assigned to hard register) to |
452 | DEST_ALLOCNO (assigned to memory) can be removed because it does |
453 |    not change the value of the destination.  One possible reason for this
454 | is the situation when SRC_ALLOCNO is not modified in the |
455 | corresponding loop. */ |
456 | static bool |
457 | store_can_be_removed_p (ira_allocno_t src_allocno, ira_allocno_t dest_allocno) |
458 | { |
459 | int regno, orig_regno; |
460 | ira_allocno_t a; |
461 | ira_loop_tree_node_t node; |
462 | |
463 |   ira_assert (ALLOCNO_CAP_MEMBER (src_allocno) == NULL
464 |              && ALLOCNO_CAP_MEMBER (dest_allocno) == NULL);
465 |   orig_regno = ALLOCNO_REGNO (src_allocno);
466 |   regno = REGNO (allocno_emit_reg (dest_allocno));
467 |   for (node = ALLOCNO_LOOP_TREE_NODE (src_allocno);
468 |        node != NULL;
469 |        node = node->parent)
470 | { |
471 | a = node->regno_allocno_map[orig_regno]; |
472 |       ira_assert (a != NULL);
473 |       if (REGNO (allocno_emit_reg (a)) == (unsigned) regno)
474 |        /* We reached the destination and everything is OK.  */
475 | return true; |
476 | else if (bitmap_bit_p (node->modified_regnos, orig_regno)) |
477 | return false; |
478 | else if (node->entered_from_non_parent_p) |
479 |        /* If there is a path from a destination loop block to the
480 |           source loop header containing basic blocks of non-parents
481 |           (grandparents) of the source loop, we should have checked
482 |           modifications of the pseudo on this path too to decide
483 |           whether the store can be removed.  It could be done by
484 |           solving a data-flow problem.  Unfortunately such a global
485 |           solution would complicate IR flattening.  Therefore we just
486 |           prohibit removal of the store in such a complicated case.  */
487 | return false; |
488 | } |
489 | /* It is actually a loop entry -- do not remove the store. */ |
490 | return false; |
491 | } |
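/* Editorial scenario (hypothetical): pseudo P gets hard register R
   inside loop L and memory outside it.  If no loop node on the path
   from L up to the node holding the destination allocno has P in its
   modified_regnos bitmap, and no node on that path is entered from a
   non-parent, the store R -> memory at L's exit would write back a
   value the memory location already holds, so the move is dropped.  */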
492 | |
493 | /* Generate and attach moves to the edge E. This looks at the final |
494 | regnos of allocnos living on the edge with the same original regno |
495 | to figure out when moves should be generated. */ |
496 | static void |
497 | generate_edge_moves (edge e) |
498 | { |
499 | ira_loop_tree_node_t src_loop_node, dest_loop_node; |
500 | unsigned int regno; |
501 | bitmap_iterator bi; |
502 | ira_allocno_t src_allocno, dest_allocno, *src_map, *dest_map; |
503 | move_t move; |
504 | bitmap regs_live_in_dest, regs_live_out_src; |
505 | |
506 |   src_loop_node = IRA_BB_NODE (e->src)->parent;
507 |   dest_loop_node = IRA_BB_NODE (e->dest)->parent;
508 |   e->aux = NULL;
509 | if (src_loop_node == dest_loop_node) |
510 | return; |
511 | src_map = src_loop_node->regno_allocno_map; |
512 | dest_map = dest_loop_node->regno_allocno_map; |
513 | regs_live_in_dest = df_get_live_in (e->dest); |
514 | regs_live_out_src = df_get_live_out (e->src); |
515 |   EXECUTE_IF_SET_IN_REG_SET (regs_live_in_dest,
516 |                             FIRST_PSEUDO_REGISTER, regno, bi)
517 | if (bitmap_bit_p (regs_live_out_src, regno)) |
518 | { |
519 | src_allocno = src_map[regno]; |
520 | dest_allocno = dest_map[regno]; |
521 |       if (REGNO (allocno_emit_reg (src_allocno))
522 |          == REGNO (allocno_emit_reg (dest_allocno)))
523 | continue; |
524 |       /* Remove unnecessary stores at the region exit.  We should do
525 |         this for read-only memory for sure, and this is guaranteed
526 |         because we never generate moves on region borders (see the
527 |         checking in function change_loop).  */
528 |       if (ALLOCNO_HARD_REGNO (dest_allocno) < 0
529 |          && ALLOCNO_HARD_REGNO (src_allocno) >= 0
530 |          && store_can_be_removed_p (src_allocno, dest_allocno))
531 | { |
532 |          ALLOCNO_EMIT_DATA (src_allocno)->mem_optimized_dest = dest_allocno;
533 |          ALLOCNO_EMIT_DATA (dest_allocno)->mem_optimized_dest_p = true;
534 |          if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
535 |            fprintf (ira_dump_file, "      Remove r%d:a%d->a%d(mem)\n",
536 |                     regno, ALLOCNO_NUM (src_allocno),
537 |                     ALLOCNO_NUM (dest_allocno));
538 | continue; |
539 | } |
540 | move = create_move (dest_allocno, src_allocno); |
541 | add_to_edge_list (e, move, true); |
542 | } |
543 | } |
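/* Editorial example (regnos hypothetical): if pseudo r100 is live across
   the edge and was renamed to r200 inside the loop, the emit registers
   of the two allocnos differ, so a move from the source allocno to the
   destination allocno is attached to the edge; if both allocnos kept
   regno r100, no move is generated for it.  */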
544 | |
545 | /* Bitmap of allocnos local for the current loop. */ |
546 | static bitmap local_allocno_bitmap; |
547 | |
548 | /* This bitmap is used to detect when we need to generate and use a
549 |    new pseudo-register while processing allocnos with the same original
550 |    regno.  */
551 | static bitmap used_regno_bitmap; |
552 | |
553 | /* This bitmap contains regnos of allocnos which were renamed locally |
554 | because the allocnos correspond to disjoint live ranges in loops |
555 | with a common parent. */ |
556 | static bitmap renamed_regno_bitmap; |
557 | |
558 | /* Change (if necessary) pseudo-registers inside loop given by loop |
559 | tree node NODE. */ |
560 | static void |
561 | change_loop (ira_loop_tree_node_t node) |
562 | { |
563 | bitmap_iterator bi; |
564 | unsigned int i; |
565 | int regno; |
566 | bool used_p; |
567 | ira_allocno_t allocno, parent_allocno, *map; |
568 | rtx_insn *insn; |
569 | rtx original_reg; |
570 | enum reg_class aclass, pclass; |
571 | ira_loop_tree_node_t parent; |
572 | |
573 | if (node != ira_loop_tree_root) |
574 | { |
575 |       ira_assert (current_loops != NULL);
576 | |
577 |       if (node->bb != NULL)
578 |        {
579 |          FOR_BB_INSNS (node->bb, insn)
580 |            if (INSN_P (insn) && change_regs_in_insn (&insn))
581 | { |
582 | df_insn_rescan (insn); |
583 | df_notes_rescan (insn); |
584 | } |
585 | return; |
586 | } |
587 | |
588 |       if (internal_flag_ira_verbose > 3 && ira_dump_file != NULL)
589 | fprintf (ira_dump_file, |
590 | " Changing RTL for loop %d (header bb%d)\n", |
591 | node->loop_num, node->loop->header->index); |
592 | |
593 | parent = ira_curr_loop_tree_node->parent; |
594 | map = parent->regno_allocno_map; |
595 |       EXECUTE_IF_SET_IN_REG_SET (ira_curr_loop_tree_node->border_allocnos,
596 |                                 0, i, bi)
597 | { |
598 | allocno = ira_allocnos[i]; |
599 |          regno = ALLOCNO_REGNO (allocno);
600 |          aclass = ALLOCNO_CLASS (allocno);
601 |          pclass = ira_pressure_class_translate[aclass];
602 |          parent_allocno = map[regno];
603 |          ira_assert (regno < ira_reg_equiv_len);
604 |          /* We generate a move even when both allocnos have the same
605 |             hard register because the reload pass can put an allocno
606 |             into memory, in which case we will have live range
607 |             splitting.  If that does not happen, such moves between
608 |             identical hard registers will be removed.  The worst case,
609 |             when both allocnos are put into memory by reload, is very rare.  */
610 |          if (parent_allocno != NULL
611 |              && (ALLOCNO_HARD_REGNO (allocno)
612 |                  == ALLOCNO_HARD_REGNO (parent_allocno))
613 |              && (ALLOCNO_HARD_REGNO (allocno) < 0
614 |                  || (parent->reg_pressure[pclass] + 1
615 |                      <= ira_class_hard_regs_num[pclass])
616 |                  || TEST_HARD_REG_BIT (ira_prohibited_mode_move_regs
617 |                                        [ALLOCNO_MODE (allocno)],
618 |                                        ALLOCNO_HARD_REGNO (allocno))
619 |                  /* don't create copies because reload can spill an
620 |                     allocno set by copy although the allocno will not
621 |                     get a memory slot.  */
622 |                  || ira_equiv_no_lvalue_p (regno)
623 |                  || (pic_offset_table_rtx != NULL
624 |                      && (ALLOCNO_REGNO (allocno)
625 |                          == (int) REGNO (pic_offset_table_rtx)))))
626 | continue; |
627 | original_reg = allocno_emit_reg (allocno); |
628 |          if (parent_allocno == NULL
629 |              || (REGNO (allocno_emit_reg (parent_allocno))
630 |                  == REGNO (original_reg)))
631 | { |
632 | if (internal_flag_ira_verbose > 3 && ira_dump_file) |
633 |                fprintf (ira_dump_file, "  %i vs parent %i:",
634 |                         ALLOCNO_HARD_REGNO (allocno),
635 |                         ALLOCNO_HARD_REGNO (parent_allocno));
636 | set_allocno_reg (allocno, ira_create_new_reg (original_reg)); |
637 | } |
638 | } |
639 | } |
640 |   /* Rename locals: Local allocnos with the same regno in different
641 |      loops might get different hard registers.  So we need to change
642 |      ALLOCNO_REG.  */
643 | bitmap_and_compl (local_allocno_bitmap, |
644 | ira_curr_loop_tree_node->all_allocnos, |
645 | ira_curr_loop_tree_node->border_allocnos); |
646 |   EXECUTE_IF_SET_IN_REG_SET (local_allocno_bitmap, 0, i, bi)
647 | { |
648 | allocno = ira_allocnos[i]; |
649 |       regno = ALLOCNO_REGNO (allocno);
650 |       if (ALLOCNO_CAP_MEMBER (allocno) != NULL)
651 |        continue;
652 |       used_p = !bitmap_set_bit (used_regno_bitmap, regno);
653 |       ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
654 | if (! used_p) |
655 | continue; |
656 | bitmap_set_bit (renamed_regno_bitmap, regno); |
657 | set_allocno_reg (allocno, ira_create_new_reg (allocno_emit_reg (allocno))); |
658 | } |
659 | } |
660 | |
661 | /* Set up the flag somewhere_renamed_p.  */
662 | static void |
663 | set_allocno_somewhere_renamed_p (void) |
664 | { |
665 | unsigned int regno; |
666 | ira_allocno_t allocno; |
667 | ira_allocno_iterator ai; |
668 | |
669 |   FOR_EACH_ALLOCNO (allocno, ai)
670 | { |
671 |       regno = ALLOCNO_REGNO (allocno);
672 |       if (bitmap_bit_p (renamed_regno_bitmap, regno)
673 |          && REGNO (allocno_emit_reg (allocno)) == regno)
674 |        ALLOCNO_EMIT_DATA (allocno)->somewhere_renamed_p = true;
675 | } |
676 | } |
677 | |
678 | /* Return TRUE if move lists on all edges given in vector VEC are |
679 | equal. */ |
680 | static bool |
681 | eq_edge_move_lists_p (vec<edge, va_gc> *vec) |
682 | { |
683 | move_t list; |
684 | int i; |
685 | |
686 |   list = (move_t) EDGE_I (vec, 0)->aux;
687 |   for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
688 |     if (! eq_move_lists_p (list, (move_t) EDGE_I (vec, i)->aux))
689 | return false; |
690 | return true; |
691 | } |
692 | |
693 | /* Look at all entry edges (if START_P) or exit edges of basic block |
694 | BB and put move lists at the BB start or end if it is possible. In |
695 | other words, this decreases code duplication of allocno moves. */ |
696 | static void |
697 | unify_moves (basic_block bb, bool start_p) |
698 | { |
699 | int i; |
700 | edge e; |
701 | move_t list; |
702 | vec<edge, va_gc> *vec; |
703 | |
704 | vec = (start_p ? bb->preds : bb->succs); |
705 |   if (EDGE_COUNT (vec) == 0 || ! eq_edge_move_lists_p (vec))
706 |     return;
707 |   e = EDGE_I (vec, 0);
708 |   list = (move_t) e->aux;
709 |   if (! start_p && control_flow_insn_p (BB_END (bb)))
710 |     return;
711 |   e->aux = NULL;
712 |   for (i = EDGE_COUNT (vec) - 1; i > 0; i--)
713 |     {
714 |       e = EDGE_I (vec, i);
715 |       free_move_list ((move_t) e->aux);
716 |       e->aux = NULL;
717 | } |
718 | if (start_p) |
719 | at_bb_start[bb->index] = list; |
720 | else |
721 | at_bb_end[bb->index] = list; |
722 | } |
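/* Editorial note: e.g. if every predecessor edge of BB carries an
   identical move list, unify_moves frees the per-edge duplicates and
   records the single surviving list in at_bb_start[BB->index], so the
   moves are emitted once at the start of BB instead of once on each
   incoming edge.  */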
723 | |
724 | /* Last move (in move sequence being processed) setting up the |
725 | corresponding hard register. */ |
726 | static move_t hard_regno_last_set[FIRST_PSEUDO_REGISTER];
727 | |
728 | /* If the element value is equal to CURR_TICK then the corresponding |
729 | element in `hard_regno_last_set' is defined and correct. */ |
730 | static int hard_regno_last_set_check[FIRST_PSEUDO_REGISTER];
731 | |
732 | /* Last move (in move sequence being processed) setting up the |
733 | corresponding allocno. */ |
734 | static move_t *allocno_last_set; |
735 | |
736 | /* If the element value is equal to CURR_TICK then the corresponding |
737 |    element in `allocno_last_set' is defined and correct.  */
738 | static int *allocno_last_set_check; |
739 | |
740 | /* Definition of vector of moves. */ |
741 | |
742 | /* This vec contains moves sorted topologically (depth-first) on their |
743 | dependency graph. */ |
744 | static vec<move_t> move_vec; |
745 | |
746 | /* The variable value is used to check the validity of elements of
747 |    the arrays `hard_regno_last_set' and
748 |    `allocno_last_set'.  */
749 | static int curr_tick; |
750 | |
751 | /* This recursive function traverses dependencies of MOVE and produces |
752 | topological sorting (in depth-first order). */ |
753 | static void |
754 | traverse_moves (move_t move) |
755 | { |
756 | int i; |
757 | |
758 | if (move->visited_p) |
759 | return; |
760 | move->visited_p = true; |
761 | for (i = move->deps_num - 1; i >= 0; i--) |
762 | traverse_moves (move->deps[i]); |
763 | move_vec.safe_push (move); |
764 | } |
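/* Editorial note: a move is pushed only after all moves in its deps
   array, so move_vec ends up in a post-order of the dependency graph;
   modify_move_list then reads move_vec back to front, placing each move
   before the moves that overwrite its source hard registers.  */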
765 | |
766 | /* Remove unnecessary moves in LIST, make a topological sort, and
767 |    remove cycles of hard reg dependencies by introducing new allocnos
768 |    assigned to memory and additional moves.  Return the resulting
769 |    move list.  */
770 | static move_t |
771 | modify_move_list (move_t list) |
772 | { |
773 | int i, n, nregs, hard_regno; |
774 | ira_allocno_t to, from; |
775 | move_t move, new_move, set_move, first, last; |
776 | |
777 |   if (list == NULL)
778 |     return NULL;
779 | /* Create move deps. */ |
780 | curr_tick++; |
781 |   for (move = list; move != NULL; move = move->next)
782 |     {
783 |       to = move->to;
784 |       if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
785 |        continue;
786 |       nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
787 | for (i = 0; i < nregs; i++) |
788 | { |
789 | hard_regno_last_set[hard_regno + i] = move; |
790 | hard_regno_last_set_check[hard_regno + i] = curr_tick; |
791 | } |
792 | } |
793 |   for (move = list; move != NULL; move = move->next)
794 |     {
795 |       from = move->from;
796 |       to = move->to;
Value stored to 'to' is never read
797 |       if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
798 |        {
799 |          nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
800 |          for (n = i = 0; i < nregs; i++)
801 |            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
802 |                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
803 |                    != ALLOCNO_REGNO (from)))
804 |              n++;
805 |          move->deps = (move_t *) ira_allocate (n * sizeof (move_t));
806 |          for (n = i = 0; i < nregs; i++)
807 |            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
808 |                && (ALLOCNO_REGNO (hard_regno_last_set[hard_regno + i]->to)
809 |                    != ALLOCNO_REGNO (from)))
810 |              move->deps[n++] = hard_regno_last_set[hard_regno + i];
811 | move->deps_num = n; |
812 | } |
813 | } |
814 | /* Topological sorting: */ |
815 | move_vec.truncate (0); |
816 |   for (move = list; move != NULL; move = move->next)
817 |     traverse_moves (move);
818 |   last = NULL;
819 |   for (i = (int) move_vec.length () - 1; i >= 0; i--)
820 |     {
821 |       move = move_vec[i];
822 |       move->next = NULL;
823 |       if (last != NULL)
824 | last->next = move; |
825 | last = move; |
826 | } |
827 | first = move_vec.last (); |
828 | /* Removing cycles: */ |
829 | curr_tick++; |
830 | move_vec.truncate (0); |
831 |   for (move = first; move != NULL; move = move->next)
832 |     {
833 |       from = move->from;
834 |       to = move->to;
835 |       if ((hard_regno = ALLOCNO_HARD_REGNO (from)) >= 0)
836 |        {
837 |          nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (from));
838 |          for (i = 0; i < nregs; i++)
839 |            if (hard_regno_last_set_check[hard_regno + i] == curr_tick
840 |                && ALLOCNO_HARD_REGNO
841 |                   (hard_regno_last_set[hard_regno + i]->to) >= 0)
842 | { |
843 | int n, j; |
844 | ira_allocno_t new_allocno; |
845 | |
846 | set_move = hard_regno_last_set[hard_regno + i]; |
847 |              /* It does not matter which loop_tree_node (of TO or
848 |                 FROM) to use for the new allocno because of
849 |                 subsequent IRA internal representation
850 |                 flattening.  */
851 |              new_allocno
852 |                = create_new_allocno (ALLOCNO_REGNO (set_move->to),
853 |                                      ALLOCNO_LOOP_TREE_NODE (set_move->to));
854 |              ALLOCNO_MODE (new_allocno) = ALLOCNO_MODE (set_move->to);
855 |              ira_set_allocno_class (new_allocno,
856 |                                     ALLOCNO_CLASS (set_move->to));
857 |              ira_create_allocno_objects (new_allocno);
858 |              ALLOCNO_ASSIGNED_P (new_allocno) = true;
859 |              ALLOCNO_HARD_REGNO (new_allocno) = -1;
860 |              ALLOCNO_EMIT_DATA (new_allocno)->reg
861 |                = ira_create_new_reg (allocno_emit_reg (set_move->to));
862 | |
863 |              /* Make it possibly conflicting with all earlier
864 |                 created allocnos.  Cases where temporary allocnos are
865 |                 created to remove the cycles are quite rare.  */
866 |              n = ALLOCNO_NUM_OBJECTS (new_allocno);
867 |              gcc_assert (n == ALLOCNO_NUM_OBJECTS (set_move->to));
868 |              for (j = 0; j < n; j++)
869 |                {
870 |                  ira_object_t new_obj = ALLOCNO_OBJECT (new_allocno, j);
871 |
872 |                  OBJECT_MIN (new_obj) = 0;
873 |                  OBJECT_MAX (new_obj) = ira_objects_num - 1;
874 |                }
875 | |
876 | new_move = create_move (set_move->to, new_allocno); |
877 | set_move->to = new_allocno; |
878 | move_vec.safe_push (new_move); |
879 | ira_move_loops_num++; |
880 |              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
881 |                fprintf (ira_dump_file,
882 |                         "    Creating temporary allocno a%dr%d\n",
883 |                         ALLOCNO_NUM (new_allocno),
884 |                         REGNO (allocno_emit_reg (new_allocno)));
885 | } |
886 | } |
887 |       if ((hard_regno = ALLOCNO_HARD_REGNO (to)) < 0)
888 |        continue;
889 |       nregs = hard_regno_nregs (hard_regno, ALLOCNO_MODE (to));
890 | for (i = 0; i < nregs; i++) |
891 | { |
892 | hard_regno_last_set[hard_regno + i] = move; |
893 | hard_regno_last_set_check[hard_regno + i] = curr_tick; |
894 | } |
895 | } |
896 | for (i = (int) move_vec.length () - 1; i >= 0; i--) |
897 | { |
898 | move = move_vec[i]; |
899 |       move->next = NULL;
900 | last->next = move; |
901 | last = move; |
902 | } |
903 | return first; |
904 | } |
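/* Editorial walkthrough of the cycle removal above (names hypothetical):
   if a move M1 = A->B has set hard register R and a later move M2 reads
   R under a different regno, M1 is found through hard_regno_last_set.
   M1's destination is then redirected to the fresh memory allocno
   (set_move->to = new_allocno), a compensating move new_allocno->B is
   queued in move_vec, and the final loop appends the queued moves at
   the end of the sorted list.  */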
905 | |
906 | /* Generate RTX move insns from the move list LIST. This updates |
907 | allocation cost using move execution frequency FREQ. */ |
908 | static rtx_insn * |
909 | emit_move_list (move_t list, int freq) |
910 | { |
911 | rtx to, from, dest; |
912 | int to_regno, from_regno, cost, regno; |
913 | rtx_insn *result, *insn; |
914 | rtx set; |
915 | machine_mode mode; |
916 | enum reg_class aclass; |
917 | |
918 | grow_reg_equivs (); |
919 | start_sequence (); |
920 |   for (; list != NULL; list = list->next)
921 |     {
922 |       start_sequence ();
923 |       to = allocno_emit_reg (list->to);
924 |       to_regno = REGNO (to);
925 |       from = allocno_emit_reg (list->from);
926 |       from_regno = REGNO (from);
927 | emit_move_insn (to, from); |
928 | list->insn = get_insns (); |
929 | end_sequence (); |
930 |       for (insn = list->insn; insn != NULL_RTX; insn = NEXT_INSN (insn))
931 | { |
932 |          /* Reload needs to have the insn codes already set up.  If
933 |             reload sets up the insn codes by itself, it may fail because
934 |             insns will have hard registers instead of pseudos and
935 |             there may be no machine insn with the given hard
936 |             registers.  */
937 | recog_memoized (insn); |
938 | /* Add insn to equiv init insn list if it is necessary. |
939 | Otherwise reload will not remove this insn if it decides |
940 | to use the equivalence. */ |
941 |          if ((set = single_set (insn)) != NULL_RTX)
942 |            {
943 |              dest = SET_DEST (set);
944 |              if (GET_CODE (dest) == SUBREG)
945 |                dest = SUBREG_REG (dest);
946 |              ira_assert (REG_P (dest));
947 |              regno = REGNO (dest);
948 |              if (regno >= ira_reg_equiv_len
949 |                  || (ira_reg_equiv[regno].invariant == NULL_RTX
950 |                      && ira_reg_equiv[regno].constant == NULL_RTX))
951 |                continue; /* regno has no equivalence.  */
952 |              ira_assert ((int) reg_equivs->length () > regno);
953 |              reg_equiv_init (regno)
954 |                = gen_rtx_INSN_LIST (VOIDmode, insn, reg_equiv_init (regno));
955 | } |
956 | } |
957 | if (ira_use_lra_p) |
958 | ira_update_equiv_info_by_shuffle_insn (to_regno, from_regno, list->insn); |
959 | emit_insn (list->insn); |
960 |       mode = ALLOCNO_MODE (list->to);
961 |       aclass = ALLOCNO_CLASS (list->to);
962 | cost = 0; |
963 |       if (ALLOCNO_HARD_REGNO (list->to) < 0)
964 |        {
965 |          if (ALLOCNO_HARD_REGNO (list->from) >= 0)
966 |            {
967 |              cost = ira_memory_move_cost[mode][aclass][0] * freq;
968 |              ira_store_cost += cost;
969 |            }
970 |        }
971 |       else if (ALLOCNO_HARD_REGNO (list->from) < 0)
972 |        {
973 |          if (ALLOCNO_HARD_REGNO (list->to) >= 0)
974 |            {
975 |              cost = ira_memory_move_cost[mode][aclass][0] * freq;
976 |              ira_load_cost += cost;
977 |            }
978 |        }
979 |       else
980 |        {
981 |          ira_init_register_move_cost_if_necessary (mode);
982 |          cost = ira_register_move_cost[mode][aclass][aclass] * freq;
983 |          ira_shuffle_cost += cost;
984 |        }
985 | ira_overall_cost += cost; |
986 | } |
987 | result = get_insns (); |
988 | end_sequence (); |
989 | return result; |
990 | } |
991 | |
992 | /* Generate RTX move insns from move lists attached to basic blocks |
993 | and edges. */ |
994 | static void |
995 | emit_moves (void) |
996 | { |
997 | basic_block bb; |
998 | edge_iterator ei; |
999 | edge e; |
1000 | rtx_insn *insns, *tmp, *next; |
1001 | |
1002 |   FOR_EACH_BB_FN (bb, cfun)
1003 | { |
1004 |       if (at_bb_start[bb->index] != NULL)
1005 |        {
1006 |          at_bb_start[bb->index] = modify_move_list (at_bb_start[bb->index]);
1007 |          insns
1008 |            = emit_move_list (at_bb_start[bb->index], REG_FREQ_FROM_BB (bb));
1009 |          tmp = BB_HEAD (bb);
1010 |          if (LABEL_P (tmp))
1011 |            tmp = NEXT_INSN (tmp);
1012 |          if (NOTE_INSN_BASIC_BLOCK_P (tmp))
1013 |            tmp = NEXT_INSN (tmp);
1014 |          /* Make sure to use the location of TMP or of a subsequent
1015 |             instruction, to avoid inheriting the location of the
1016 |             previous instruction.  */
1017 |          next = tmp;
1018 |          while (next && !NONDEBUG_INSN_P (next))
1019 |            next = NEXT_INSN (next);
1020 |          if (next)
1021 |            set_insn_locations (insns, INSN_LOCATION (next));
1022 | emit_insn_before (insns, tmp); |
1023 | else if (tmp) |
1024 | emit_insn_after (insns, PREV_INSN (tmp)); |
1025 | else |
1026 | emit_insn_after (insns, get_last_insn ()); |
1027 | } |
1028 | |
1029 |       if (at_bb_end[bb->index] != NULL)
1030 |        {
1031 |          at_bb_end[bb->index] = modify_move_list (at_bb_end[bb->index]);
1032 |          insns = emit_move_list (at_bb_end[bb->index], REG_FREQ_FROM_BB (bb));
1033 |          ira_assert (! control_flow_insn_p (BB_END (bb)));
1034 |          emit_insn_after (insns, BB_END (bb));
1035 | } |
1036 | |
1037 |       FOR_EACH_EDGE (e, ei, bb->succs)
1038 |        {
1039 |          if (e->aux == NULL)
1040 |            continue;
1041 |          ira_assert ((e->flags & EDGE_ABNORMAL) == 0
1042 |                      || ! EDGE_CRITICAL_P (e));
1043 |          e->aux = modify_move_list ((move_t) e->aux);
1044 |          insert_insn_on_edge
1045 |            (emit_move_list ((move_t) e->aux,
1046 |                             REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e))),
1047 | e); |
1048 | if (e->src->next_bb != e->dest) |
1049 | ira_additional_jumps_num++; |
1050 | } |
1051 | } |
1052 | } |
1053 | |
1054 | /* Update the costs of A and the corresponding allocnos on upper
1055 |    levels of the loop tree from reading (if READ_P) or writing A on an
1056 |    execution path with FREQ.  */
1057 | static void |
1058 | update_costs (ira_allocno_t a, bool read_p, int freq) |
1059 | { |
1060 | ira_loop_tree_node_t parent; |
1061 | |
1062 | for (;;) |
1063 | { |
1064 |       ALLOCNO_NREFS (a)++;
1065 |       ALLOCNO_FREQ (a) += freq;
1066 |       ALLOCNO_MEMORY_COST (a)
1067 |        += (ira_memory_move_cost[ALLOCNO_MODE (a)][ALLOCNO_CLASS (a)]
1068 |            [read_p ? 1 : 0] * freq);
1069 |       if (ALLOCNO_CAP (a) != NULL)
1070 |        a = ALLOCNO_CAP (a);
1071 |       else if ((parent = ALLOCNO_LOOP_TREE_NODE (a)->parent) == NULL
1072 |               || (a = parent->regno_allocno_map[ALLOCNO_REGNO (a)]) == NULL)
1073 | break; |
1074 | } |
1075 | } |
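/* Editorial note: each level of the loop tree containing A accumulates
   ira_memory_move_cost[mode][class][read_p ? 1 : 0] * freq, i.e. the
   direction-specific memory move cost scaled by the execution frequency
   of the point where A is read or written.  */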
1076 | |
1077 | /* Process moves from LIST with execution FREQ to add ranges, copies,
1078 |    and modify costs for allocnos involved in the moves.  All regnos
1079 |    living through the list are in LIVE_THROUGH, and the loop tree node
1080 |    used to find the corresponding allocnos is NODE.  */
1081 | static void |
1082 | add_range_and_copies_from_move_list (move_t list, ira_loop_tree_node_t node, |
1083 | bitmap live_through, int freq) |
1084 | { |
1085 | int start, n; |
1086 | unsigned int regno; |
1087 | move_t move; |
1088 | ira_allocno_t a; |
1089 | ira_copy_t cp; |
1090 | live_range_t r; |
1091 | bitmap_iterator bi; |
1092 | HARD_REG_SET hard_regs_live; |
1093 | |
1094 |   if (list == NULL)
1095 |     return;
1096 |   n = 0;
1097 |   EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)
1098 |     n++;
1099 |   REG_SET_TO_HARD_REG_SET (hard_regs_live, live_through);
1100 |   /* This is a trick to guarantee that the new ranges are not merged with
1101 |      the old ones.  */
1102 | ira_max_point++; |
1103 | start = ira_max_point; |
1104 |   for (move = list; move != NULL; move = move->next)
1105 |     {
1106 |       ira_allocno_t from = move->from;
1107 |       ira_allocno_t to = move->to;
1108 |       int nr, i;
1109 |
1110 |       bitmap_clear_bit (live_through, ALLOCNO_REGNO (from));
1111 |       bitmap_clear_bit (live_through, ALLOCNO_REGNO (to));
1112 |
1113 |       nr = ALLOCNO_NUM_OBJECTS (to);
1114 |       for (i = 0; i < nr; i++)
1115 |        {
1116 |          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
1117 |          if (OBJECT_CONFLICT_ARRAY (to_obj) == NULL)
1118 |            {
1119 |              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1120 |                fprintf (ira_dump_file, "    Allocate conflicts for a%dr%d\n",
1121 |                         ALLOCNO_NUM (to), REGNO (allocno_emit_reg (to)));
1122 | ira_allocate_object_conflicts (to_obj, n); |
1123 | } |
1124 | } |
1125 | ior_hard_reg_conflicts (from, hard_regs_live); |
1126 | ior_hard_reg_conflicts (to, hard_regs_live); |
1127 | |
1128 | update_costs (from, true, freq); |
1129 | update_costs (to, false, freq); |
1130 |       cp = ira_add_allocno_copy (from, to, freq, false, move->insn, NULL);
1131 |       if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1132 |        fprintf (ira_dump_file, "    Adding cp%d:a%dr%d-a%dr%d\n",
1133 |                 cp->num, ALLOCNO_NUM (cp->first),
1134 |                 REGNO (allocno_emit_reg (cp->first)),
1135 |                 ALLOCNO_NUM (cp->second),
1136 |                 REGNO (allocno_emit_reg (cp->second)));
1137 | |
1138 |       nr = ALLOCNO_NUM_OBJECTS (from);
1139 |       for (i = 0; i < nr; i++)
1140 |        {
1141 |          ira_object_t from_obj = ALLOCNO_OBJECT (from, i);
1142 |          r = OBJECT_LIVE_RANGES (from_obj);
1143 |          if (r == NULL || r->finish >= 0)
1144 |            {
1145 |              ira_add_live_range_to_object (from_obj, start, ira_max_point);
1146 |              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1147 |                fprintf (ira_dump_file,
1148 |                         "    Adding range [%d..%d] to allocno a%dr%d\n",
1149 |                         start, ira_max_point, ALLOCNO_NUM (from),
1150 |                         REGNO (allocno_emit_reg (from)));
1151 |            }
1152 |          else
1153 |            {
1154 |              r->finish = ira_max_point;
1155 |              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1156 |                fprintf (ira_dump_file,
1157 |                         "    Adding range [%d..%d] to allocno a%dr%d\n",
1158 |                         r->start, ira_max_point, ALLOCNO_NUM (from),
1159 |                         REGNO (allocno_emit_reg (from)));
1160 | } |
1161 | } |
1162 | ira_max_point++; |
1163 |       nr = ALLOCNO_NUM_OBJECTS (to);
1164 |       for (i = 0; i < nr; i++)
1165 |        {
1166 |          ira_object_t to_obj = ALLOCNO_OBJECT (to, i);
1167 |          ira_add_live_range_to_object (to_obj, ira_max_point, -1);
1168 | } |
1169 | ira_max_point++; |
1170 | } |
1171 |   for (move = list; move != NULL; move = move->next)
1172 |     {
1173 |       int nr, i;
1174 |       nr = ALLOCNO_NUM_OBJECTS (move->to);
1175 |       for (i = 0; i < nr; i++)
1176 |        {
1177 |          ira_object_t to_obj = ALLOCNO_OBJECT (move->to, i);
1178 |          r = OBJECT_LIVE_RANGES (to_obj);
1179 |          if (r->finish < 0)
1180 |            {
1181 |              r->finish = ira_max_point - 1;
1182 |              if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1183 |                fprintf (ira_dump_file,
1184 |                         "    Adding range [%d..%d] to allocno a%dr%d\n",
1185 |                         r->start, r->finish, ALLOCNO_NUM (move->to),
1186 |                         REGNO (allocno_emit_reg (move->to)));
1187 | } |
1188 | } |
1189 | } |
1190 | EXECUTE_IF_SET_IN_BITMAP (live_through, FIRST_PSEUDO_REGISTER, regno, bi)for (bmp_iter_set_init (&(bi), (live_through), (76), & (regno)); bmp_iter_set (&(bi), &(regno)); bmp_iter_next (&(bi), &(regno))) |
1191 | { |
1192 | ira_allocno_t to; |
1193 | int nr, i; |
1194 | |
1195 | a = node->regno_allocno_map[regno]; |
1196 | if ((to = ALLOCNO_EMIT_DATA (a)((ira_emit_data_t) ((a)->add_data))->mem_optimized_dest) != NULLnullptr) |
1197 | a = to; |
1198 | nr = ALLOCNO_NUM_OBJECTS (a)((a)->num_objects); |
1199 | for (i = 0; i < nr; i++) |
1200 | { |
1201 | ira_object_t obj = ALLOCNO_OBJECT (a, i)((a)->objects[i]); |
1202 | ira_add_live_range_to_object (obj, start, ira_max_point - 1); |
1203 | } |
1204 |       if (internal_flag_ira_verbose > 2 && ira_dump_file != NULL)
1205 | fprintf |
1206 | (ira_dump_file, |
1207 | " Adding range [%d..%d] to live through %s allocno a%dr%d\n", |
1208 | start, ira_max_point - 1, |
1209 |            to != NULL ? "upper level" : "",
1210 |            ALLOCNO_NUM (a), REGNO (allocno_emit_reg (a)));
1211 | } |
1212 | } |
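/* Editor's note: a sketch of the point bookkeeping above, under the
   assumption that the list holds two moves and that start equals
   ira_max_point on entry to the first loop (hypothetical values, for
   illustration only):

     move 1: from-range [start .. start],    to-range opens at start+1;
     move 2: from-range [start .. start+2],  to-range opens at start+3;

   the second loop then closes every still-open range at
   ira_max_point - 1 == start+3, and live-through allocnos get
   [start .. start+3].  */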
1213 | |
1214 | /* Process all move lists to add ranges, conflicts, and copies, and
1215 |    to modify costs for the allocnos involved in the moves.  */
1216 | static void |
1217 | add_ranges_and_copies (void) |
1218 | { |
1219 | basic_block bb; |
1220 | edge_iterator ei; |
1221 | edge e; |
1222 | ira_loop_tree_node_t node; |
1223 | bitmap live_through; |
1224 | |
1225 | live_through = ira_allocate_bitmap (); |
1226 |   FOR_EACH_BB_FN (bb, cfun)
1227 | { |
1228 |       /* It does not matter which loop_tree_node (that of the source
1229 |          or of the destination block) we use to search for allocnos
1230 |          by their regnos because of the subsequent IR flattening.  */
1231 |       node = IRA_BB_NODE (bb)->parent;
1232 | bitmap_copy (live_through, df_get_live_in (bb)); |
1233 | add_range_and_copies_from_move_list |
1234 |         (at_bb_start[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1235 | bitmap_copy (live_through, df_get_live_out (bb)); |
1236 | add_range_and_copies_from_move_list |
1237 |         (at_bb_end[bb->index], node, live_through, REG_FREQ_FROM_BB (bb));
1238 |       FOR_EACH_EDGE (e, ei, bb->succs)
1239 | { |
1240 | bitmap_and (live_through, |
1241 | df_get_live_in (e->dest), df_get_live_out (bb)); |
1242 | add_range_and_copies_from_move_list |
1243 | ((move_t) e->aux, node, live_through, |
1244 |              REG_FREQ_FROM_EDGE_FREQ (EDGE_FREQUENCY (e)));
1245 | } |
1246 | } |
1247 | ira_free_bitmap (live_through); |
1248 | } |
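/* Editor's note: the three calls above weight the copies differently --
   block frequency for move lists placed at a block's start or end, edge
   frequency for lists still attached to an edge -- so the recorded copy
   costs reflect how often each move would actually execute.  */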
1249 | |
1250 | /* The entry function.  It changes the code and generates moves to
1251 |    shuffle allocno values on region borders for regional register
1252 |    allocation (LOOPS_P is TRUE in this case).  */
1253 | void |
1254 | ira_emit (bool loops_p) |
1255 | { |
1256 | basic_block bb; |
1257 | rtx_insn *insn; |
1258 | edge_iterator ei; |
1259 | edge e; |
1260 | ira_allocno_t a; |
1261 | ira_allocno_iterator ai; |
1262 | size_t sz; |
1263 | |
1264 |   FOR_EACH_ALLOCNO (a, ai)
1265 |     ALLOCNO_EMIT_DATA (a)->reg = regno_reg_rtx[ALLOCNO_REGNO (a)];
1266 | if (! loops_p) |
1267 | return; |
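  /* Editor's note: for a single-region allocation it is enough to map each
     allocno to its original pseudo (done just above); there are no region
     borders, so the rest of the pass is skipped.  */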
1268 |   sz = sizeof (move_t) * last_basic_block_for_fn (cfun);
1269 | at_bb_start = (move_t *) ira_allocate (sz); |
1270 | memset (at_bb_start, 0, sz); |
1271 | at_bb_end = (move_t *) ira_allocate (sz); |
1272 | memset (at_bb_end, 0, sz); |
1273 | local_allocno_bitmap = ira_allocate_bitmap (); |
1274 | used_regno_bitmap = ira_allocate_bitmap (); |
1275 | renamed_regno_bitmap = ira_allocate_bitmap (); |
1276 | max_regno_before_changing = max_reg_num (); |
1277 |   ira_traverse_loop_tree (true, ira_loop_tree_root, change_loop, NULL);
1278 | set_allocno_somewhere_renamed_p (); |
1279 | ira_free_bitmap (used_regno_bitmap); |
1280 | ira_free_bitmap (renamed_regno_bitmap); |
1281 | ira_free_bitmap (local_allocno_bitmap); |
1282 | setup_entered_from_non_parent_p (); |
1283 |   FOR_EACH_BB_FN (bb, cfun)
1284 | { |
1285 |       at_bb_start[bb->index] = NULL;
1286 |       at_bb_end[bb->index] = NULL;
1287 |       FOR_EACH_EDGE (e, ei, bb->succs)
1288 |         if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1289 | generate_edge_moves (e); |
1290 | } |
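  /* Editor's note: generate_edge_moves leaves each edge's move list in
     e->aux; the cleanup loop at the end of this function therefore walks
     both the at_bb_* arrays and the edges.  */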
1291 | allocno_last_set |
1292 | = (move_t *) ira_allocate (sizeof (move_t) * max_reg_num ()); |
1293 | allocno_last_set_check |
1294 | = (int *) ira_allocate (sizeof (int) * max_reg_num ()); |
1295 | memset (allocno_last_set_check, 0, sizeof (int) * max_reg_num ()); |
1296 | memset (hard_regno_last_set_check, 0, sizeof (hard_regno_last_set_check)); |
1297 | curr_tick = 0; |
1298 |   FOR_EACH_BB_FN (bb, cfun)
1299 | unify_moves (bb, true); |
1300 |   FOR_EACH_BB_FN (bb, cfun)
1301 | unify_moves (bb, false); |
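  /* Editor's note: the boolean argument appears to select which move lists
     unify_moves works on -- first the lists at block starts, then the
     lists at block ends.  */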
1302 | move_vec.create (ira_allocnos_num); |
1303 | emit_moves (); |
1304 | add_ranges_and_copies (); |
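  /* Editor's note: by this point the pass has renamed pseudos inside the
     regions (change_loop), generated and unified the border moves, emitted
     them as RTL, and rebuilt ranges, copies, and conflicts to match.  What
     follows releases the scratch structures, commits the queued edge
     insertions, and re-recognizes the affected insns.  */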
1305 | /* Clean up: */ |
1306 |   FOR_EACH_BB_FN (bb, cfun)
1307 | { |
1308 | free_move_list (at_bb_start[bb->index]); |
1309 | free_move_list (at_bb_end[bb->index]); |
1310 |       FOR_EACH_EDGE (e, ei, bb->succs)
1311 | { |
1312 | free_move_list ((move_t) e->aux); |
1313 |           e->aux = NULL;
1314 | } |
1315 | } |
1316 | move_vec.release (); |
1317 | ira_free (allocno_last_set_check); |
1318 | ira_free (allocno_last_set); |
1319 | commit_edge_insertions (); |
1320 |   /* Fix insn codes.  It is necessary to do this before reload
1321 |      because reload assumes that the initial insn codes are defined.
1322 |      The insn codes can be invalidated by the CFG infrastructure,
1323 |      for example during jump redirection.  */
1324 |   FOR_EACH_BB_FN (bb, cfun)
1325 |     FOR_BB_INSNS_REVERSE (bb, insn)
1326 |       if (INSN_P (insn))
1327 | recog_memoized (insn); |
1328 | ira_free (at_bb_end); |
1329 | ira_free (at_bb_start); |
1330 | } |